block: Check locking assumptions at runtime
Instead of documenting the locking assumptions of most block layer
functions as comments, use lockdep_assert_held() to verify them at
runtime.

Signed-off-by: Bart Van Assche <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: Hannes Reinecke <[email protected]>
Cc: Omar Sandoval <[email protected]>
Cc: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
Bart Van Assche authored and axboe committed Jun 21, 2017
1 parent 9e0c829 commit 2fff8a9
Showing 5 changed files with 64 additions and 37 deletions.
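Background for the pattern used throughout this diff: lockdep_assert_held() comes from <linux/lockdep.h>. On kernels built with lock debugging (CONFIG_LOCKDEP) it triggers a kernel WARN whenever the current context does not hold the given lock; on production builds it compiles away, so the assertions below cost nothing in release kernels. A minimal sketch of the conversion this commit performs; the example function is illustrative and not part of the patch:

        #include <linux/blkdev.h>
        #include <linux/lockdep.h>

        /* Before: the locking rule exists only as a comment. */
        /* Caller must hold q->queue_lock. */
        static void example_requeue(struct request_queue *q, struct request *rq)
        {
                list_add(&rq->queuelist, &q->queue_head);
        }

        /* After: the same rule, verified at runtime under lockdep. */
        static void example_requeue_checked(struct request_queue *q,
                                            struct request *rq)
        {
                lockdep_assert_held(q->queue_lock);

                list_add(&rq->queuelist, &q->queue_head);
        }

Note that in the legacy request path q->queue_lock is a pointer (spinlock_t *), which is why the call sites below pass q->queue_lock rather than &q->queue_lock.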
71 changes: 47 additions & 24 deletions block/blk-core.c
@@ -236,10 +236,12 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time. Queue lock must be held.
+ *   restarted around the specified time.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
+        lockdep_assert_held(q->queue_lock);
+
         if (likely(!blk_queue_dead(q)))
                 queue_delayed_work(kblockd_workqueue, &q->delay_work,
                                    msecs_to_jiffies(msecs));
@@ -257,6 +259,8 @@ EXPORT_SYMBOL(blk_delay_queue);
  **/
 void blk_start_queue_async(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+
         queue_flag_clear(QUEUE_FLAG_STOPPED, q);
         blk_run_queue_async(q);
 }
@@ -269,10 +273,11 @@ EXPORT_SYMBOL(blk_start_queue_async);
  * Description:
  *   blk_start_queue() will clear the stop flag on the queue, and call
  *   the request_fn for the queue if it was in a stopped state when
- *   entered. Also see blk_stop_queue(). Queue lock must be held.
+ *   entered. Also see blk_stop_queue().
  **/
 void blk_start_queue(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
         WARN_ON(!irqs_disabled());
 
         queue_flag_clear(QUEUE_FLAG_STOPPED, q);
@@ -292,10 +297,12 @@ EXPORT_SYMBOL(blk_start_queue);
  *   or if it simply chooses not to queue more I/O at one point, it can
  *   call this function to prevent the request_fn from being called until
  *   the driver has signalled it's ready to go again. This happens by calling
- *   blk_start_queue() to restart queue operations. Queue lock must be held.
+ *   blk_start_queue() to restart queue operations.
  **/
 void blk_stop_queue(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+
         cancel_delayed_work(&q->delay_work);
         queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -348,6 +355,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 inline void __blk_run_queue_uncond(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+
         if (unlikely(blk_queue_dead(q)))
                 return;
 
@@ -369,11 +378,12 @@ EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
  * @q: The queue to run
  *
  * Description:
- *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled.
+ *    See @blk_run_queue.
  */
 void __blk_run_queue(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+
         if (unlikely(blk_queue_stopped(q)))
                 return;
 
@@ -387,10 +397,17 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us. The caller must hold the queue lock.
+ *    of us.
+ *
+ * Note:
+ *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
+ *    has canceled q->delay_work, callers must hold the queue lock to avoid
+ *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
  */
 void blk_run_queue_async(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+
         if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
                 mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
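Editorial note: the new Note block spells out why the lock is still required even though this function only schedules deferred work. A hypothetical legacy (non-mq) call site that satisfies the assertion would look like:

        spin_lock_irq(q->queue_lock);
        blk_run_queue_async(q);
        spin_unlock_irq(q->queue_lock);

Holding q->queue_lock across the call is what keeps blk_cleanup_queue(), which takes the same lock before marking the queue dead and then cancels q->delay_work, from racing with the mod_delayed_work() above.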
@@ -1136,6 +1153,8 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
         int may_queue;
         req_flags_t rq_flags = RQF_ALLOCED;
 
+        lockdep_assert_held(q->queue_lock);
+
         if (unlikely(blk_queue_dying(q)))
                 return ERR_PTR(-ENODEV);
 
@@ -1309,6 +1328,8 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
         struct request_list *rl;
         struct request *rq;
 
+        lockdep_assert_held(q->queue_lock);
+
         rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
 retry:
         rq = __get_request(rl, op, bio, gfp_mask);
@@ -1402,6 +1423,8 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
+        lockdep_assert_held(q->queue_lock);
+
         blk_delete_timer(rq);
         blk_clear_rq_complete(rq);
         trace_block_rq_requeue(q, rq);
@@ -1476,9 +1499,6 @@ static void blk_pm_put_request(struct request *rq)
 static inline void blk_pm_put_request(struct request *rq) {}
 #endif
 
-/*
- * queue lock must be held
- */
 void __blk_put_request(struct request_queue *q, struct request *req)
 {
         req_flags_t rq_flags = req->rq_flags;
@@ -1491,6 +1511,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                 return;
         }
 
+        lockdep_assert_held(q->queue_lock);
+
         blk_pm_put_request(req);
 
         elv_completed_request(q, req);
@@ -2327,9 +2349,6 @@ EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
  *
  * Return:
  *     The number of bytes to fail.
- *
- * Context:
- *     queue_lock must be held.
  */
 unsigned int blk_rq_err_bytes(const struct request *rq)
 {
@@ -2469,15 +2488,14 @@ void blk_account_io_start(struct request *rq, bool new_io)
  * Return:
  *     Pointer to the request at the top of @q if available.  Null
  *     otherwise.
- *
- * Context:
- *     queue_lock must be held.
  */
 struct request *blk_peek_request(struct request_queue *q)
 {
         struct request *rq;
         int ret;
 
+        lockdep_assert_held(q->queue_lock);
+
         while ((rq = __elv_next_request(q)) != NULL) {
 
                 rq = blk_pm_peek_request(q, rq);
@@ -2593,12 +2611,11 @@ void blk_dequeue_request(struct request *rq)
  *
  *     Block internal functions which don't want to start timer should
  *     call blk_dequeue_request().
- *
- * Context:
- *     queue_lock must be held.
  */
 void blk_start_request(struct request *req)
 {
+        lockdep_assert_held(req->q->queue_lock);
+
         blk_dequeue_request(req);
 
         if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
@@ -2623,14 +2640,13 @@ EXPORT_SYMBOL(blk_start_request);
  * Return:
  *     Pointer to the request at the top of @q if available.  Null
  *     otherwise.
- *
- * Context:
- *     queue_lock must be held.
  */
 struct request *blk_fetch_request(struct request_queue *q)
 {
         struct request *rq;
 
+        lockdep_assert_held(q->queue_lock);
+
         rq = blk_peek_request(q);
         if (rq)
                 blk_start_request(rq);
@@ -2776,13 +2792,12 @@ void blk_unprep_request(struct request *req)
 }
 EXPORT_SYMBOL_GPL(blk_unprep_request);
 
-/*
- * queue lock must be held
- */
 void blk_finish_request(struct request *req, blk_status_t error)
 {
         struct request_queue *q = req->q;
 
+        lockdep_assert_held(req->q->queue_lock);
+
         if (req->rq_flags & RQF_STATS)
                 blk_stat_add(req);
 
@@ -2864,6 +2879,8 @@ static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
 static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
                                    unsigned int nr_bytes, unsigned int bidi_bytes)
 {
+        lockdep_assert_held(rq->q->queue_lock);
+
         if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
                 return true;
 
@@ -2930,6 +2947,8 @@ EXPORT_SYMBOL(blk_end_request_all);
 bool __blk_end_request(struct request *rq, blk_status_t error,
                        unsigned int nr_bytes)
 {
+        lockdep_assert_held(rq->q->queue_lock);
+
         return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 EXPORT_SYMBOL(__blk_end_request);
@@ -2947,6 +2966,8 @@ void __blk_end_request_all(struct request *rq, blk_status_t error)
         bool pending;
         unsigned int bidi_bytes = 0;
 
+        lockdep_assert_held(rq->q->queue_lock);
+
         if (unlikely(blk_bidi_rq(rq)))
                 bidi_bytes = blk_rq_bytes(rq->next_rq);
 
@@ -3211,6 +3232,8 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
                             bool from_schedule)
         __releases(q->queue_lock)
 {
+        lockdep_assert_held(q->queue_lock);
+
         trace_block_unplug(q, depth, !from_schedule);
 
         if (from_schedule)
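The completion helpers above (__blk_end_request(), __blk_end_request_all(), __blk_end_bidi_request()) reach the lock through rq->q, so a driver needs no queue pointer of its own. A sketch of a hypothetical legacy driver's completion path that satisfies the new assertions; the function name is illustrative, not from the patch:

        static void mydrv_complete_rq(struct request *rq, blk_status_t error)
        {
                struct request_queue *q = rq->q;
                unsigned long flags;

                spin_lock_irqsave(q->queue_lock, flags);
                __blk_end_request_all(rq, error); /* asserts q->queue_lock held */
                spin_unlock_irqrestore(q->queue_lock, flags);
        }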
8 changes: 5 additions & 3 deletions block/blk-flush.c
@@ -346,6 +346,8 @@ static void flush_data_end_io(struct request *rq, blk_status_t error)
         struct request_queue *q = rq->q;
         struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
+        lockdep_assert_held(q->queue_lock);
+
         /*
          * Updating q->in_flight[] here for making this tag usable
          * early. Because in blk_queue_start_tag(),
@@ -411,9 +413,6 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
  * or __blk_mq_run_hw_queue() to dispatch request.
  * @rq is being submitted.  Analyze what needs to be done and put it on the
  * right queue.
- *
- * CONTEXT:
- * spin_lock_irq(q->queue_lock) in !mq case
  */
 void blk_insert_flush(struct request *rq)
 {
@@ -422,6 +421,9 @@ void blk_insert_flush(struct request *rq)
         unsigned int policy = blk_flush_policy(fflags, rq);
         struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
+        if (!q->mq_ops)
+                lockdep_assert_held(q->queue_lock);
+
         /*
          * @policy now records what operations need to be done.  Adjust
          * REQ_PREFLUSH and FUA for the driver.
3 changes: 3 additions & 0 deletions block/blk-merge.c
@@ -648,6 +648,9 @@ static void blk_account_io_merge(struct request *req)
 static struct request *attempt_merge(struct request_queue *q,
                                      struct request *req, struct request *next)
 {
+        if (!q->mq_ops)
+                lockdep_assert_held(q->queue_lock);
+
         if (!rq_mergeable(req) || !rq_mergeable(next))
                 return NULL;
 
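blk_insert_flush() and attempt_merge() sit on paths shared between the legacy request path and blk-mq, and blk-mq does not use q->queue_lock here, so the assertion has to be conditional. The patch open-codes the test at each call site; a hypothetical helper expressing the same rule would be:

        static inline void blk_assert_queue_locked(struct request_queue *q)
        {
                if (!q->mq_ops)         /* legacy (single-queue) path only */
                        lockdep_assert_held(q->queue_lock);
        }

This also preserves the information from the deleted "spin_lock_irq(q->queue_lock) in !mq case" comment in blk-flush.c, but in checkable form.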
15 changes: 6 additions & 9 deletions block/blk-tag.c
@@ -258,15 +258,14 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
  *    all transfers have been done for a request. It's important to call
  *    this function before end_that_request_last(), as that will put the
  *    request back on the free list thus corrupting the internal tag list.
- *
- * Notes:
- *   queue lock must be held.
  **/
 void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 {
         struct blk_queue_tag *bqt = q->queue_tags;
         unsigned tag = rq->tag; /* negative tags invalid */
 
+        lockdep_assert_held(q->queue_lock);
+
         BUG_ON(tag >= bqt->real_max_depth);
 
         list_del_init(&rq->queuelist);
@@ -307,16 +306,15 @@ EXPORT_SYMBOL(blk_queue_end_tag);
  *    calling this function.  The request will also be removed from
  *    the request queue, so it's the drivers responsibility to readd
  *    it if it should need to be restarted for some reason.
- *
- * Notes:
- *   queue lock must be held.
  **/
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
         struct blk_queue_tag *bqt = q->queue_tags;
         unsigned max_depth;
         int tag;
 
+        lockdep_assert_held(q->queue_lock);
+
         if (unlikely((rq->rq_flags & RQF_QUEUED))) {
                 printk(KERN_ERR
                        "%s: request %p for device [%s] already tagged %d",
@@ -389,14 +387,13 @@ EXPORT_SYMBOL(blk_queue_start_tag);
  *   Hardware conditions may dictate a need to stop all pending requests.
  *   In this case, we will safely clear the block side of the tag queue and
  *   readd all requests to the request queue in the right order.
- *
- * Notes:
- *   queue lock must be held.
  **/
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
         struct list_head *tmp, *n;
 
+        lockdep_assert_held(q->queue_lock);
+
         list_for_each_safe(tmp, n, &q->tag_busy_list)
                 blk_requeue_request(q, list_entry_rq(tmp));
 }
4 changes: 3 additions & 1 deletion block/blk-timeout.c
@@ -189,13 +189,15 @@ unsigned long blk_rq_timeout(unsigned long timeout)
  * Notes:
  *    Each request has its own timer, and as it is added to the queue, we
  *    set up the timer. When the request completes, we cancel the timer.
- *    Queue lock must be held for the non-mq case, mq case doesn't care.
  */
 void blk_add_timer(struct request *req)
 {
         struct request_queue *q = req->q;
         unsigned long expiry;
 
+        if (!q->mq_ops)
+                lockdep_assert_held(q->queue_lock);
+
         /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
         if (!q->mq_ops && !q->rq_timed_out_fn)
                 return;
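For reference, an approximation of the macro these hunks rely on, along the lines of its definition in include/linux/lockdep.h around this kernel version (simplified; exact details may differ):

        #ifdef CONFIG_LOCKDEP
        #define lockdep_assert_held(l)  do {                            \
                        WARN_ON(debug_locks && !lockdep_is_held(l));    \
                } while (0)
        #else
        #define lockdep_assert_held(l)  do { (void)(l); } while (0)
        #endif

Because the !CONFIG_LOCKDEP variant evaluates its argument without generating any code, none of the assertions added by this commit cost anything on production builds.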
