Merge tag 'for-5.16/block-2021-11-09' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 - Set of fixes for the batched tag allocation (Ming, me)

 - add_disk() error handling fix (Luis)

 - Nested queue quiesce fixes (Ming)

 - Shared tags init error handling fix (Ye)

 - Misc cleanups (Jean, Ming, me)

* tag 'for-5.16/block-2021-11-09' of git://git.kernel.dk/linux-block:
  nvme: wait until quiesce is done
  scsi: make sure that request queue queiesce and unquiesce balanced
  scsi: avoid to quiesce sdev->request_queue two times
  blk-mq: add one API for waiting until quiesce is done
  blk-mq: don't free tags if the tag_set is used by other device in queue initialztion
  block: fix device_add_disk() kobject_create_and_add() error handling
  block: ensure cached plug request matches the current queue
  block: move queue enter logic into blk_mq_submit_bio()
  block: make bio_queue_enter() fast-path available inline
  block: split request allocation components into helpers
  block: have plug stored requests hold references to the queue
  blk-mq: update hctx->nr_active in blk_mq_end_request_batch()
  blk-mq: add RQF_ELV debug entry
  blk-mq: only try to run plug merge if request has same queue with incoming bio
  block: move RQF_ELV setting into allocators
  dm: don't stop request queue after the dm device is suspended
  block: replace always false argument with 'false'
  block: assign correct tag before doing prefetch of request
  blk-mq: fix redundant check of !e expression
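
The quiesce-related entries above split "quiesce" from "wait until quiesce is done", so a driver with many queues can mark them all quiesced and pay the synchronization cost once. A minimal sketch of that pattern, assuming the 5.16-era signatures blk_mq_quiesce_queue_nowait(q) and blk_mq_wait_quiesce_done(q); the example_ctrl structure, its queue array, and the helper name are hypothetical and not part of this merge:

#include <linux/blk-mq.h>

struct example_ctrl {
	struct request_queue	*queues[8];	/* hypothetical per-driver bookkeeping */
	int			nr_queues;
};

static void example_quiesce_all(struct example_ctrl *ctrl)
{
	int i;

	/* mark every queue quiesced, but don't wait per queue */
	for (i = 0; i < ctrl->nr_queues; i++)
		blk_mq_quiesce_queue_nowait(ctrl->queues[i]);

	/*
	 * Wait for in-flight dispatches to drain; the grace period is
	 * shared, so waiting after the whole batch is far cheaper than
	 * calling the synchronous blk_mq_quiesce_queue() per queue.
	 */
	for (i = 0; i < ctrl->nr_queues; i++)
		blk_mq_wait_quiesce_done(ctrl->queues[i]);
}
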
torvalds committed Nov 9, 2021
2 parents 1dc1f92 + 26af1cd commit 3e28850
Showing 13 changed files with 263 additions and 140 deletions.
block/blk-core.c: 21 additions & 40 deletions
@@ -386,30 +386,6 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
-static bool blk_try_enter_queue(struct request_queue *q, bool pm)
-{
-	rcu_read_lock();
-	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
-		goto fail;
-
-	/*
-	 * The code that increments the pm_only counter must ensure that the
-	 * counter is globally visible before the queue is unfrozen.
-	 */
-	if (blk_queue_pm_only(q) &&
-	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
-		goto fail_put;
-
-	rcu_read_unlock();
-	return true;
-
-fail_put:
-	blk_queue_exit(q);
-fail:
-	rcu_read_unlock();
-	return false;
-}
-
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -442,10 +418,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 	return 0;
 }
 
-static inline int bio_queue_enter(struct bio *bio)
+int __bio_queue_enter(struct request_queue *q, struct bio *bio)
 {
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
 	while (!blk_try_enter_queue(q, false)) {
 		struct gendisk *disk = bio->bi_bdev->bd_disk;
 
@@ -742,7 +716,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
 	return BLK_STS_OK;
 }
 
-static noinline_for_stack bool submit_bio_checks(struct bio *bio)
+noinline_for_stack bool submit_bio_checks(struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 	struct request_queue *q = bdev_get_queue(bdev);
@@ -860,22 +834,23 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
 	return false;
 }
 
-static void __submit_bio(struct bio *bio)
+static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
 {
-	struct gendisk *disk = bio->bi_bdev->bd_disk;
-
 	if (unlikely(bio_queue_enter(bio) != 0))
 		return;
+	if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
+		disk->fops->submit_bio(bio);
+	blk_queue_exit(disk->queue);
+}
 
-	if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
-		goto queue_exit;
-	if (!disk->fops->submit_bio) {
+static void __submit_bio(struct bio *bio)
+{
+	struct gendisk *disk = bio->bi_bdev->bd_disk;
+
+	if (!disk->fops->submit_bio)
 		blk_mq_submit_bio(bio);
-		return;
-	}
-	disk->fops->submit_bio(bio);
-queue_exit:
-	blk_queue_exit(disk->queue);
+	else
+		__submit_bio_fops(disk, bio);
 }
 
 /*
@@ -1615,7 +1590,13 @@ void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 	flush_plug_callbacks(plug, from_schedule);
 	if (!rq_list_empty(plug->mq_list))
 		blk_mq_flush_plug_list(plug, from_schedule);
-	if (unlikely(!from_schedule && plug->cached_rq))
+	/*
+	 * Unconditionally flush out cached requests, even if the unplug
+	 * event came from schedule. Since we know hold references to the
+	 * queue for cached requests, we don't want a blocked task holding
+	 * up a queue freeze/quiesce event.
+	 */
+	if (unlikely(!rq_list_empty(plug->cached_rq)))
 		blk_mq_free_plug_rqs(plug);
 }
 
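
For context on the blk_flush_plug() change above: cached plug requests now hold queue references, so the plug is flushed even when the unplug event comes from schedule. A minimal sketch of the plug API this path serves; bio_a and bio_b stand in for bios the caller has already built, and the helper name is hypothetical:

#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_plugged_submit(struct bio *bio_a, struct bio *bio_b)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	submit_bio(bio_a);	/* batched in the per-task plug */
	submit_bio(bio_b);
	blk_finish_plug(&plug);	/* flushes via the blk_flush_plug() path patched above */
}
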
block/blk-merge.c: 4 additions & 2 deletions
@@ -1101,9 +1101,11 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 		 * the same queue, there should be only one such rq in a queue
 		 */
 		*same_queue_rq = true;
+
+		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
+				BIO_MERGE_OK)
+			return true;
 	}
-	if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == BIO_MERGE_OK)
-		return true;
 	return false;
 }
 
block/blk-mq-debugfs.c: 1 addition & 0 deletions
@@ -308,6 +308,7 @@ static const char *const rqf_name[] = {
 	RQF_NAME(SPECIAL_PAYLOAD),
 	RQF_NAME(ZONE_WRITE_LOCKED),
 	RQF_NAME(MQ_POLL_SLEPT),
+	RQF_NAME(ELV),
 };
 #undef RQF_NAME
 
block/blk-mq-sched.c: 11 additions & 4 deletions
@@ -370,15 +370,20 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 	bool ret = false;
 	enum hctx_type type;
 
-	if (e && e->type->ops.bio_merge)
-		return e->type->ops.bio_merge(q, bio, nr_segs);
+	if (bio_queue_enter(bio))
+		return false;
+
+	if (e && e->type->ops.bio_merge) {
+		ret = e->type->ops.bio_merge(q, bio, nr_segs);
+		goto out_put;
+	}
 
 	ctx = blk_mq_get_ctx(q);
 	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 	type = hctx->type;
 	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
 	    list_empty_careful(&ctx->rq_lists[type]))
-		return false;
+		goto out_put;
 
 	/* default per sw-queue merge */
 	spin_lock(&ctx->lock);
@@ -391,6 +396,8 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 		ret = true;
 
 	spin_unlock(&ctx->lock);
+out_put:
+	blk_queue_exit(q);
 	return ret;
 }
 
@@ -497,7 +504,7 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 	 * busy in case of 'none' scheduler, and this way may save
 	 * us one extra enqueue & dequeue to sw queue.
 	 */
-	if (!hctx->dispatch_busy && !e && !run_queue_async) {
+	if (!hctx->dispatch_busy && !run_queue_async) {
 		blk_mq_try_issue_list_directly(hctx, list);
 		if (list_empty(list))
 			goto out;
(diffs for the remaining nine changed files not shown)
