block: make bio_queue_enter() fast-path available inline
A prep patch for shifting the queue enter logic: this moves the expected
fast path inline and leaves __bio_queue_enter() as an out-of-line
function call. We don't want to inline the latter, as it's mostly
slow-path code.

Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
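
The pattern here is the classic split between an inline fast path and an out-of-line slow path: a header exposes a small static inline wrapper that resolves the common case with a cheap check, and falls back to a real function call only when that check fails. A minimal standalone sketch of the idiom, with hypothetical names in plain C11 (this shows the general technique, not the kernel code itself):

#include <stdatomic.h>

/* Out-of-line slow path: rarely taken, so its call overhead is paid rarely. */
static int enter_slow(void)
{
	return -1;	/* e.g. the resource is frozen or gone */
}

static atomic_int usage = 1;	/* > 0 while the resource is live */

/* Inline fast path: one atomic op in the common case, no function call. */
static inline int enter(void)
{
	int old = atomic_load_explicit(&usage, memory_order_relaxed);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&usage, &old, old + 1))
			return 0;	/* common case: reference taken */
	}
	return enter_slow();	/* rare case: defer to the slow path */
}

The patch below applies the same shape: blk_try_enter_queue() and the bio_queue_enter() wrapper move into blk.h as static inline functions, while the code that waits for an unfrozen queue stays out of line in __bio_queue_enter().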
axboe committed Nov 4, 2021 · 1 parent 7153971 · commit c98cb5b
Showing 2 changed files with 35 additions and 27 deletions.
28 changes: 1 addition & 27 deletions block/blk-core.c
@@ -386,30 +386,6 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
-static bool blk_try_enter_queue(struct request_queue *q, bool pm)
-{
-	rcu_read_lock();
-	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
-		goto fail;
-
-	/*
-	 * The code that increments the pm_only counter must ensure that the
-	 * counter is globally visible before the queue is unfrozen.
-	 */
-	if (blk_queue_pm_only(q) &&
-	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
-		goto fail_put;
-
-	rcu_read_unlock();
-	return true;
-
-fail_put:
-	blk_queue_exit(q);
-fail:
-	rcu_read_unlock();
-	return false;
-}
-
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -442,10 +418,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 	return 0;
 }
 
-static inline int bio_queue_enter(struct bio *bio)
+int __bio_queue_enter(struct request_queue *q, struct bio *bio)
 {
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
 	while (!blk_try_enter_queue(q, false)) {
 		struct gendisk *disk = bio->bi_bdev->bd_disk;
 
34 changes: 34 additions & 0 deletions block/blk.h
@@ -55,6 +55,40 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 void blk_freeze_queue(struct request_queue *q);
 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 void blk_queue_start_drain(struct request_queue *q);
+int __bio_queue_enter(struct request_queue *q, struct bio *bio);
+
+static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+	rcu_read_lock();
+	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
+		goto fail;
+
+	/*
+	 * The code that increments the pm_only counter must ensure that the
+	 * counter is globally visible before the queue is unfrozen.
+	 */
+	if (blk_queue_pm_only(q) &&
+	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+		goto fail_put;
+
+	rcu_read_unlock();
+	return true;
+
+fail_put:
+	blk_queue_exit(q);
+fail:
+	rcu_read_unlock();
+	return false;
+}
+
+static inline int bio_queue_enter(struct bio *bio)
+{
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+
+	if (blk_try_enter_queue(q, false))
+		return 0;
+	return __bio_queue_enter(q, bio);
+}
+
 #define BIO_INLINE_VECS 4
 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
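
With the split in place, anyone who includes blk.h gets the percpu-ref fast path inlined at the call site; only a frozen, pm-only, or dying queue reaches the out-of-line __bio_queue_enter(). A hedged sketch of a caller, loosely modeled on a bio submission path such as blk_mq_submit_bio() (the error handling is illustrative and assumes, as in the tree at this point, that the slow path ends the bio itself on failure):

/* Illustrative only: not a line-for-line copy of any kernel caller. */
static void submit_sketch(struct bio *bio)
{
	/*
	 * Fast path: blk_try_enter_queue() takes a percpu reference and is
	 * inlined right here.  __bio_queue_enter() runs only when the queue
	 * is frozen, pm_only, or going away, and it ends the bio on failure,
	 * so the caller simply returns.
	 */
	if (bio_queue_enter(bio))
		return;

	/* ... allocate a request and issue it ... */

	blk_queue_exit(bdev_get_queue(bio->bi_bdev));	/* drop the usage ref */
}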
