block: allocate io_context upfront
The block layer allocates the ioc very lazily: it waits until the
moment the ioc is absolutely necessary. Unfortunately, that moment can
fall inside the queue lock, forcing __get_request() to perform an
unlock - try alloc - retry dance.

Just allocate it up-front on entry to the block layer. We're not saving
the rain forest by deferring it to the last possible moment and
complicating things unnecessarily.

This patch prepares for further updates to the request allocation path.

Signed-off-by: Tejun Heo <[email protected]>
Acked-by: Vivek Goyal <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
htejun authored and axboe committed Jun 25, 2012
1 parent a06e05e, commit 7f4b35d
Showing 2 changed files with 15 additions and 30 deletions.
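
For reference, the create_io_context() helper that this patch leans on is,
roughly, the inline below (a sketch approximating block/blk.h from this era;
the exact body is an assumption, not verbatim source). The key property is
that it may fail and leave current->io_context NULL, which every caller
touched here must tolerate.

/* Sketch of create_io_context(), approximating block/blk.h - not verbatim. */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
        if (unlikely(!current->io_context))
                create_task_io_context(current, gfp_mask, node);
        /* May still be NULL if allocation failed; callers must cope. */
        return current->io_context;
}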
block/blk-core.c (42 changes: 15 additions & 27 deletions)
@@ -855,15 +855,11 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 {
        struct request *rq;
        struct request_list *rl = &q->rq;
-       struct elevator_type *et;
-       struct io_context *ioc;
+       struct elevator_type *et = q->elevator->type;
+       struct io_context *ioc = rq_ioc(bio);
        struct io_cq *icq = NULL;
        const bool is_sync = rw_is_sync(rw_flags) != 0;
-       bool retried = false;
        int may_queue;
-retry:
-       et = q->elevator->type;
-       ioc = rq_ioc(bio);
 
        if (unlikely(blk_queue_dead(q)))
                return NULL;
@@ -874,20 +870,6 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 
        if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
                if (rl->count[is_sync]+1 >= q->nr_requests) {
-                       /*
-                        * We want ioc to record batching state.  If it's
-                        * not already there, creating a new one requires
-                        * dropping queue_lock, which in turn requires
-                        * retesting conditions to avoid queue hang.
-                        */
-                       if (!ioc && !retried) {
-                               spin_unlock_irq(q->queue_lock);
-                               create_io_context(gfp_mask, q->node);
-                               spin_lock_irq(q->queue_lock);
-                               retried = true;
-                               goto retry;
-                       }
-
                        /*
                         * The queue will fill after this allocation, so set
                         * it as full, and mark this process as "batching".
@@ -955,12 +937,8 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
        /* init elvpriv */
        if (rw_flags & REQ_ELVPRIV) {
                if (unlikely(et->icq_cache && !icq)) {
-                       create_io_context(gfp_mask, q->node);
-                       ioc = rq_ioc(bio);
-                       if (!ioc)
-                               goto fail_elvpriv;
-
-                       icq = ioc_create_icq(ioc, q, gfp_mask);
+                       if (ioc)
+                               icq = ioc_create_icq(ioc, q, gfp_mask);
                        if (!icq)
                                goto fail_elvpriv;
                }
@@ -1071,7 +1049,6 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         * to allocate at least one request, and up to a big batch of them
         * for a small period time.  See ioc_batching, ioc_set_batching
         */
-       create_io_context(GFP_NOIO, q->node);
        ioc_set_batching(q, current->io_context);
 
        spin_lock_irq(q->queue_lock);
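
Dropping create_io_context() here is safe because ioc_set_batching() already
tolerates a missing ioc; its guard is roughly the following (paraphrased from
blk-core.c, shown for context):

static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
        /* Nothing to record if ioc allocation failed or we already batch. */
        if (!ioc || ioc_batching(q, ioc))
                return;
        ioc->nr_batch_requests = q->nr_batch_requests;
        ioc->last_waited = jiffies;
}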
@@ -1086,6 +1063,9 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 
        BUG_ON(rw != READ && rw != WRITE);
 
+       /* create ioc upfront */
+       create_io_context(gfp_mask, q->node);
+
        spin_lock_irq(q->queue_lock);
        rq = get_request(q, rw, NULL, gfp_mask);
        if (!rq)
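
With the ioc now created before queue_lock is taken, blk_get_request()
presents the same interface as before; the hypothetical snippet below only
illustrates the new ordering (names and error handling are illustrative):

/* Hypothetical caller: the ioc is set up inside blk_get_request()
 * before queue_lock is taken, so __get_request() never has to drop
 * the lock just to allocate one. */
struct request *rq;

rq = blk_get_request(q, WRITE, GFP_NOIO);
if (!rq)
        return -ENOMEM; /* request allocation failed; ioc failure is survivable */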
@@ -1698,6 +1678,14 @@ generic_make_request_checks(bio)
                goto end_io;
        }
 
+       /*
+        * Various block parts want %current->io_context and lazy ioc
+        * allocation ends up trading a lot of pain for a small amount of
+        * memory.  Just allocate it upfront.  This may fail and block
+        * layer knows how to live with it.
+        */
+       create_io_context(GFP_ATOMIC, q->node);
+
        if (blk_throtl_bio(q, bio))
                return false; /* throttled, will be resubmitted later */
 
block/blk-throttle.c (3 changes: 0 additions & 3 deletions)
@@ -1123,9 +1123,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
                goto out;
        }
 
-       /* bio_associate_current() needs ioc, try creating */
-       create_io_context(GFP_ATOMIC, q->node);
-
        /*
         * A throtl_grp pointer retrieved under rcu can be used to access
         * basic fields like stats and io rates. If a group has no rules,
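The removed call is safe to drop because generic_make_request_checks() now
creates the ioc before blk_throtl_bio() runs, and the consumer that wanted it
degrades gracefully when allocation failed; approximately (an assumption about
bio_associate_current() from the same series, not verbatim):

/* Approximate consumer behavior - not verbatim kernel source. */
int bio_associate_current(struct bio *bio)
{
        struct io_context *ioc = current->io_context;

        if (!ioc)
                return -ENOENT; /* upfront allocation failed; throttling copes */

        /* ... take references and associate the ioc (and css) with the bio ... */
        return 0;
}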
