Skip to content

Commit

Permalink
blk-mq: Convert to new hotplug state machine
Browse files Browse the repository at this point in the history
Install the callbacks via the state machine so we can phase out the cpu
hotplug notifiers mess.

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: Christoph Hellwing <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
  • Loading branch information
Sebastian Andrzej Siewior authored and axboe committed Sep 22, 2016
1 parent 9467f85 commit 65d5291
Showing 1 changed file with 43 additions and 44 deletions.
87 changes: 43 additions & 44 deletions block/blk-mq.c
Original file line number Diff line number Diff line change
Expand Up @@ -2116,50 +2116,18 @@ static void blk_mq_queue_reinit(struct request_queue *q,
blk_mq_sysfs_register(q);
}

static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
unsigned long action, void *hcpu)
/*
* New online cpumask which is going to be set in this hotplug event.
* Declare this cpumasks as global as cpu-hotplug operation is invoked
* one-by-one and dynamically allocating this could result in a failure.
*/
static struct cpumask cpuhp_online_new;

static void blk_mq_queue_reinit_work(void)
{
struct request_queue *q;
int cpu = (unsigned long)hcpu;
/*
* New online cpumask which is going to be set in this hotplug event.
* Declare this cpumasks as global as cpu-hotplug operation is invoked
* one-by-one and dynamically allocating this could result in a failure.
*/
static struct cpumask online_new;

/*
* Before hotadded cpu starts handling requests, new mappings must
* be established. Otherwise, these requests in hw queue might
* never be dispatched.
*
* For example, there is a single hw queue (hctx) and two CPU queues
* (ctx0 for CPU0, and ctx1 for CPU1).
*
* Now CPU1 is just onlined and a request is inserted into
* ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is
* still zero.
*
* And then while running hw queue, flush_busy_ctxs() finds bit0 is
* set in pending bitmap and tries to retrieve requests in
* hctx->ctxs[0]->rq_list. But htx->ctxs[0] is a pointer to ctx0,
* so the request in ctx1->rq_list is ignored.
*/
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DEAD:
case CPU_UP_CANCELED:
cpumask_copy(&online_new, cpu_online_mask);
break;
case CPU_UP_PREPARE:
cpumask_copy(&online_new, cpu_online_mask);
cpumask_set_cpu(cpu, &online_new);
break;
default:
return NOTIFY_OK;
}

mutex_lock(&all_q_mutex);

/*
* We need to freeze and reinit all existing queues. Freezing
* involves synchronous wait for an RCU grace period and doing it
Expand All @@ -2180,13 +2148,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
}

list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_queue_reinit(q, &online_new);
blk_mq_queue_reinit(q, &cpuhp_online_new);

list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_unfreeze_queue(q);

mutex_unlock(&all_q_mutex);
return NOTIFY_OK;
}

/*
 * CPU-hotplug teardown callback: @cpu has gone offline.
 *
 * Removes @cpu from the global cpuhp_online_new snapshot and then remaps
 * every request_queue's sw-queue -> hw-queue mappings against the reduced
 * CPU set via blk_mq_queue_reinit_work().
 *
 * NOTE(review): assumes cpuhp_online_new still holds a valid snapshot of
 * the online mask (populated by earlier hotplug events) — confirm the
 * state machine guarantees prepare/dead ordering here.
 *
 * Returns 0; cpuhp callbacks report failure with a non-zero value, and
 * this one cannot fail.
 */
static int blk_mq_queue_reinit_dead(unsigned int cpu)
{
/* Drop the departed CPU before rebuilding ctx->hctx mappings. */
cpumask_clear_cpu(cpu, &cpuhp_online_new);
blk_mq_queue_reinit_work();
return 0;
}

/*
 * Before hotadded cpu starts handling requests, new mappings must be
 * established. Otherwise, these requests in hw queue might never be
 * dispatched.
 *
 * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
 * for CPU0, and ctx1 for CPU1).
 *
 * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list
 * and set bit0 in pending bitmap as ctx1->index_hw is still zero.
 *
 * And then while running hw queue, flush_busy_ctxs() finds bit0 is set in
 * pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
 * But htx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list
 * is ignored.
 */
/*
 * CPU-hotplug prepare callback: @cpu is about to come online.
 *
 * Builds the prospective online mask (current cpu_online_mask plus @cpu,
 * which is not yet set in cpu_online_mask at prepare time) in the static
 * cpuhp_online_new, then remaps all queues so the new CPU's sw queue is
 * wired up before it can submit requests — see the scenario above.
 *
 * Returns 0; cannot fail.
 */
static int blk_mq_queue_reinit_prepare(unsigned int cpu)
{
/* Snapshot the online mask and add the incoming CPU ahead of time. */
cpumask_copy(&cpuhp_online_new, cpu_online_mask);
cpumask_set_cpu(cpu, &cpuhp_online_new);
blk_mq_queue_reinit_work();
return 0;
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
Expand Down Expand Up @@ -2391,8 +2389,9 @@ static int __init blk_mq_init(void)
cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
blk_mq_hctx_notify_dead);

hotcpu_notifier(blk_mq_queue_reinit_notify, 0);

cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
blk_mq_queue_reinit_prepare,
blk_mq_queue_reinit_dead);
return 0;
}
subsys_initcall(blk_mq_init);

0 comments on commit 65d5291

Please sign in to comment.