block: Make blk_get_request() block for non-PM requests while suspended
Instead of allowing requests that are not power management requests
to enter the queue in runtime suspended status (RPM_SUSPENDED), make
the blk_get_request() caller block. This change fixes a starvation
issue: it is now guaranteed that power management requests will be
executed no matter how many blk_get_request() callers are waiting.
For blk-mq, instead of maintaining the q->nr_pending counter, rely
on q->q_usage_counter. Call pm_runtime_mark_last_busy() every time a
request finishes instead of only if the queue depth drops to zero.

Signed-off-by: Bart Van Assche <[email protected]>
Reviewed-by: Ming Lei <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: Jianchao Wang <[email protected]>
Cc: Hannes Reinecke <[email protected]>
Cc: Johannes Thumshirn <[email protected]>
Cc: Alan Stern <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
bvanassche authored and axboe committed Sep 26, 2018
1 parent bdd6316 commit 7cedffe
Showing 2 changed files with 47 additions and 34 deletions.
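The blocking behaviour the commit message describes is implemented by the pm-only gate that blk_queue_enter() checks before taking a q_usage_counter reference. As a reading aid, here is a simplified sketch of that gate as it looks in this patch series; it condenses the freeze handling and error paths and is not the verbatim kernel source:

int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	/* PM requests are marked with BLK_MQ_REQ_PREEMPT in this series. */
	const bool pm = flags & BLK_MQ_REQ_PREEMPT;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/* Let PM requests through; park everyone else. */
			if (pm || !blk_queue_pm_only(q))
				success = true;
			else
				percpu_ref_put(&q->q_usage_counter);
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/* Sleep until the queue is unfrozen and pm_only is clear. */
		wait_event(q->mq_freeze_wq,
			   (atomic_read(&q->mq_freeze_depth) == 0 &&
			    (pm || !blk_queue_pm_only(q))) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

Because the gate is re-checked under the wait, a resume that clears pm_only wakes every parked blk_get_request() caller, while PM requests bypass the gate entirely; this is what removes the starvation window.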
37 changes: 8 additions & 29 deletions block/blk-core.c
@@ -2746,30 +2746,6 @@ void blk_account_io_done(struct request *req, u64 now)
 	}
 }
 
-#ifdef CONFIG_PM
-/*
- * Don't process normal requests when queue is suspended
- * or in the process of suspending/resuming
- */
-static bool blk_pm_allow_request(struct request *rq)
-{
-	switch (rq->q->rpm_status) {
-	case RPM_RESUMING:
-	case RPM_SUSPENDING:
-		return rq->rq_flags & RQF_PM;
-	case RPM_SUSPENDED:
-		return false;
-	default:
-		return true;
-	}
-}
-#else
-static bool blk_pm_allow_request(struct request *rq)
-{
-	return true;
-}
-#endif
-
 void blk_account_io_start(struct request *rq, bool new_io)
 {
 	struct hd_struct *part;
@@ -2815,11 +2791,14 @@ static struct request *elv_next_request(struct request_queue *q)
 
 	while (1) {
 		list_for_each_entry(rq, &q->queue_head, queuelist) {
-			if (blk_pm_allow_request(rq))
-				return rq;
-
-			if (rq->rq_flags & RQF_SOFTBARRIER)
-				break;
+#ifdef CONFIG_PM
+			/*
+			 * If a request gets queued in state RPM_SUSPENDED
+			 * then that's a kernel bug.
+			 */
+			WARN_ON_ONCE(q->rpm_status == RPM_SUSPENDED);
+#endif
+			return rq;
 		}
 
 		/*
44 changes: 39 additions & 5 deletions block/blk-pm.c
@@ -1,8 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include <linux/blk-mq.h>
 #include <linux/blk-pm.h>
 #include <linux/blkdev.h>
 #include <linux/pm_runtime.h>
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
 
 /**
  * blk_pm_runtime_init - Block layer runtime PM initialization routine
@@ -68,14 +71,40 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 	if (!q->dev)
 		return ret;
 
+	WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
+
+	/*
+	 * Increase the pm_only counter before checking whether any
+	 * non-PM blk_queue_enter() calls are in progress to avoid that any
+	 * new non-PM blk_queue_enter() calls succeed before the pm_only
+	 * counter is decreased again.
+	 */
+	blk_set_pm_only(q);
+	ret = -EBUSY;
+	/* Switch q_usage_counter from per-cpu to atomic mode. */
+	blk_freeze_queue_start(q);
+	/*
+	 * Wait until atomic mode has been reached. Since that
+	 * involves calling call_rcu(), it is guaranteed that later
+	 * blk_queue_enter() calls see the pm-only state. See also
+	 * http://lwn.net/Articles/573497/.
+	 */
+	percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
+	if (percpu_ref_is_zero(&q->q_usage_counter))
+		ret = 0;
+	/* Switch q_usage_counter back to per-cpu mode. */
+	blk_mq_unfreeze_queue(q);
+
 	spin_lock_irq(q->queue_lock);
-	if (q->nr_pending) {
-		ret = -EBUSY;
+	if (ret < 0)
 		pm_runtime_mark_last_busy(q->dev);
-	} else {
+	else
 		q->rpm_status = RPM_SUSPENDING;
-	}
 	spin_unlock_irq(q->queue_lock);
+
+	if (ret)
+		blk_clear_pm_only(q);
+
 	return ret;
 }
 EXPORT_SYMBOL(blk_pre_runtime_suspend);
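The -EBUSY contract visible in this hunk is what a driver's runtime-PM callback consumes. A minimal sketch of the intended call pattern, modelled on how SCSI drives these helpers; the mydev_* names are hypothetical stand-ins for driver-specific code:

static int mydev_runtime_suspend(struct device *dev)
{
	struct request_queue *q = mydev_queue(dev);	/* hypothetical */
	int err;

	/* Fails with -EBUSY if any non-PM request is still in flight. */
	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;
	err = mydev_quiesce_hw(dev);			/* hypothetical */
	/* On failure, blk_post_runtime_suspend() re-opens the queue. */
	blk_post_runtime_suspend(q, err);
	return err;
}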
@@ -106,6 +135,9 @@ void blk_post_runtime_suspend(struct request_queue *q, int err)
 		pm_runtime_mark_last_busy(q->dev);
 	}
 	spin_unlock_irq(q->queue_lock);
+
+	if (err)
+		blk_clear_pm_only(q);
 }
 EXPORT_SYMBOL(blk_post_runtime_suspend);

@@ -153,13 +185,15 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
 	spin_lock_irq(q->queue_lock);
 	if (!err) {
 		q->rpm_status = RPM_ACTIVE;
-		__blk_run_queue(q);
 		pm_runtime_mark_last_busy(q->dev);
 		pm_request_autosuspend(q->dev);
 	} else {
 		q->rpm_status = RPM_SUSPENDED;
 	}
 	spin_unlock_irq(q->queue_lock);
+
+	if (!err)
+		blk_clear_pm_only(q);
 }
 EXPORT_SYMBOL(blk_post_runtime_resume);
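Dropping __blk_run_queue() from the resume path is consistent with the new model: since non-PM requests can no longer sit in a suspended queue, there is nothing queued to run at resume time; instead, blk_clear_pm_only() wakes the blk_queue_enter() waiters, which then allocate and submit their requests through the normal path. A sketch of that helper as introduced earlier in this series (close to, but not guaranteed to match, the exact source):

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	/* The last clearer wakes everyone sleeping in blk_queue_enter(). */
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}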
