Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: add blk_run_queue_async
  block: blk_delay_queue() should use kblockd workqueue
  md: fix up raid1/raid10 unplugging.
  md: incorporate new plugging into raid5.
  md: provide generic support for handling unplug callbacks.
  md - remove old plugging code.
  md/dm - remove remains of plug_fn callback.
  md: use new plugging interface for RAID IO.
  block: drop queue lock before calling __blk_run_queue() for kblockd punt
  Revert "block: add callback function for unplug notification"
  block: Enhance new plugging support to support general callbacks
torvalds committed Apr 18, 2011
commit 8a83f33, 2 parents: 5d5b1b9 + 24ecfbe
Showing 18 changed files with 186 additions and 187 deletions.
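The centerpiece of this pull is the general plug-callback mechanism ("block: Enhance new plugging support to support general callbacks" above), which replaces the reverted per-queue unplug notification and the old md plugging code. A rough sketch of how a stacked driver is expected to use it, assuming the two-field struct blk_plug_cb (list, callback) this series adds to blkdev.h — all my_* names below are illustrative, not part of the patch:

	#include <linux/blkdev.h>
	#include <linux/sched.h>
	#include <linux/slab.h>

	struct my_dev;				/* illustrative driver state */
	void my_dev_kick(struct my_dev *dev);	/* illustrative deferred work */

	struct my_plug_data {
		struct blk_plug_cb cb;		/* ->list and ->callback, as used by
						 * flush_plug_callbacks() below */
		struct my_dev *dev;
	};

	/* Invoked from flush_plug_callbacks() when the plugging task unplugs. */
	static void my_unplug(struct blk_plug_cb *cb)
	{
		struct my_plug_data *data = container_of(cb, struct my_plug_data, cb);

		my_dev_kick(data->dev);
		kfree(data);
	}

	/* Called on the IO submission path to defer work until unplug time. */
	static void my_check_plugged(struct my_dev *dev)
	{
		struct blk_plug *plug = current->plug;
		struct my_plug_data *data;

		if (!plug)
			return;		/* task is not plugged, nothing to defer */

		/*
		 * A real driver would avoid queueing a second entry for the
		 * same device; omitted here for brevity.
		 */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return;

		data->cb.callback = my_unplug;
		data->dev = dev;
		list_add(&data->cb.list, &plug->cb_list);
	}

The md commits in this series ("md: provide generic support for handling unplug callbacks") build essentially this kind of wrapper around blk_plug_cb.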
block/blk-core.c: 65 additions & 18 deletions
@@ -204,7 +204,7 @@ static void blk_delay_work(struct work_struct *work)
 
 	q = container_of(work, struct request_queue, delay_work.work);
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irq(q->queue_lock);
 }
 
@@ -220,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-	schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+	queue_delayed_work(kblockd_workqueue, &q->delay_work,
+			   msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
@@ -238,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -296,9 +297,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
- *
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
@@ -307,14 +307,28 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else
 		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q:	The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ *    of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+	if (likely(!blk_queue_stopped(q)))
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
@@ -328,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -977,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_queue_end_tag(q, rq);
 
 	add_acct_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1321,7 +1335,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
@@ -2638,6 +2652,7 @@ void blk_start_plug(struct blk_plug *plug)
 
 	plug->magic = PLUG_MAGIC;
 	INIT_LIST_HEAD(&plug->list);
+	INIT_LIST_HEAD(&plug->cb_list);
 	plug->should_sort = 0;
 
 	/*
@@ -2670,12 +2685,41 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
  */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
 			    bool from_schedule)
+	__releases(q->queue_lock)
 {
 	trace_block_unplug(q, depth, !from_schedule);
-	__blk_run_queue(q, from_schedule);
 
-	if (q->unplugged_fn)
-		q->unplugged_fn(q);
+	/*
+	 * If we are punting this to kblockd, then we can safely drop
+	 * the queue_lock before waking kblockd (which needs to take
+	 * this lock).
+	 */
+	if (from_schedule) {
+		spin_unlock(q->queue_lock);
+		blk_run_queue_async(q);
+	} else {
+		__blk_run_queue(q);
+		spin_unlock(q->queue_lock);
+	}
+
 }
 
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+	LIST_HEAD(callbacks);
+
+	if (list_empty(&plug->cb_list))
+		return;
+
+	list_splice_init(&plug->cb_list, &callbacks);
+
+	while (!list_empty(&callbacks)) {
+		struct blk_plug_cb *cb = list_first_entry(&callbacks,
+							  struct blk_plug_cb,
+							  list);
+		list_del(&cb->list);
+		cb->callback(cb);
+	}
+}
+
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
@@ -2688,6 +2732,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
 	BUG_ON(plug->magic != PLUG_MAGIC);
 
+	flush_plug_callbacks(plug);
 	if (list_empty(&plug->list))
 		return;
 
@@ -2712,10 +2757,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
-			if (q) {
+			/*
+			 * This drops the queue lock
+			 */
+			if (q)
 				queue_unplugged(q, depth, from_schedule);
-				spin_unlock(q->queue_lock);
-			}
 			q = rq->q;
 			depth = 0;
 			spin_lock(q->queue_lock);
@@ -2733,10 +2779,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		depth++;
 	}
 
-	if (q) {
+	/*
+	 * This drops the queue lock
+	 */
+	if (q)
 		queue_unplugged(q, depth, from_schedule);
-		spin_unlock(q->queue_lock);
-	}
 
 	local_irq_restore(flags);
 }
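With these blk-core.c changes, flushing a plug is a two-phase affair: flush_plug_callbacks() runs the registered callbacks first, then the sorted request list is dispatched, with queue_unplugged() now responsible for dropping each queue's lock. A minimal submitter-side sketch (submit_my_bios() is a stand-in for real bio submission, not part of the patch):

	#include <linux/blkdev.h>

	void submit_my_bios(void);	/* illustrative */

	static void my_submit_batch(void)
	{
		struct blk_plug plug;

		blk_start_plug(&plug);	/* now also initializes plug->cb_list */
		submit_my_bios();	/* requests and callbacks pile up on the plug */
		blk_finish_plug(&plug);	/* callbacks fire first, then the queued
					 * requests dispatch; a flush triggered from
					 * the scheduler instead punts to kblockd via
					 * blk_run_queue_async() */
	}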
block/blk-exec.c: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	/* the queue is stopped so it won't be plugged+unplugged */
 	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
block/blk-flush.c: 2 additions & 2 deletions
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 	 * request_fn may confuse the driver. Always use kblockd.
 	 */
 	if (queued)
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
 	 * the comment in flush_end_io().
 	 */
 	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
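Both blk-flush.c hunks follow the rule stated in the comment above: a request-completion path must not recurse into ->request_fn directly, so it asks kblockd to run the queue on its behalf. A sketch of when each variant applies (the my_* names are illustrative):

	#include <linux/blkdev.h>

	/*
	 * Process context with q->queue_lock held and interrupts disabled:
	 * run the queue synchronously.
	 */
	static void my_kick_queue_locked(struct request_queue *q)
	{
		__blk_run_queue(q);
	}

	/*
	 * Completion (IRQ) context: re-entering ->request_fn here could
	 * recurse into the driver that is still completing a request, so
	 * defer the work to the kblockd workqueue instead.
	 */
	static void my_kick_queue_from_completion(struct request_queue *q)
	{
		blk_run_queue_async(q);
	}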
block/blk-settings.c: 0 additions & 16 deletions
@@ -790,22 +790,6 @@ void blk_queue_flush(struct request_queue *q, unsigned int flush)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush);
 
-/**
- * blk_queue_unplugged - register a callback for an unplug event
- * @q:		the request queue for the device
- * @fn:		the function to call
- *
- * Some stacked drivers may need to know when IO is dispatched on an
- * unplug event. By registrering a callback here, they will be notified
- * when someone flushes their on-stack queue plug. The function will be
- * called with the queue lock held.
- */
-void blk_queue_unplugged(struct request_queue *q, unplugged_fn *fn)
-{
-	q->unplugged_fn = fn;
-}
-EXPORT_SYMBOL(blk_queue_unplugged);
-
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
block/blk.h: 1 addition & 0 deletions
@@ -22,6 +22,7 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
+void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
block/cfq-iosched.c: 3 additions & 3 deletions
@@ -3368,7 +3368,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		    cfqd->busy_queues > 1) {
 			cfq_del_timer(cfqd, cfqq);
 			cfq_clear_cfqq_wait_request(cfqq);
-			__blk_run_queue(cfqd->queue, false);
+			__blk_run_queue(cfqd->queue);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
 				&cfqq->cfqg->blkg);
@@ -3383,7 +3383,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue, false);
+		__blk_run_queue(cfqd->queue);
 	}
 }
 
@@ -3743,7 +3743,7 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(cfqd->queue, false);
+	__blk_run_queue(cfqd->queue);
 	spin_unlock_irq(q->queue_lock);
 }
 
block/elevator.c: 2 additions & 2 deletions
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -695,7 +695,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		 * with anything. There's no point in delaying queue
 		 * processing.
 		 */
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT_MERGE:
drivers/md/dm-raid.c: 0 additions & 8 deletions
@@ -390,13 +390,6 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
 	return md_raid5_congested(&rs->md, bits);
 }
 
-static void raid_unplug(struct dm_target_callbacks *cb)
-{
-	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
-
-	md_raid5_kick_device(rs->md.private);
-}
-
 /*
  * Construct a RAID4/5/6 mapping:
  * Args:
@@ -487,7 +480,6 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	}
 
 	rs->callbacks.congested_fn = raid_is_congested;
-	rs->callbacks.unplug_fn = raid_unplug;
 	dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
 	return 0;
(Diffs for the remaining 10 changed files are not shown.)