[PATCH] Propagate down request sync flag
We need to do this; otherwise the io schedulers don't get access to the
sync flag and cannot tell the difference between a regular write and an
O_DIRECT write, which can cause a performance loss.

Signed-off-by: Jens Axboe <[email protected]>
Jens Axboe committed Dec 13, 2006
1 parent 445722f commit 7749a8d
Showing 2 changed files with 32 additions and 14 deletions.
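As a rough orientation before the diffs: the patch makes __make_request() build a full flag word (the data direction, plus REQ_RW_SYNC when the bio is marked synchronous) and hands that word down to the request allocator and the elevator hooks, where previously only the bare read/write direction was passed. The standalone C sketch below models that flow; the MODEL_* names and values are invented for illustration and are not the kernel's definitions.

/*
 * Standalone model of the flag propagation this patch adds.  The MODEL_*
 * flags are invented stand-ins for REQ_RW / REQ_RW_SYNC, not the kernel's
 * definitions.
 */
#include <stdio.h>

#define MODEL_RW        (1 << 0)        /* write direction */
#define MODEL_RW_SYNC   (1 << 1)        /* synchronous hint */

struct model_bio {
        int is_write;
        int is_sync;    /* set for reads and for O_DIRECT/sync writes */
};

/* Stand-in for an elevator hook: before the patch it saw only the bare
 * direction bit, now it gets the full flag word, sync bit included. */
static void scheduler_sees(int rw_flags)
{
        printf("%s, %s\n",
               (rw_flags & MODEL_RW) ? "write" : "read",
               (rw_flags & MODEL_RW_SYNC) ? "sync" : "async");
}

/* Models the new code in __make_request(): build rw_flags from the bio's
 * direction and sync state, then pass that word down. */
static void make_request(const struct model_bio *bio)
{
        int rw_flags = bio->is_write ? MODEL_RW : 0;

        if (bio->is_sync)
                rw_flags |= MODEL_RW_SYNC;

        scheduler_sees(rw_flags);
}

int main(void)
{
        struct model_bio buffered_write = { 1, 0 };
        struct model_bio odirect_write  = { 1, 1 };

        make_request(&buffered_write);  /* prints "write, async" */
        make_request(&odirect_write);   /* prints "write, sync"  */
        return 0;
}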
18 changes: 12 additions & 6 deletions block/cfq-iosched.c
@@ -219,9 +219,12 @@ static int cfq_queue_empty(request_queue_t *q)
 	return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
 {
-	if (rw == READ || rw == WRITE_SYNC)
+	/*
+	 * Use the per-process queue, for read requests and syncronous writes
+	 */
+	if (!(rw & REQ_RW) || is_sync)
 		return task->pid;
 
 	return CFQ_KEY_ASYNC;
@@ -473,7 +476,7 @@ static struct request *
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
-	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
 	struct cfq_queue *cfqq;
 
 	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
@@ -1748,14 +1751,17 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
 	struct cfq_queue *cfqq;
+	unsigned int key;
+
+	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
 	/*
 	 * don't force setup of a queue from here, as a call to may_queue
 	 * does not necessarily imply that a request actually will be queued.
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
 	if (cfqq) {
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
@@ -1798,10 +1804,10 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	struct task_struct *tsk = current;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	pid_t key = cfq_queue_pid(tsk, rw);
+	const int is_sync = rq_is_sync(rq);
+	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
-	int is_sync = key != CFQ_KEY_ASYNC;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
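The net effect of the cfq_queue_pid() change above is that CFQ keys reads and synchronous writes to the submitting task's per-process queue, while asynchronous (buffered) writes fall back to a shared async key. A small userspace model of that decision, with MODEL_RW and MODEL_KEY_ASYNC standing in for the kernel's REQ_RW and CFQ_KEY_ASYNC (the values here are illustrative only):

/*
 * Userspace model of the new cfq_queue_pid() decision; not kernel code.
 */
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

#define MODEL_RW        (1 << 0)
#define MODEL_KEY_ASYNC ((pid_t) -1)

static pid_t model_queue_key(pid_t pid, int rw, int is_sync)
{
        /* reads and synchronous writes use the per-process queue */
        if (!(rw & MODEL_RW) || is_sync)
                return pid;

        /* asynchronous writes all share one key */
        return MODEL_KEY_ASYNC;
}

int main(void)
{
        pid_t me = getpid();

        printf("read        -> %d\n", (int) model_queue_key(me, 0, 0));
        printf("sync write  -> %d\n", (int) model_queue_key(me, MODEL_RW, 1));
        printf("async write -> %d\n", (int) model_queue_key(me, MODEL_RW, 0));
        return 0;
}

Run, it prints the caller's pid for the read and the sync write and -1 for the async write, which is the split that keeps synchronous I/O on per-process queues.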
28 changes: 20 additions & 8 deletions block/ll_rw_blk.c
@@ -2058,15 +2058,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
  * Returns NULL on failure, with queue_lock held.
  * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-				   gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
+	const int rw = rw_flags & 0x01;
 	int may_queue, priv;
 
-	may_queue = elv_may_queue(q, rw);
+	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -2162,20 +2163,21 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
 					struct bio *bio)
 {
+	const int rw = rw_flags & 0x01;
 	struct request *rq;
 
-	rq = get_request(q, rw, bio, GFP_NOIO);
+	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
 		struct request_list *rl = &q->rq;
 
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw, bio, GFP_NOIO);
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 
 		if (!rq) {
 			struct io_context *ioc;
@@ -2910,6 +2912,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	int el_ret, nr_sectors, barrier, err;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
+	int rw_flags;
 
 	nr_sectors = bio_sectors(bio);
 
@@ -2983,11 +2986,20 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	}
 
 get_rq:
+	/*
+	 * This sync check and mask will be re-done in init_request_from_bio(),
+	 * but we need to set it earlier to expose the sync flag to the
+	 * rq allocator and io schedulers.
+	 */
+	rw_flags = bio_data_dir(bio);
+	if (sync)
+		rw_flags |= REQ_RW_SYNC;
+
 	/*
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, bio_data_dir(bio), bio);
+	req = get_request_wait(q, rw_flags, bio);
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
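For context on the workload named in the commit message: an O_DIRECT write reaches the block layer with its bio marked synchronous (bio_sync() is true), so with this patch the sync bit survives into the request flags and the io scheduler can treat the write like a read rather than like background writeback. A minimal writer of that kind might look like the sketch below; the file name and the 4096-byte size are arbitrary choices for illustration.

/*
 * Minimal O_DIRECT writer, roughly the workload the commit message refers
 * to.  O_DIRECT requires aligned buffers and is not supported by every
 * filesystem.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const size_t len = 4096;
        void *buf;
        int fd;

        fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        if (posix_memalign(&buf, 4096, len) != 0) {
                fprintf(stderr, "posix_memalign failed\n");
                close(fd);
                return 1;
        }
        memset(buf, 0, len);

        /* This write arrives at the block layer marked synchronous, which
         * the io scheduler can now see via the propagated sync flag. */
        if (write(fd, buf, len) < 0)
                perror("write");

        free(buf);
        close(fd);
        return 0;
}

Some filesystems (tmpfs, for example) reject O_DIRECT, so run it against a disk-backed filesystem.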
