Commit

Merge branch 'post-2.6.15' of git://brick.kernel.dk/data/git/linux-2.6-block

Manual fixup for merge with Jens' "Suspend support for libata", commit
ID 9b84754.

Signed-off-by: Linus Torvalds <[email protected]>
Linus Torvalds committed Jan 6, 2006
2 parents 7ed4091 + e650c30 commit d99cf9d
Showing 50 changed files with 695 additions and 644 deletions.
10 changes: 2 additions & 8 deletions Documentation/block/biodoc.txt
@@ -263,14 +263,8 @@ A flag in the bio structure, BIO_BARRIER is used to identify a barrier i/o.
The generic i/o scheduler would make sure that it places the barrier request and
all other requests coming after it after all the previous requests in the
queue. Barriers may be implemented in different ways depending on the
driver. A SCSI driver for example could make use of ordered tags to
preserve the necessary ordering with a lower impact on throughput. For IDE
this might be two sync cache flush: a pre and post flush when encountering
a barrier write.

There is a provision for queues to indicate what kind of barriers they
can provide. This is as of yet unmerged, details will be added here once it
is in the kernel.
driver. For more details regarding I/O barriers, please read barrier.txt
in this directory.

1.2.2 Request Priority/Latency

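Editor's aside on the hunk above: the ordering rule being documented is that the elevator may reorder ordinary requests for seek efficiency, but nothing may be moved across a barrier in either direction. The short stand-alone C sketch below is illustrative only (the struct, function, and constant names are invented here and are not kernel code); it expresses that rule as a sort that only operates between barriers.

/*
 * Illustrative sketch only (not kernel code; names are invented):
 * an elevator may reorder ordinary requests by sector, but never
 * across a barrier, in either direction.
 */
#include <stdio.h>

enum { REQ_NORMAL, REQ_BARRIER };

struct req {
	long sector;
	int  type;
};

/* Sort by start sector, but only within runs delimited by barriers. */
static void elevator_sort(struct req *rq, int n)
{
	int start = 0;

	for (int i = 0; i <= n; i++) {
		if (i == n || rq[i].type == REQ_BARRIER) {
			/* insertion-sort the run [start, i) */
			for (int j = start + 1; j < i; j++) {
				struct req key = rq[j];
				int k = j - 1;

				while (k >= start && rq[k].sector > key.sector) {
					rq[k + 1] = rq[k];
					k--;
				}
				rq[k + 1] = key;
			}
			start = i + 1;	/* next run begins after the barrier */
		}
	}
}

int main(void)
{
	struct req q[] = {
		{ 900, REQ_NORMAL }, { 100, REQ_NORMAL },
		{   0, REQ_BARRIER },
		{  50, REQ_NORMAL }, { 700, REQ_NORMAL },
	};
	int n = sizeof(q) / sizeof(q[0]);

	elevator_sort(q, n);
	for (int i = 0; i < n; i++)
		printf("%s %ld\n",
		       q[i].type == REQ_BARRIER ? "barrier" : "write",
		       q[i].sector);
	/*
	 * Prints: write 100, write 900, barrier 0, write 50, write 700.
	 * The sector-50 write is never moved ahead of the barrier even
	 * though its sector number is lower than the ones before it.
	 */
	return 0;
}
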
144 changes: 25 additions & 119 deletions block/as-iosched.c
@@ -182,6 +182,9 @@ struct as_rq {

static kmem_cache_t *arq_pool;

static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
static void as_antic_stop(struct as_data *ad);

/*
* IO Context helper functions
*/
@@ -370,7 +373,7 @@ static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
* existing request against the same sector), which can happen when using
* direct IO, then return the alias.
*/
static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
struct rb_node *parent = NULL;
@@ -397,6 +400,16 @@ static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
return NULL;
}

static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
struct as_rq *alias;

while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
as_move_to_dispatch(ad, alias);
as_antic_stop(ad);
}
}

static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
{
if (!ON_RB(&arq->rb_node)) {
@@ -1133,23 +1146,6 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
/*
* take it off the sort and fifo list, add to dispatch queue
*/
while (!list_empty(&rq->queuelist)) {
struct request *__rq = list_entry_rq(rq->queuelist.next);
struct as_rq *__arq = RQ_DATA(__rq);

list_del(&__rq->queuelist);

elv_dispatch_add_tail(ad->q, __rq);

if (__arq->io_context && __arq->io_context->aic)
atomic_inc(&__arq->io_context->aic->nr_dispatched);

WARN_ON(__arq->state != AS_RQ_QUEUED);
__arq->state = AS_RQ_DISPATCHED;

ad->nr_dispatched++;
}

as_remove_queued_request(ad->q, rq);
WARN_ON(arq->state != AS_RQ_QUEUED);

@@ -1325,50 +1321,13 @@ static int as_dispatch_request(request_queue_t *q, int force)
return 1;
}

/*
* Add arq to a list behind alias
*/
static inline void
as_add_aliased_request(struct as_data *ad, struct as_rq *arq,
struct as_rq *alias)
{
struct request *req = arq->request;
struct list_head *insert = alias->request->queuelist.prev;

/*
* Transfer list of aliases
*/
while (!list_empty(&req->queuelist)) {
struct request *__rq = list_entry_rq(req->queuelist.next);
struct as_rq *__arq = RQ_DATA(__rq);

list_move_tail(&__rq->queuelist, &alias->request->queuelist);

WARN_ON(__arq->state != AS_RQ_QUEUED);
}

/*
* Another request with the same start sector on the rbtree.
* Link this request to that sector. They are untangled in
* as_move_to_dispatch
*/
list_add(&arq->request->queuelist, insert);

/*
* Don't want to have to handle merges.
*/
as_del_arq_hash(arq);
arq->request->flags |= REQ_NOMERGE;
}

/*
* add arq to rbtree and fifo
*/
static void as_add_request(request_queue_t *q, struct request *rq)
{
struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq);
struct as_rq *alias;
int data_dir;

arq->state = AS_RQ_NEW;
@@ -1387,33 +1346,17 @@ static void as_add_request(request_queue_t *q, struct request *rq)
atomic_inc(&arq->io_context->aic->nr_queued);
}

alias = as_add_arq_rb(ad, arq);
if (!alias) {
/*
* set expire time (only used for reads) and add to fifo list
*/
arq->expires = jiffies + ad->fifo_expire[data_dir];
list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
as_add_arq_rb(ad, arq);
if (rq_mergeable(arq->request))
as_add_arq_hash(ad, arq);

if (rq_mergeable(arq->request))
as_add_arq_hash(ad, arq);
as_update_arq(ad, arq); /* keep state machine up to date */

} else {
as_add_aliased_request(ad, arq, alias);

/*
* have we been anticipating this request?
* or does it come from the same process as the one we are
* anticipating for?
*/
if (ad->antic_status == ANTIC_WAIT_REQ
|| ad->antic_status == ANTIC_WAIT_NEXT) {
if (as_can_break_anticipation(ad, arq))
as_antic_stop(ad);
}
}
/*
* set expire time (only used for reads) and add to fifo list
*/
arq->expires = jiffies + ad->fifo_expire[data_dir];
list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);

as_update_arq(ad, arq); /* keep state machine up to date */
arq->state = AS_RQ_QUEUED;
}

@@ -1536,23 +1479,8 @@ static void as_merged_request(request_queue_t *q, struct request *req)
* if the merge was a front merge, we need to reposition request
*/
if (rq_rb_key(req) != arq->rb_key) {
struct as_rq *alias, *next_arq = NULL;

if (ad->next_arq[arq->is_sync] == arq)
next_arq = as_find_next_arq(ad, arq);

/*
* Note! We should really be moving any old aliased requests
* off this request and try to insert them into the rbtree. We
* currently don't bother. Ditto the next function.
*/
as_del_arq_rb(ad, arq);
if ((alias = as_add_arq_rb(ad, arq))) {
list_del_init(&arq->fifo);
as_add_aliased_request(ad, arq, alias);
if (next_arq)
ad->next_arq[arq->is_sync] = next_arq;
}
as_add_arq_rb(ad, arq);
/*
* Note! At this stage of this and the next function, our next
* request may not be optimal - eg the request may have "grown"
@@ -1579,18 +1507,8 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
as_add_arq_hash(ad, arq);

if (rq_rb_key(req) != arq->rb_key) {
struct as_rq *alias, *next_arq = NULL;

if (ad->next_arq[arq->is_sync] == arq)
next_arq = as_find_next_arq(ad, arq);

as_del_arq_rb(ad, arq);
if ((alias = as_add_arq_rb(ad, arq))) {
list_del_init(&arq->fifo);
as_add_aliased_request(ad, arq, alias);
if (next_arq)
ad->next_arq[arq->is_sync] = next_arq;
}
as_add_arq_rb(ad, arq);
}

/*
@@ -1609,18 +1527,6 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
}
}

/*
* Transfer list of aliases
*/
while (!list_empty(&next->queuelist)) {
struct request *__rq = list_entry_rq(next->queuelist.next);
struct as_rq *__arq = RQ_DATA(__rq);

list_move_tail(&__rq->queuelist, &req->queuelist);

WARN_ON(__arq->state != AS_RQ_QUEUED);
}

/*
* kill knowledge of next, this one is a goner
*/
16 changes: 8 additions & 8 deletions block/cfq-iosched.c
@@ -25,15 +25,15 @@
/*
* tunables
*/
static int cfq_quantum = 4; /* max queue in one round of service */
static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/
static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
static int cfq_back_penalty = 2; /* penalty of a backwards seek */
static const int cfq_quantum = 4; /* max queue in one round of service */
static const int cfq_queued = 8; /* minimum rq allocate limit per-queue*/
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2; /* penalty of a backwards seek */

static int cfq_slice_sync = HZ / 10;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static int cfq_slice_async_rq = 2;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 100;

#define CFQ_IDLE_GRACE (HZ / 10)
@@ -45,7 +45,7 @@ static int cfq_slice_idle = HZ / 100;
/*
* disable queueing at the driver/hardware level
*/
static int cfq_max_depth = 2;
static const int cfq_max_depth = 2;

/*
* for the hash of cfqq inside the cfqd
8 changes: 4 additions & 4 deletions block/deadline-iosched.c
@@ -19,10 +19,10 @@
/*
* See Documentation/block/deadline-iosched.txt
*/
static int read_expire = HZ / 2; /* max time before a read is submitted. */
static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static int writes_starved = 2; /* max times reads can starve a write */
static int fifo_batch = 16; /* # of sequential requests treated as one
static const int read_expire = HZ / 2; /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2; /* max times reads can starve a write */
static const int fifo_batch = 16; /* # of sequential requests treated as one
by the above parameters. For throughput. */

static const int deadline_hash_shift = 5;