Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: Skip I/O merges when disabled
  block: add large command support
  block: replace sizeof(rq->cmd) with BLK_MAX_CDB
  ide: use blk_rq_init() to initialize the request
  block: use blk_rq_init() to initialize the request
  block: rename and export rq_init()
  block: no need to initialize rq->cmd with blk_get_request
  block: no need to initialize rq->cmd in prepare_flush_fn hook
  block/blk-barrier.c:blk_ordered_cur_seq() mustn't be inline
  block/elevator.c:elv_rq_merge_ok() mustn't be inline
  block: make queue flags non-atomic
  block: add dma alignment and padding support to blk_rq_map_kern
  unexport blk_max_pfn
  ps3disk: Remove superfluous cast
  block: make rq_init() do a full memset()
  relay: fix splice problem
torvalds committed Apr 29, 2008
2 parents fee4b19 + ac9fafa commit bd5d435
Showing 38 changed files with 275 additions and 120 deletions.
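
Several of the patches in this pull ("block: make queue flags non-atomic" and the follow-up conversions in blk-core.c, blk-settings.c and blk-tag.c below) replace atomic set_bit()/clear_bit() operations on q->queue_flags with helpers that assume the caller holds q->queue_lock. As a rough sketch of the pattern being introduced (the authoritative definitions live in include/linux/blkdev.h), the helpers look approximately like this:

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));	/* caller must hold q->queue_lock */
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}

The queue_flag_set_unlocked()/queue_flag_clear_unlocked() variants skip the locking assertion for paths such as blk_cleanup_queue() that cannot hold the lock.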
11 changes: 3 additions & 8 deletions block/blk-barrier.c
@@ -53,7 +53,7 @@ EXPORT_SYMBOL(blk_queue_ordered);
/*
* Cache flushing for ordered writes handling
*/
inline unsigned blk_ordered_cur_seq(struct request_queue *q)
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
if (!q->ordseq)
return 0;
@@ -143,10 +143,8 @@ static void queue_flush(struct request_queue *q, unsigned which)
end_io = post_flush_end_io;
}

blk_rq_init(q, rq);
rq->cmd_flags = REQ_HARDBARRIER;
rq_init(q, rq);
rq->elevator_private = NULL;
rq->elevator_private2 = NULL;
rq->rq_disk = q->bar_rq.rq_disk;
rq->end_io = end_io;
q->prepare_flush_fn(q, rq);
@@ -167,14 +165,11 @@ static inline struct request *start_ordered(struct request_queue *q,
blkdev_dequeue_request(rq);
q->orig_bar_rq = rq;
rq = &q->bar_rq;
rq->cmd_flags = 0;
rq_init(q, rq);
blk_rq_init(q, rq);
if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
rq->cmd_flags |= REQ_RW;
if (q->ordered & QUEUE_ORDERED_FUA)
rq->cmd_flags |= REQ_FUA;
rq->elevator_private = NULL;
rq->elevator_private2 = NULL;
init_request_from_bio(rq, q->orig_bar_rq->bio);
rq->end_io = bar_end_io;

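Since blk_rq_init() now does a full memset() of the request (including rq->cmd), a driver's prepare_flush_fn hook no longer has to clear the command buffer before filling in its flush command. A hypothetical SCSI-style hook, shown only to illustrate the simplification (mydrv_prepare_flush is not part of this commit), could now be as small as:

static void mydrv_prepare_flush(struct request_queue *q, struct request *rq)
{
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = 60 * HZ;
	rq->cmd[0] = SYNCHRONIZE_CACHE;	/* remaining cmd bytes are already zero */
	rq->cmd_len = 10;
}
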
75 changes: 34 additions & 41 deletions block/blk-core.c
@@ -107,41 +107,21 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

/*
* We can't just memset() the structure, since the allocation path
* already stored some information in the request.
*/
void rq_init(struct request_queue *q, struct request *rq)
void blk_rq_init(struct request_queue *q, struct request *rq)
{
memset(rq, 0, sizeof(*rq));

INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->donelist);
rq->q = q;
rq->sector = rq->hard_sector = (sector_t) -1;
rq->nr_sectors = rq->hard_nr_sectors = 0;
rq->current_nr_sectors = rq->hard_cur_sectors = 0;
rq->bio = rq->biotail = NULL;
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
rq->rq_disk = NULL;
rq->nr_phys_segments = 0;
rq->nr_hw_segments = 0;
rq->ioprio = 0;
rq->special = NULL;
rq->buffer = NULL;
rq->cmd = rq->__cmd;
rq->tag = -1;
rq->errors = 0;
rq->ref_count = 1;
rq->cmd_len = 0;
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->data_len = 0;
rq->extra_len = 0;
rq->sense_len = 0;
rq->data = NULL;
rq->sense = NULL;
rq->end_io = NULL;
rq->end_io_data = NULL;
rq->next_rq = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
@@ -194,7 +174,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)

if (blk_pc_request(rq)) {
printk(KERN_INFO " cdb: ");
for (bit = 0; bit < sizeof(rq->cmd); bit++)
for (bit = 0; bit < BLK_MAX_CDB; bit++)
printk("%02x ", rq->cmd[bit]);
printk("\n");
}
@@ -220,7 +200,8 @@ void blk_plug_device(struct request_queue *q)
if (blk_queue_stopped(q))
return;

if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
}
@@ -235,9 +216,10 @@ int blk_remove_plug(struct request_queue *q)
{
WARN_ON(!irqs_disabled());

if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
return 0;

queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
del_timer(&q->unplug_timer);
return 1;
}
@@ -333,15 +315,16 @@ void blk_start_queue(struct request_queue *q)
{
WARN_ON(!irqs_disabled());

clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
queue_flag_clear(QUEUE_FLAG_STOPPED, q);

/*
* one level of recursion is ok and is much faster than kicking
* the unplug handling
*/
if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
queue_flag_set(QUEUE_FLAG_REENTER, q);
q->request_fn(q);
clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
blk_plug_device(q);
kblockd_schedule_work(&q->unplug_work);
@@ -366,7 +349,7 @@ EXPORT_SYMBOL(blk_start_queue);
void blk_stop_queue(struct request_queue *q)
{
blk_remove_plug(q);
set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

@@ -395,27 +378,37 @@ EXPORT_SYMBOL(blk_sync_queue);
* blk_run_queue - run a single device queue
* @q: The queue to run
*/
void blk_run_queue(struct request_queue *q)
void __blk_run_queue(struct request_queue *q)
{
unsigned long flags;

spin_lock_irqsave(q->queue_lock, flags);
blk_remove_plug(q);

/*
* Only recurse once to avoid overrunning the stack, let the unplug
* handling reinvoke the handler shortly if we already got there.
*/
if (!elv_queue_empty(q)) {
if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
queue_flag_set(QUEUE_FLAG_REENTER, q);
q->request_fn(q);
clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
blk_plug_device(q);
kblockd_schedule_work(&q->unplug_work);
}
}
}
EXPORT_SYMBOL(__blk_run_queue);

/**
* blk_run_queue - run a single device queue
* @q: The queue to run
*/
void blk_run_queue(struct request_queue *q)
{
unsigned long flags;

spin_lock_irqsave(q->queue_lock, flags);
__blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
@@ -428,7 +421,7 @@ void blk_put_queue(struct request_queue *q)
void blk_cleanup_queue(struct request_queue *q)
{
mutex_lock(&q->sysfs_lock);
set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
mutex_unlock(&q->sysfs_lock);

if (q->elevator)
@@ -607,6 +600,8 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
if (!rq)
return NULL;

blk_rq_init(q, rq);

/*
* first three bits are identical in rq->cmd_flags and bio->bi_rw,
* see bio.h and blkdev.h
@@ -789,8 +784,6 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;

rq_init(q, rq);

blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
out:
return rq;
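
The blk-core.c changes above also split blk_run_queue() into a locking wrapper and __blk_run_queue(), which expects q->queue_lock to be held. A minimal sketch of a caller that already holds the lock (the function name is illustrative):

/* illustrative: restart dispatch from a context that holds the queue lock */
static void mydrv_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* ... requeue deferred requests here ... */
	__blk_run_queue(q);	/* runs q->request_fn without re-taking the lock */
	spin_unlock_irqrestore(q->queue_lock, flags);
}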
21 changes: 20 additions & 1 deletion block/blk-map.c
@@ -255,24 +255,43 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
* @kbuf: the kernel buffer
* @len: length of user data
* @gfp_mask: memory allocation flags
*
* Description:
* Data will be mapped directly if possible. Otherwise a bounce
* buffer is used.
*/
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
{
unsigned long kaddr;
unsigned int alignment;
int reading = rq_data_dir(rq) == READ;
int do_copy = 0;
struct bio *bio;

if (len > (q->max_hw_sectors << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;

bio = bio_map_kern(q, kbuf, len, gfp_mask);
kaddr = (unsigned long)kbuf;
alignment = queue_dma_alignment(q) | q->dma_pad_mask;
do_copy = ((kaddr & alignment) || (len & alignment));

if (do_copy)
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
else
bio = bio_map_kern(q, kbuf, len, gfp_mask);

if (IS_ERR(bio))
return PTR_ERR(bio);

if (rq_data_dir(rq) == WRITE)
bio->bi_rw |= (1 << BIO_RW);

if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;

blk_rq_bio_prep(q, rq, bio);
blk_queue_bounce(q, &rq->bio);
rq->buffer = rq->data = NULL;
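
With the change above, blk_rq_map_kern() transparently falls back to a bounce copy (bio_copy_kern()) whenever the buffer address or length violates queue_dma_alignment(q) | q->dma_pad_mask, so callers may now pass kernel buffers that are not DMA-aligned. A sketch of such a caller (mydrv_send_buf and its buffer handling are illustrative, not from this commit):

/* illustrative: map a possibly-unaligned kernel buffer and execute the request */
static int mydrv_send_buf(struct request_queue *q, void *buf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	/* rq->cmd_type / rq->cmd setup omitted for brevity */
	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return err;
}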
6 changes: 3 additions & 3 deletions block/blk-merge.c
@@ -55,7 +55,7 @@ void blk_recalc_rq_segments(struct request *rq)
if (!rq->bio)
return;

cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
hw_seg_size = seg_size = 0;
phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
rq_for_each_segment(bv, rq, iter) {
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
return 0;

if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
@@ -175,7 +175,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
int nsegs, cluster;

nsegs = 0;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

/*
* for each bio in rq
3 changes: 1 addition & 2 deletions block/blk-settings.c
@@ -14,7 +14,6 @@ unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;
EXPORT_SYMBOL(blk_max_pfn);

/**
* blk_queue_prep_rq - set a prepare_request function for queue
@@ -288,7 +287,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

26 changes: 26 additions & 0 deletions block/blk-sysfs.c
@@ -135,6 +135,25 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
return queue_var_show(max_hw_sectors_kb, (page));
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
return queue_var_show(blk_queue_nomerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
size_t count)
{
unsigned long nm;
ssize_t ret = queue_var_store(&nm, page, count);

if (nm)
set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
else
clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);

return ret;
}


static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -170,13 +189,20 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
.show = queue_hw_sector_size_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
.show = queue_nomerges_show,
.store = queue_nomerges_store,
};

static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr,
&queue_iosched_entry.attr,
&queue_hw_sector_size_entry.attr,
&queue_nomerges_entry.attr,
NULL,
};

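The new "nomerges" attribute exposes QUEUE_FLAG_NOMERGES through sysfs: writing 1 to /sys/block/<dev>/queue/nomerges disables request merging for that device, and writing 0 re-enables it. It pairs with the "block: Skip I/O merges when disabled" patch in this pull, which makes the I/O scheduler bail out of merge lookups when the flag is set. A sketch of how that check can be expressed (the real early-out sits in block/elevator.c and uses the blk_queue_nomerges() accessor):

/* sketch: how the elevator merge path can honor the new flag */
static int elv_merge_allowed(struct request_queue *q)
{
	if (blk_queue_nomerges(q))
		return 0;	/* caller then returns ELEVATOR_NO_MERGE */
	return 1;
}
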
8 changes: 4 additions & 4 deletions block/blk-tag.c
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
__blk_free_tags(bqt);

q->queue_tags = NULL;
q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
queue_flag_clear(QUEUE_FLAG_QUEUED, q);
}

/**
Expand Down Expand Up @@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
**/
void blk_queue_free_tags(struct request_queue *q)
{
clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
queue_flag_clear(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

@@ -188,7 +188,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
rc = blk_queue_resize_tags(q, depth);
if (rc)
return rc;
set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
queue_flag_set(QUEUE_FLAG_QUEUED, q);
return 0;
} else
atomic_inc(&tags->refcnt);
@@ -197,7 +197,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
* assign it, all done
*/
q->queue_tags = tags;
q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
queue_flag_set(QUEUE_FLAG_QUEUED, q);
INIT_LIST_HEAD(&q->tag_busy_list);
return 0;
fail:
1 change: 0 additions & 1 deletion block/blk.h
@@ -10,7 +10,6 @@
extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

void rq_init(struct request_queue *q, struct request *rq);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
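
With rq_init() renamed and exported as blk_rq_init(), the private declaration in block/blk.h is dropped; IDE and other callers pick the function up from the public header instead, where the prototype is simply:

extern void blk_rq_init(struct request_queue *q, struct request *rq);

(declared in include/linux/blkdev.h alongside the other exported request helpers).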