Introduce rq_for_each_segment replacing rq_for_each_bio
Every usage of rq_for_each_bio wraps a usage of
bio_for_each_segment, so these can be combined into
rq_for_each_segment.

We define "struct req_iterator" to hold the 'bio' and 'index' that
are needed for the double iteration.

Signed-off-by: Neil Brown <[email protected]>

Various compile fixes by me...

Signed-off-by: Jens Axboe <[email protected]>
neilbrown authored and Jens Axboe committed Oct 10, 2007
1 parent 9dfa528 commit 5705f70
Showing 14 changed files with 131 additions and 162 deletions.
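For reference, the header change that defines the new iterator is not among the hunks shown below (the file listing here is truncated), but the combination described in the commit message boils down to roughly the sketch that follows. Only iter.bio and rq_iter_last() actually appear in the hunks; the field name i and the exact macro bodies are assumptions.

struct req_iterator {
	int i;			/* segment index within the current bio (assumed name) */
	struct bio *bio;	/* current bio in the request's chain */
};

/* Walk every bio of the request and every segment of each bio. */
#define rq_for_each_segment(bvl, _rq, _iter)				\
	for (_iter.bio = (_rq)->bio; _iter.bio; _iter.bio = _iter.bio->bi_next) \
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

/* True once the iterator sits on the last segment of the last bio. */
#define rq_iter_last(rq, _iter)						\
	(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt - 1)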
20 changes: 10 additions & 10 deletions Documentation/block/biodoc.txt
@@ -477,9 +477,9 @@ With this multipage bio design:
the same bi_io_vec array, but with the index and size accordingly modified)
- A linked list of bios is used as before for unrelated merges (*) - this
avoids reallocs and makes independent completions easier to handle.
- Code that traverses the req list needs to make a distinction between
segments of a request (bio_for_each_segment) and the distinct completion
units/bios (rq_for_each_bio).
- Code that traverses the req list can find all the segments of a
request by using rq_for_each_segment. This handles the fact that a request
has multiple bios, each of which can have multiple segments.
- Drivers which can't process a large bio in one shot can use the bi_idx
field to keep track of the next bio_vec entry to process.
(e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
@@ -664,14 +664,14 @@ in lvm or md.

3.2.1 Traversing segments and completion units in a request

The macros bio_for_each_segment() and rq_for_each_bio() should be used for
traversing the bios in the request list (drivers should avoid directly
trying to do it themselves). Using these helpers should also make it easier
to cope with block changes in the future.
The macro rq_for_each_segment() should be used for traversing the
segments of all the bios in a request (drivers should avoid directly
trying to do it themselves). Using this helper should also make it
easier to cope with block changes in the future.

rq_for_each_bio(bio, rq)
bio_for_each_segment(bio_vec, bio, i)
/* bio_vec is now current segment */
struct req_iterator iter;
rq_for_each_segment(bio_vec, rq, iter)
/* bio_vec is now current segment */

I/O completion callbacks are per-bio rather than per-segment, so drivers
that traverse bio chains on completion need to keep that in mind. Drivers
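As a concrete illustration of the usage documented above, a driver-style helper might look like the following sketch. The helper itself (count_request_bytes) is hypothetical; only the iteration idiom comes from the documentation.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical helper: total number of bytes covered by a request. */
static unsigned int count_request_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter) {
		/* bvec is now the current segment */
		bytes += bvec->bv_len;
	}
	return bytes;
}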
19 changes: 6 additions & 13 deletions block/ll_rw_blk.c
@@ -1244,8 +1244,7 @@ static void blk_recalc_rq_segments(struct request *rq)
int seg_size;
int hw_seg_size;
int cluster;
struct bio *bio;
int i;
struct req_iterator iter;
int high, highprv = 1;
struct request_queue *q = rq->q;

@@ -1255,8 +1254,7 @@ static void blk_recalc_rq_segments(struct request *rq)
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
hw_seg_size = seg_size = 0;
phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
rq_for_each_bio(bio, rq)
bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, rq, iter) {
/*
* the trick here is making sure that a high page is never
* considered part of another segment, since that might
@@ -1353,8 +1351,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sg)
{
struct bio_vec *bvec, *bvprv;
struct bio *bio;
int nsegs, i, cluster;
struct req_iterator iter;
int nsegs, cluster;

nsegs = 0;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
@@ -1363,11 +1361,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
* for each bio in rq
*/
bvprv = NULL;
rq_for_each_bio(bio, rq) {
/*
* for each segment in bio
*/
bio_for_each_segment(bvec, bio, i) {
rq_for_each_segment(bvec, rq, iter) {
int nbytes = bvec->bv_len;

if (bvprv && cluster) {
@@ -1390,8 +1384,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
nsegs++;
}
bvprv = bvec;
} /* segments in bio */
} /* bios in rq */
} /* segments in rq */

return nsegs;
}
81 changes: 38 additions & 43 deletions drivers/block/floppy.c
@@ -2437,22 +2437,19 @@ static void rw_interrupt(void)
/* Compute maximal contiguous buffer size. */
static int buffer_chain_size(void)
{
struct bio *bio;
struct bio_vec *bv;
int size, i;
int size;
struct req_iterator iter;
char *base;

base = bio_data(current_req->bio);
size = 0;

rq_for_each_bio(bio, current_req) {
bio_for_each_segment(bv, bio, i) {
if (page_address(bv->bv_page) + bv->bv_offset !=
base + size)
break;
rq_for_each_segment(bv, current_req, iter) {
if (page_address(bv->bv_page) + bv->bv_offset != base + size)
break;

size += bv->bv_len;
}
size += bv->bv_len;
}

return size >> 9;
@@ -2479,9 +2476,9 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
{
int remaining; /* number of transferred 512-byte sectors */
struct bio_vec *bv;
struct bio *bio;
char *buffer, *dma_buffer;
int size, i;
int size;
struct req_iterator iter;

max_sector = transfer_size(ssize,
min(max_sector, max_sector_2),
@@ -2514,43 +2511,41 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)

size = current_req->current_nr_sectors << 9;

rq_for_each_bio(bio, current_req) {
bio_for_each_segment(bv, bio, i) {
if (!remaining)
break;
rq_for_each_segment(bv, current_req, iter) {
if (!remaining)
break;

size = bv->bv_len;
SUPBOUND(size, remaining);
size = bv->bv_len;
SUPBOUND(size, remaining);

buffer = page_address(bv->bv_page) + bv->bv_offset;
buffer = page_address(bv->bv_page) + bv->bv_offset;
#ifdef FLOPPY_SANITY_CHECK
if (dma_buffer + size >
floppy_track_buffer + (max_buffer_sectors << 10) ||
dma_buffer < floppy_track_buffer) {
DPRINT("buffer overrun in copy buffer %d\n",
(int)((floppy_track_buffer -
dma_buffer) >> 9));
printk("fsector_t=%d buffer_min=%d\n",
fsector_t, buffer_min);
printk("current_count_sectors=%ld\n",
current_count_sectors);
if (CT(COMMAND) == FD_READ)
printk("read\n");
if (CT(COMMAND) == FD_WRITE)
printk("write\n");
break;
}
if (((unsigned long)buffer) % 512)
DPRINT("%p buffer not aligned\n", buffer);
#endif
if (CT(COMMAND) == FD_READ)
memcpy(buffer, dma_buffer, size);
else
memcpy(dma_buffer, buffer, size);

remaining -= size;
dma_buffer += size;
}
#ifdef FLOPPY_SANITY_CHECK
if (remaining) {
10 changes: 4 additions & 6 deletions drivers/block/lguest_blk.c
@@ -142,12 +142,11 @@ static irqreturn_t lgb_irq(int irq, void *_bd)
* return the total length. */
static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
{
unsigned int i = 0, idx, len = 0;
struct bio *bio;
unsigned int i = 0, len = 0;
struct req_iterator iter;
struct bio_vec *bvec;

rq_for_each_bio(bio, req) {
struct bio_vec *bvec;
bio_for_each_segment(bvec, bio, idx) {
rq_for_each_segment(bvec, req, iter) {
/* We told the block layer not to give us too many. */
BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
/* If we had a zero-length segment, it would look like
@@ -160,7 +159,6 @@ static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
dma->len[i] = bvec->bv_len;
len += bvec->bv_len;
i++;
}
}
/* If the array isn't full, we mark the end with a 0 length */
if (i < LGUEST_MAX_DMA_SECTIONS)
22 changes: 9 additions & 13 deletions drivers/block/nbd.c
@@ -180,7 +180,7 @@ static inline int sock_send_bvec(struct socket *sock, struct bio_vec *bvec,

static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
int result, i, flags;
int result, flags;
struct nbd_request request;
unsigned long size = req->nr_sectors << 9;
struct socket *sock = lo->sock;
@@ -205,16 +205,15 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
}

if (nbd_cmd(req) == NBD_CMD_WRITE) {
struct bio *bio;
struct req_iterator iter;
struct bio_vec *bvec;
/*
* we are really probing at internals to determine
* whether to set MSG_MORE or not...
*/
rq_for_each_bio(bio, req) {
struct bio_vec *bvec;
bio_for_each_segment(bvec, bio, i) {
rq_for_each_segment(bvec, req, iter) {
flags = 0;
if ((i < (bio->bi_vcnt - 1)) || bio->bi_next)
if (!rq_iter_last(req, iter))
flags = MSG_MORE;
dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
lo->disk->disk_name, req,
Expand All @@ -226,7 +225,6 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
result);
goto error_out;
}
}
}
}
return 0;
@@ -321,11 +319,10 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
dprintk(DBG_RX, "%s: request %p: got reply\n",
lo->disk->disk_name, req);
if (nbd_cmd(req) == NBD_CMD_READ) {
int i;
struct bio *bio;
rq_for_each_bio(bio, req) {
struct bio_vec *bvec;
bio_for_each_segment(bvec, bio, i) {
struct req_iterator iter;
struct bio_vec *bvec;

rq_for_each_segment(bvec, req, iter) {
result = sock_recv_bvec(sock, bvec);
if (result <= 0) {
printk(KERN_ERR "%s: Receive data failed (result %d)\n",
Expand All @@ -336,7 +333,6 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
}
dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
lo->disk->disk_name, req, bvec->bv_len);
}
}
}
return req;
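The nbd_send_req() change above swaps the open-coded "last segment of the request" test for rq_iter_last(). A minimal sketch of the resulting send loop, with error handling trimmed and the function name invented for illustration:

/* Sketch: request MSG_MORE on every segment except the last one. */
static int send_all_segments(struct nbd_device *lo, struct request *req)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	int result;

	rq_for_each_segment(bvec, req, iter) {
		int flags = rq_iter_last(req, iter) ? 0 : MSG_MORE;

		result = sock_send_bvec(lo->sock, bvec, flags);
		if (result <= 0)
			return result;
	}
	return 0;
}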
31 changes: 16 additions & 15 deletions drivers/block/ps3disk.c
@@ -91,30 +91,30 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
struct request *req, int gather)
{
unsigned int offset = 0;
struct bio *bio;
sector_t sector;
struct req_iterator iter;
struct bio_vec *bvec;
unsigned int i = 0, j;
unsigned int i = 0;
size_t size;
void *buf;

rq_for_each_bio(bio, req) {
sector = bio->bi_sector;
dev_dbg(&dev->sbd.core,
"%s:%u: bio %u: %u segs %u sectors from %lu\n",
__func__, __LINE__, i, bio_segments(bio),
bio_sectors(bio), sector);
bio_for_each_segment(bvec, bio, j) {
rq_for_each_segment(bvec, req, iter) {
unsigned long flags;
dev_dbg(&dev->sbd.core,
"%s:%u: bio %u: %u segs %u sectors from %lu\n",
__func__, __LINE__, i, bio_segments(iter.bio),
bio_sectors(iter.bio),
(unsigned long)iter.bio->bi_sector);

size = bvec->bv_len;
buf = __bio_kmap_atomic(bio, j, KM_IRQ0);
buf = bvec_kmap_irq(bvec, &flags);
if (gather)
memcpy(dev->bounce_buf+offset, buf, size);
else
memcpy(buf, dev->bounce_buf+offset, size);
offset += size;
flush_kernel_dcache_page(bio_iovec_idx(bio, j)->bv_page);
__bio_kunmap_atomic(bio, KM_IRQ0);
}
flush_kernel_dcache_page(bvec->bv_page);
bvec_kunmap_irq(bvec, &flags);

i++;
}
}
@@ -130,12 +130,13 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,

#ifdef DEBUG
unsigned int n = 0;
struct bio *bio;
struct bio_vec *bv;
struct req_iterator iter;

rq_for_each_bio(bio, req)
rq_for_each_segment(bv, req, iter)
n++;
dev_dbg(&dev->sbd.core,
"%s:%u: %s req has %u bios for %lu sectors %lu hard sectors\n",
"%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
__func__, __LINE__, op, n, req->nr_sectors,
req->hard_nr_sectors);
#endif
7 changes: 2 additions & 5 deletions drivers/block/xen-blkfront.c
@@ -150,9 +150,8 @@ static int blkif_queue_request(struct request *req)
struct blkfront_info *info = req->rq_disk->private_data;
unsigned long buffer_mfn;
struct blkif_request *ring_req;
struct bio *bio;
struct req_iterator iter;
struct bio_vec *bvec;
int idx;
unsigned long id;
unsigned int fsect, lsect;
int ref;
@@ -186,8 +185,7 @@ static int blkif_queue_request(struct request *req)
ring_req->operation = BLKIF_OP_WRITE_BARRIER;

ring_req->nr_segments = 0;
rq_for_each_bio (bio, req) {
bio_for_each_segment (bvec, bio, idx) {
rq_for_each_segment(bvec, req, iter) {
BUG_ON(ring_req->nr_segments
== BLKIF_MAX_SEGMENTS_PER_REQUEST);
buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
Expand All @@ -213,7 +211,6 @@ static int blkif_queue_request(struct request *req)
.last_sect = lsect };

ring_req->nr_segments++;
}
}

info->ring.req_prod_pvt++;