Skip to content

Commit

Permalink
block: Use accessor functions for queue limits
Browse files Browse the repository at this point in the history
Convert all external users of queue limits to using wrapper functions
instead of poking the request queue variables directly.

Signed-off-by: Martin K. Petersen <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
  • Loading branch information
martinkpetersen authored and Jens Axboe committed May 22, 2009
1 parent e1defc4 commit ae03bf6
Show file tree
Hide file tree
Showing 25 changed files with 147 additions and 97 deletions.
8 changes: 4 additions & 4 deletions block/blk-barrier.c
Original file line number Diff line number Diff line change
Expand Up @@ -388,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,

bio->bi_sector = sector;

if (nr_sects > q->max_hw_sectors) {
bio->bi_size = q->max_hw_sectors << 9;
nr_sects -= q->max_hw_sectors;
sector += q->max_hw_sectors;
if (nr_sects > queue_max_hw_sectors(q)) {
bio->bi_size = queue_max_hw_sectors(q) << 9;
nr_sects -= queue_max_hw_sectors(q);
sector += queue_max_hw_sectors(q);
} else {
bio->bi_size = nr_sects << 9;
nr_sects = 0;
Expand Down
16 changes: 8 additions & 8 deletions block/blk-core.c
Original file line number Diff line number Diff line change
Expand Up @@ -1437,11 +1437,11 @@ static inline void __generic_make_request(struct bio *bio)
goto end_io;
}

if (unlikely(nr_sectors > q->max_hw_sectors)) {
if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
bio_sectors(bio),
q->max_hw_sectors);
bdevname(bio->bi_bdev, b),
bio_sectors(bio),
queue_max_hw_sectors(q));
goto end_io;
}

Expand Down Expand Up @@ -1608,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio);
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
if (blk_rq_sectors(rq) > q->max_sectors ||
blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
return -EIO;
}
Expand All @@ -1621,8 +1621,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
* limitation.
*/
blk_recalc_rq_segments(rq);
if (rq->nr_phys_segments > q->max_phys_segments ||
rq->nr_phys_segments > q->max_hw_segments) {
if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
rq->nr_phys_segments > queue_max_hw_segments(q)) {
printk(KERN_ERR "%s: over max segments limit.\n", __func__);
return -EIO;
}
Expand Down
4 changes: 2 additions & 2 deletions block/blk-map.c
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
struct bio *bio = NULL;
int ret;

if (len > (q->max_hw_sectors << 9))
if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len)
return -EINVAL;
Expand Down Expand Up @@ -292,7 +292,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
struct bio *bio;
int ret;

if (len > (q->max_hw_sectors << 9))
if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
Expand Down
27 changes: 14 additions & 13 deletions block/blk-merge.c
Original file line number Diff line number Diff line change
Expand Up @@ -32,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
* never considered part of another segment, since that
* might change with the bounce page.
*/
high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
if (high || highprv)
goto new_segment;
if (cluster) {
if (seg_size + bv->bv_len > q->max_segment_size)
if (seg_size + bv->bv_len
> queue_max_segment_size(q))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
goto new_segment;
Expand Down Expand Up @@ -91,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
return 0;

if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
q->max_segment_size)
queue_max_segment_size(q))
return 0;

if (!bio_has_data(bio))
Expand Down Expand Up @@ -134,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
int nbytes = bvec->bv_len;

if (bvprv && cluster) {
if (sg->length + nbytes > q->max_segment_size)
if (sg->length + nbytes > queue_max_segment_size(q))
goto new_segment;

if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
Expand Down Expand Up @@ -205,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
{
int nr_phys_segs = bio_phys_segments(q, bio);

if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
|| req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
Expand All @@ -227,9 +228,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
unsigned short max_sectors;

if (unlikely(blk_pc_request(req)))
max_sectors = q->max_hw_sectors;
max_sectors = queue_max_hw_sectors(q);
else
max_sectors = q->max_sectors;
max_sectors = queue_max_sectors(q);

if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
req->cmd_flags |= REQ_NOMERGE;
Expand All @@ -251,9 +252,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
unsigned short max_sectors;

if (unlikely(blk_pc_request(req)))
max_sectors = q->max_hw_sectors;
max_sectors = queue_max_hw_sectors(q);
else
max_sectors = q->max_sectors;
max_sectors = queue_max_sectors(q);


if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
Expand Down Expand Up @@ -287,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
/*
* Will it become too large?
*/
if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
return 0;

total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
Expand All @@ -299,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
total_phys_segments--;
}

if (total_phys_segments > q->max_phys_segments)
if (total_phys_segments > queue_max_phys_segments(q))
return 0;

if (total_phys_segments > q->max_hw_segments)
if (total_phys_segments > queue_max_hw_segments(q))
return 0;

/* Merge is OK... */
Expand Down
15 changes: 12 additions & 3 deletions block/blk-settings.c
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,15 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
}
EXPORT_SYMBOL(blk_queue_max_sectors);

/*
 * blk_queue_max_hw_sectors - set the hardware sector limit for this queue
 * @q: the request queue for the device
 * @max_sectors: requested limit, in 512-byte sectors
 *
 * Stores @max_sectors as the queue's hardware sector limit, but never
 * lets the stored value drop below BLK_DEF_MAX_SECTORS.
 */
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
{
	q->max_hw_sectors = (max_sectors < BLK_DEF_MAX_SECTORS)
				? BLK_DEF_MAX_SECTORS : max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
* blk_queue_max_phys_segments - set max phys segments for a request for this queue
* @q: the request queue for the device
Expand Down Expand Up @@ -395,11 +404,11 @@ int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size)
{
if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
return -EINVAL;
/* make room for appending the drain */
--q->max_hw_segments;
--q->max_phys_segments;
blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
q->dma_drain_needed = dma_drain_needed;
q->dma_drain_buffer = buf;
q->dma_drain_size = size;
Expand Down
8 changes: 4 additions & 4 deletions block/blk-sysfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
int max_sectors_kb = q->max_sectors >> 1;
int max_sectors_kb = queue_max_sectors(q) >> 1;

return queue_var_show(max_sectors_kb, (page));
}
Expand All @@ -109,23 +109,23 @@ static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long max_sectors_kb,
max_hw_sectors_kb = q->max_hw_sectors >> 1,
max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;

spin_lock_irq(q->queue_lock);
q->max_sectors = max_sectors_kb << 1;
blk_queue_max_sectors(q, max_sectors_kb << 1);
spin_unlock_irq(q->queue_lock);

return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
int max_hw_sectors_kb = q->max_hw_sectors >> 1;
int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

return queue_var_show(max_hw_sectors_kb, (page));
}
Expand Down
2 changes: 1 addition & 1 deletion block/compat_ioctl.c
Original file line number Diff line number Diff line change
Expand Up @@ -766,7 +766,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return compat_put_int(arg, bdev_logical_block_size(bdev));
case BLKSECTGET:
return compat_put_ushort(arg,
bdev_get_queue(bdev)->max_sectors);
queue_max_sectors(bdev_get_queue(bdev)));
case BLKRASET: /* compatible, but no compat_ptr (!) */
case BLKFRASET:
if (!capable(CAP_SYS_ADMIN))
Expand Down
10 changes: 5 additions & 5 deletions block/ioctl.c
Original file line number Diff line number Diff line change
Expand Up @@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
bio->bi_private = &wait;
bio->bi_sector = start;

if (len > q->max_hw_sectors) {
bio->bi_size = q->max_hw_sectors << 9;
len -= q->max_hw_sectors;
start += q->max_hw_sectors;
if (len > queue_max_hw_sectors(q)) {
bio->bi_size = queue_max_hw_sectors(q) << 9;
len -= queue_max_hw_sectors(q);
start += queue_max_hw_sectors(q);
} else {
bio->bi_size = len << 9;
len = 0;
Expand Down Expand Up @@ -313,7 +313,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKSSZGET: /* get block device hardware sector size */
return put_int(arg, bdev_logical_block_size(bdev));
case BLKSECTGET:
return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
case BLKRASET:
case BLKFRASET:
if(!capable(CAP_SYS_ADMIN))
Expand Down
8 changes: 4 additions & 4 deletions block/scsi_ioctl.c
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)

static int sg_get_reserved_size(struct request_queue *q, int __user *p)
{
unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);

return put_user(val, p);
}
Expand All @@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p)

if (size < 0)
return -EINVAL;
if (size > (q->max_sectors << 9))
size = q->max_sectors << 9;
if (size > (queue_max_sectors(q) << 9))
size = queue_max_sectors(q) << 9;

q->sg_reserved_size = size;
return 0;
Expand Down Expand Up @@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
if (hdr->cmd_len > BLK_MAX_CDB)
return -EINVAL;

if (hdr->dxfer_len > (q->max_hw_sectors << 9))
if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
return -EIO;

if (hdr->dxfer_len)
Expand Down
6 changes: 4 additions & 2 deletions drivers/block/pktcdvd.c
Original file line number Diff line number Diff line change
Expand Up @@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
*/
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
if ((pd->settings.size << 9) / CD_FRAMESIZE
<= queue_max_phys_segments(q)) {
/*
* The cdrom device can handle one segment/frame
*/
clear_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
} else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
} else if ((pd->settings.size << 9) / PAGE_SIZE
<= queue_max_phys_segments(q)) {
/*
* We can handle this case at the expense of some extra memory
* copies during write operations
Expand Down
4 changes: 2 additions & 2 deletions drivers/cdrom/cdrom.c
Original file line number Diff line number Diff line change
Expand Up @@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
nr = 1;
if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;

len = nr * CD_FRAMESIZE_RAW;

Expand Down
28 changes: 14 additions & 14 deletions drivers/md/dm-table.c
Original file line number Diff line number Diff line change
Expand Up @@ -510,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
* combine_restrictions_low()
*/
rs->max_sectors =
min_not_zero(rs->max_sectors, q->max_sectors);
min_not_zero(rs->max_sectors, queue_max_sectors(q));

/*
* Check if merge fn is supported.
Expand All @@ -525,25 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)

rs->max_phys_segments =
min_not_zero(rs->max_phys_segments,
q->max_phys_segments);
queue_max_phys_segments(q));

rs->max_hw_segments =
min_not_zero(rs->max_hw_segments, q->max_hw_segments);
min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));

rs->logical_block_size = max(rs->logical_block_size,
queue_logical_block_size(q));

rs->max_segment_size =
min_not_zero(rs->max_segment_size, q->max_segment_size);
min_not_zero(rs->max_segment_size, queue_max_segment_size(q));

rs->max_hw_sectors =
min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));

rs->seg_boundary_mask =
min_not_zero(rs->seg_boundary_mask,
q->seg_boundary_mask);
queue_segment_boundary(q));

rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));

rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
Expand Down Expand Up @@ -914,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
* restrictions.
*/
blk_queue_max_sectors(q, t->limits.max_sectors);
q->max_phys_segments = t->limits.max_phys_segments;
q->max_hw_segments = t->limits.max_hw_segments;
q->logical_block_size = t->limits.logical_block_size;
q->max_segment_size = t->limits.max_segment_size;
q->max_hw_sectors = t->limits.max_hw_sectors;
q->seg_boundary_mask = t->limits.seg_boundary_mask;
q->bounce_pfn = t->limits.bounce_pfn;
blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
blk_queue_logical_block_size(q, t->limits.logical_block_size);
blk_queue_max_segment_size(q, t->limits.max_segment_size);
blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
blk_queue_bounce_limit(q, t->limits.bounce_pfn);

if (t->limits.no_cluster)
queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
Expand Down
2 changes: 1 addition & 1 deletion drivers/md/linear.c
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

disk->num_sectors = rdev->sectors;
Expand Down
Loading

0 comments on commit ae03bf6

Please sign in to comment.