Skip to content

Commit

Permalink
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block
Browse files Browse the repository at this point in the history

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block:
  ide: always ensure that blk_delay_queue() is called if we have pending IO
  block: fix request sorting at unplug
  dm: improve block integrity support
  fs: export empty_aops
  ide: ide_requeue_and_plug() reinstate "always plug" behaviour
  blk-throttle: don't call xchg on bool
  ufs: remove unnecessary blk_flush_plug
  block: make the flush insertion use the tail of the dispatch list
  block: get rid of elv_insert() interface
  block: dump request state on seeing a corrupted request completion
  • Loading branch information
torvalds committed Apr 5, 2011
2 parents d0de4dc + 782b86e commit 44148a6
Show file tree
Hide file tree
Showing 14 changed files with 148 additions and 91 deletions.
4 changes: 2 additions & 2 deletions block/blk-core.c
Original file line number Diff line number Diff line change
Expand Up @@ -2163,7 +2163,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
* size, something has gone terribly wrong.
*/
if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
printk(KERN_ERR "blk: request botched\n");
blk_dump_rq_flags(req, "request botched");
req->__data_len = blk_rq_cur_bytes(req);
}

Expand Down Expand Up @@ -2665,7 +2665,7 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
struct request *rqa = container_of(a, struct request, queuelist);
struct request *rqb = container_of(b, struct request, queuelist);

return !(rqa->q == rqb->q);
return !(rqa->q <= rqb->q);
}

static void flush_plug_list(struct blk_plug *plug)
Expand Down
6 changes: 3 additions & 3 deletions block/blk-flush.c
Original file line number Diff line number Diff line change
Expand Up @@ -261,7 +261,7 @@ static bool blk_kick_flush(struct request_queue *q)
q->flush_rq.end_io = flush_end_io;

q->flush_pending_idx ^= 1;
elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE);
list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
return true;
}

Expand All @@ -281,7 +281,7 @@ static void flush_data_end_io(struct request *rq, int error)
* blk_insert_flush - insert a new FLUSH/FUA request
* @rq: request to insert
*
* To be called from elv_insert() for %ELEVATOR_INSERT_FLUSH insertions.
* To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
* @rq is being submitted. Analyze what needs to be done and put it on the
* right queue.
*
Expand Down Expand Up @@ -312,7 +312,7 @@ void blk_insert_flush(struct request *rq)
*/
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
list_add(&rq->queuelist, &q->queue_head);
list_add_tail(&rq->queuelist, &q->queue_head);
return;
}

Expand Down
12 changes: 11 additions & 1 deletion block/blk-integrity.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@

static struct kmem_cache *integrity_cachep;

static const char *bi_unsupported_name = "unsupported";

/**
* blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
* @q: request queue
Expand Down Expand Up @@ -358,6 +360,14 @@ static struct kobj_type integrity_ktype = {
.release = blk_integrity_release,
};

/**
 * blk_integrity_is_initialized - tell whether a disk carries a real profile
 * @disk: disk to query
 *
 * A disk counts as initialized only when it has an integrity profile whose
 * name is set and is not the "unsupported" placeholder installed by
 * blk_integrity_register(disk, NULL).
 */
bool blk_integrity_is_initialized(struct gendisk *disk)
{
	struct blk_integrity *bi = blk_get_integrity(disk);

	if (!bi || !bi->name)
		return false;

	return strcmp(bi->name, bi_unsupported_name) != 0;
}
EXPORT_SYMBOL(blk_integrity_is_initialized);

/**
* blk_integrity_register - Register a gendisk as being integrity-capable
* @disk: struct gendisk pointer to make integrity-aware
Expand Down Expand Up @@ -407,7 +417,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
bi->get_tag_fn = template->get_tag_fn;
bi->tag_size = template->tag_size;
} else
bi->name = "unsupported";
bi->name = bi_unsupported_name;

return 0;
}
Expand Down
4 changes: 2 additions & 2 deletions block/blk-throttle.c
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ struct throtl_grp {
unsigned long slice_end[2];

/* Some throttle limits got updated for the group */
bool limits_changed;
int limits_changed;
};

struct throtl_data
Expand All @@ -102,7 +102,7 @@ struct throtl_data
/* Work for dispatching throttled bios */
struct delayed_work throtl_work;

bool limits_changed;
int limits_changed;
};

enum tg_state_flags {
Expand Down
35 changes: 15 additions & 20 deletions block/elevator.c
Original file line number Diff line number Diff line change
Expand Up @@ -610,7 +610,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)

rq->cmd_flags &= ~REQ_STARTED;

elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
Expand Down Expand Up @@ -655,12 +655,25 @@ void elv_quiesce_end(struct request_queue *q)
queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}

void elv_insert(struct request_queue *q, struct request *rq, int where)
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
trace_block_rq_insert(q, rq);

rq->q = q;

BUG_ON(rq->cmd_flags & REQ_ON_PLUG);

if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS ||
(rq->cmd_flags & REQ_DISCARD)) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
where == ELEVATOR_INSERT_SORT)
where = ELEVATOR_INSERT_BACK;

switch (where) {
case ELEVATOR_INSERT_REQUEUE:
case ELEVATOR_INSERT_FRONT:
Expand Down Expand Up @@ -722,24 +735,6 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
BUG();
}
}

/*
 * Insert @rq into the elevator at position @where.
 *
 * NOTE(review): this appears to be the pre-merge ("-") side of the diff,
 * retained whole by the page rendering; the merged tree folds this logic
 * directly into the new __elv_add_request above and deletes elv_insert().
 */
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
/* Request must already be off any plug list at this point. */
BUG_ON(rq->cmd_flags & REQ_ON_PLUG);

if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS ||
(rq->cmd_flags & REQ_DISCARD)) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
where == ELEVATOR_INSERT_SORT)
/* Requests without elevator-private data cannot be sorted;
 * demote them to plain back insertion. */
where = ELEVATOR_INSERT_BACK;

elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
Expand Down
43 changes: 21 additions & 22 deletions drivers/ide/ide-io.c
Original file line number Diff line number Diff line change
Expand Up @@ -430,6 +430,26 @@ static inline void ide_unlock_host(struct ide_host *host)
}
}

/*
 * Put @rq (if any) back on @q; whenever there is still pending IO —
 * either the requeued request itself or something already on the queue —
 * arrange for the queue to be run again after a short delay.
 * Caller must hold q->queue_lock.
 */
static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
{
	if (!rq) {
		/* Nothing to requeue; only replug if work is pending. */
		if (!blk_peek_request(q))
			return;
	} else {
		blk_requeue_request(q, rq);
	}

	/* Use 3ms as that was the old plug delay */
	blk_delay_queue(q, 3);
}

/*
 * Locked wrapper around __ide_requeue_and_plug(): requeue @rq on the
 * drive's queue while holding queue_lock with interrupts disabled.
 */
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
	unsigned long irqflags;
	struct request_queue *q = drive->queue;

	spin_lock_irqsave(q->queue_lock, irqflags);
	__ide_requeue_and_plug(q, rq);
	spin_unlock_irqrestore(q->queue_lock, irqflags);
}

/*
* Issue a new request to a device.
*/
Expand Down Expand Up @@ -550,28 +570,7 @@ void do_ide_request(struct request_queue *q)
ide_unlock_host(host);
plug_device_2:
spin_lock_irq(q->queue_lock);

if (rq) {
blk_requeue_request(q, rq);
blk_delay_queue(q, queue_run_ms);
}
}

void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
struct request_queue *q = drive->queue;
unsigned long flags;

spin_lock_irqsave(q->queue_lock, flags);

if (rq)
blk_requeue_request(q, rq);

spin_unlock_irqrestore(q->queue_lock, flags);

/* Use 3ms as that was the old plug delay */
if (rq)
blk_delay_queue(q, 3);
__ide_requeue_and_plug(q, rq);
}

static int drive_is_ready(ide_drive_t *drive)
Expand Down
114 changes: 80 additions & 34 deletions drivers/md/dm-table.c
Original file line number Diff line number Diff line change
Expand Up @@ -926,21 +926,81 @@ static int dm_table_build_index(struct dm_table *t)
return r;
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * If %match_all is true, all devices' profiles must match.
 * If %match_all is false, all devices must at least have an
 * allocated integrity profile; but uninitialized is ok.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
						    bool match_all)
{
	struct gendisk *prev_disk = NULL, *template_disk = NULL;
	struct dm_dev_internal *dd = NULL;
	struct list_head *devices = dm_table_get_devices(t);

	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev.bdev->bd_disk;

		/* Every device must at least have a profile allocated. */
		if (!blk_get_integrity(template_disk))
			goto no_integrity;

		/* In relaxed mode, uninitialized profiles are skipped
		 * rather than compared. */
		if (!match_all && !blk_integrity_is_initialized(template_disk))
			continue;

		if (prev_disk &&
		    blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;

		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}

/*
 * Register the mapped device for blk_integrity support if
 * the underlying devices have an integrity profile. But all devices
 * may not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity profile).
 * Stacked DM devices force a 2 stage integrity profile validation:
 * 1 - during load, validate all initialized integrity profiles match
 * 2 - during resume, validate all integrity profiles match
 *
 * Returns 0 on success/no-op, non-zero on a conflicting profile.
 */
static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
{
struct list_head *devices = dm_table_get_devices(t);
struct dm_dev_internal *dd;
struct gendisk *template_disk = NULL;

/*
 * NOTE(review): the five lines below look like the pre-patch ("-") side
 * of this diff pasted alongside the new code — they register integrity
 * and return as soon as any device has a profile, preempting the staged
 * validation below. Confirm against the real tree before relying on this.
 */
list_for_each_entry(dd, devices, list)
if (bdev_get_integrity(dd->dm_dev.bdev)) {
t->integrity_supported = 1;
return blk_integrity_register(dm_disk(md), NULL);
}
/* Relaxed pass: uninitialized per-device profiles are acceptable here. */
template_disk = dm_table_get_integrity_disk(t, false);
if (!template_disk)
return 0;

/* No profile on the DM device yet: reserve one (template filled at resume). */
if (!blk_integrity_is_initialized(dm_disk(md))) {
t->integrity_supported = 1;
return blk_integrity_register(dm_disk(md), NULL);
}

/*
 * If DM device already has an initialized integrity
 * profile the new profile should not conflict.
 */
if (blk_integrity_is_initialized(template_disk) &&
blk_integrity_compare(dm_disk(md), template_disk) < 0) {
DMWARN("%s: conflict with existing integrity profile: "
"%s profile mismatch",
dm_device_name(t->md),
template_disk->disk_name);
return 1;
}

/* Preserve existing initialized integrity profile */
t->integrity_supported = 1;
return 0;
}

Expand Down Expand Up @@ -1094,41 +1154,27 @@ int dm_calculate_queue_limits(struct dm_table *table,

/*
 * Set the integrity profile for this device if all devices used have
 * matching profiles. We're quite deep in the resume path but still
 * don't know if all devices (particularly DM devices this device
 * may be stacked on) have matching profiles. Even if the profiles
 * don't match we have no way to fail (to resume) at this point.
 *
 * NOTE(review): this block interleaves the removed ("-") and added ("+")
 * sides of the diff — the old open-coded comparison loop sits inside the
 * new template_disk logic, and the final blk_integrity_register() call has
 * two second-argument lines (old bdev_get_integrity(...) and, after an
 * unreachable dangling fragment, new blk_get_integrity(template_disk)).
 * Reconstruct from the real tree before treating this as compilable.
 */
static void dm_table_set_integrity(struct dm_table *t)
{
struct list_head *devices = dm_table_get_devices(t);
struct dm_dev_internal *prev = NULL, *dd = NULL;
struct gendisk *template_disk = NULL;

if (!blk_get_integrity(dm_disk(t->md)))
return;

list_for_each_entry(dd, devices, list) {
if (prev &&
blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
dd->dm_dev.bdev->bd_disk) < 0) {
DMWARN("%s: integrity not set: %s and %s mismatch",
dm_device_name(t->md),
prev->dm_dev.bdev->bd_disk->disk_name,
dd->dm_dev.bdev->bd_disk->disk_name);
goto no_integrity;
}
prev = dd;
/* Strict pass: at resume, every device's profile must match. */
template_disk = dm_table_get_integrity_disk(t, true);
if (!template_disk &&
blk_integrity_is_initialized(dm_disk(t->md))) {
DMWARN("%s: device no longer has a valid integrity profile",
dm_device_name(t->md));
return;
}

if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
goto no_integrity;

blk_integrity_register(dm_disk(t->md),
bdev_get_integrity(prev->dm_dev.bdev));

return;

no_integrity:
blk_integrity_register(dm_disk(t->md), NULL);

return;
blk_get_integrity(template_disk));
}

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
Expand Down
9 changes: 8 additions & 1 deletion fs/inode.c
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,14 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
*/
static DECLARE_RWSEM(iprune_sem);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations. Exported so filesystems
 * can share this single instance instead of each keeping a private
 * static copy (see the deletions in fs/inode.c and fs/nilfs2/page.c).
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
* Statistics gathering..
*/
Expand Down Expand Up @@ -176,7 +184,6 @@ int proc_nr_inodes(ctl_table *table, int write,
*/
int inode_init_always(struct super_block *sb, struct inode *inode)
{
static const struct address_space_operations empty_aops;
static const struct inode_operations empty_iops;
static const struct file_operations empty_fops;
struct address_space *const mapping = &inode->i_data;
Expand Down
2 changes: 0 additions & 2 deletions fs/nilfs2/page.c
Original file line number Diff line number Diff line change
Expand Up @@ -495,8 +495,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
void nilfs_mapping_init(struct address_space *mapping,
struct backing_dev_info *bdi)
{
static const struct address_space_operations empty_aops;

mapping->host = NULL;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
Expand Down
Loading

0 comments on commit 44148a6

Please sign in to comment.