Merge tag 'for-4.14/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Some request-based DM core and DM multipath fixes and cleanups

 - Constify a few variables in DM core and DM integrity

 - Add bufio optimization and checksum failure accounting to DM
   integrity

 - Fix DM integrity to avoid checking integrity of failed reads

 - Fix DM integrity to use init_completion

 - A couple DM log-writes target fixes

 - Simplify DAX flushing by eliminating the unnecessary flush
   abstraction that was stood up for DM's use.

* tag 'for-4.14/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dax: remove the pmem_dax_ops->flush abstraction
  dm integrity: use init_completion instead of COMPLETION_INITIALIZER_ONSTACK
  dm integrity: make blk_integrity_profile structure const
  dm integrity: do not check integrity for failed read operations
  dm log writes: fix >512b sectorsize support
  dm log writes: don't use all the cpu while waiting to log blocks
  dm ioctl: constify ioctl lookup table
  dm: constify argument arrays
  dm integrity: count and display checksum failures
  dm integrity: optimize writing dm-bufio buffers that are partially changed
  dm rq: do not update rq partially in each ending bio
  dm rq: make dm-sq requeuing behavior consistent with dm-mq behavior
  dm mpath: complain about unsupported __multipath_map_bio() return values
  dm mpath: avoid that building with W=1 causes gcc 7 to complain about fall-through
torvalds committed Sep 14, 2017
2 parents 503f045 + c3ca015 commit dff4d1f
Showing 23 changed files with 189 additions and 167 deletions.
drivers/dax/super.c: 21 changes (14 additions & 7 deletions)
@@ -201,8 +201,10 @@ static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
if (!dax_dev)
return 0;

if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush)
#ifndef CONFIG_ARCH_HAS_PMEM_API
if (a == &dev_attr_write_cache.attr)
return 0;
#endif
return a->mode;
}

@@ -267,18 +269,23 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t size)
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
if (!dax_alive(dax_dev))
if (unlikely(!dax_alive(dax_dev)))
return;

if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))
if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
return;

if (dax_dev->ops->flush)
dax_dev->ops->flush(dax_dev, pgoff, addr, size);
arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
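The hunk above removes the pmem_dax_ops->flush indirection: with CONFIG_ARCH_HAS_PMEM_API, dax_flush() now calls arch_wb_cache_pmem() directly, and without it the function compiles to an empty stub. A minimal userspace sketch of the same compile-time stub pattern, assuming nothing beyond standard C (HAS_CACHE_WB and the helper names are illustrative, not the kernel API):

#include <stdio.h>
#include <stddef.h>

#ifdef HAS_CACHE_WB
/* Stand-in for the architecture's cache-writeback primitive. */
static void wb_cache_range(void *addr, size_t size)
{
	printf("writing back %zu bytes at %p\n", size, addr);
}

/* Real flush: call the helper directly, no per-device function pointer. */
static void flush(void *addr, size_t size)
{
	wb_cache_range(addr, size);
}
#else
/* No writeback support configured: flush compiles down to a no-op stub. */
static void flush(void *addr, size_t size)
{
	(void)addr;
	(void)size;
}
#endif

int main(void)
{
	char buf[64] = "dirty data";

	flush(buf, sizeof(buf));
	return 0;
}

Built with -DHAS_CACHE_WB the real path runs; without it flush() is a no-op, mirroring a !CONFIG_ARCH_HAS_PMEM_API kernel build.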
drivers/md/dm-bufio.c: 95 changes (67 additions & 28 deletions)
@@ -63,6 +63,12 @@
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1))

/*
* Align buffer writes to this boundary.
* Tests show that SSDs have the highest IOPS when using 4k writes.
*/
#define DM_BUFIO_WRITE_ALIGN 4096

/*
* dm_buffer->list_mode
*/
@@ -149,6 +155,10 @@ struct dm_buffer {
blk_status_t write_error;
unsigned long state;
unsigned long last_accessed;
unsigned dirty_start;
unsigned dirty_end;
unsigned write_start;
unsigned write_end;
struct dm_bufio_client *c;
struct list_head write_list;
struct bio bio;
@@ -560,7 +570,7 @@ static void dmio_complete(unsigned long error, void *context)
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
unsigned n_sectors, bio_end_io_t *end_io)
unsigned n_sectors, unsigned offset, bio_end_io_t *end_io)
{
int r;
struct dm_io_request io_req = {
@@ -578,10 +588,10 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,

if (b->data_mode != DATA_MODE_VMALLOC) {
io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = b->data;
io_req.mem.ptr.addr = (char *)b->data + offset;
} else {
io_req.mem.type = DM_IO_VMA;
io_req.mem.ptr.vma = b->data;
io_req.mem.ptr.vma = (char *)b->data + offset;
}

b->bio.bi_end_io = end_io;
@@ -609,10 +619,10 @@ static void inline_endio(struct bio *bio)
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
unsigned n_sectors, bio_end_io_t *end_io)
unsigned n_sectors, unsigned offset, bio_end_io_t *end_io)
{
char *ptr;
int len;
unsigned len;

bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
b->bio.bi_iter.bi_sector = sector;
@@ -625,29 +635,20 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
b->bio.bi_private = end_io;
bio_set_op_attrs(&b->bio, rw, 0);

/*
* We assume that if len >= PAGE_SIZE ptr is page-aligned.
* If len < PAGE_SIZE the buffer doesn't cross page boundary.
*/
ptr = b->data;
ptr = (char *)b->data + offset;
len = n_sectors << SECTOR_SHIFT;

if (len >= PAGE_SIZE)
BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
else
BUG_ON((unsigned long)ptr & (len - 1));

do {
if (!bio_add_page(&b->bio, virt_to_page(ptr),
len < PAGE_SIZE ? len : PAGE_SIZE,
unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
if (!bio_add_page(&b->bio, virt_to_page(ptr), this_step,
offset_in_page(ptr))) {
BUG_ON(b->c->block_size <= PAGE_SIZE);
use_dmio(b, rw, sector, n_sectors, end_io);
use_dmio(b, rw, sector, n_sectors, offset, end_io);
return;
}

len -= PAGE_SIZE;
ptr += PAGE_SIZE;
len -= this_step;
ptr += this_step;
} while (len > 0);

submit_bio(&b->bio);
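Because a write may now start at an arbitrary offset inside the block, the reworked use_inline_bio() above drops the page-alignment BUG_ONs and lets each bio_add_page() step cover at most the remainder of the current page. A standalone sketch of that stepping arithmetic, assuming a 4096-byte page and a made-up unaligned start address:

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define offset_in_page(p)	((unsigned long)(p) & (PAGE_SIZE - 1))

int main(void)
{
	unsigned long ptr = 0x10000200UL;	/* starts 512 bytes into a page */
	unsigned long len = 9216;		/* bytes left to map into the bio */

	while (len > 0) {
		/* Never cross a page boundary in a single step. */
		unsigned long this_step = PAGE_SIZE - offset_in_page(ptr);

		if (this_step > len)
			this_step = len;

		printf("page %#lx: offset %lu, length %lu\n",
		       ptr & ~(PAGE_SIZE - 1), offset_in_page(ptr), this_step);

		len -= this_step;
		ptr += this_step;
	}
	return 0;
}

For this example the loop emits three steps of 3584, 4096 and 1536 bytes, which is how the bio_add_page() loop carves up an unaligned buffer.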
@@ -657,18 +658,33 @@ static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
{
unsigned n_sectors;
sector_t sector;

if (rw == WRITE && b->c->write_callback)
b->c->write_callback(b);
unsigned offset, end;

sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;
n_sectors = 1 << b->c->sectors_per_block_bits;

if (rw != WRITE) {
n_sectors = 1 << b->c->sectors_per_block_bits;
offset = 0;
} else {
if (b->c->write_callback)
b->c->write_callback(b);
offset = b->write_start;
end = b->write_end;
offset &= -DM_BUFIO_WRITE_ALIGN;
end += DM_BUFIO_WRITE_ALIGN - 1;
end &= -DM_BUFIO_WRITE_ALIGN;
if (unlikely(end > b->c->block_size))
end = b->c->block_size;

sector += offset >> SECTOR_SHIFT;
n_sectors = (end - offset) >> SECTOR_SHIFT;
}

if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) &&
b->data_mode != DATA_MODE_VMALLOC)
use_inline_bio(b, rw, sector, n_sectors, end_io);
use_inline_bio(b, rw, sector, n_sectors, offset, end_io);
else
use_dmio(b, rw, sector, n_sectors, end_io);
use_dmio(b, rw, sector, n_sectors, offset, end_io);
}

/*----------------------------------------------------------------
@@ -720,6 +736,9 @@ static void __write_dirty_buffer(struct dm_buffer *b,
clear_bit(B_DIRTY, &b->state);
wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

b->write_start = b->dirty_start;
b->write_end = b->dirty_end;

if (!write_list)
submit_io(b, WRITE, write_endio);
else
@@ -1221,19 +1240,37 @@ void dm_bufio_release(struct dm_buffer *b)
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
unsigned start, unsigned end)
{
struct dm_bufio_client *c = b->c;

BUG_ON(start >= end);
BUG_ON(end > b->c->block_size);

dm_bufio_lock(c);

BUG_ON(test_bit(B_READING, &b->state));

if (!test_and_set_bit(B_DIRTY, &b->state))
if (!test_and_set_bit(B_DIRTY, &b->state)) {
b->dirty_start = start;
b->dirty_end = end;
__relink_lru(b, LIST_DIRTY);
} else {
if (start < b->dirty_start)
b->dirty_start = start;
if (end > b->dirty_end)
b->dirty_end = end;
}

dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
@@ -1398,6 +1435,8 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
wait_on_bit_io(&b->state, B_WRITING,
TASK_UNINTERRUPTIBLE);
set_bit(B_DIRTY, &b->state);
b->dirty_start = 0;
b->dirty_end = c->block_size;
__unlink_buffer(b);
__link_buffer(b, new_block, LIST_DIRTY);
} else {
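In the WRITE branch of submit_io() above, only the dirty byte range is written: the start is rounded down and the end rounded up to DM_BUFIO_WRITE_ALIGN (4 KiB), clamped to the block size, and converted to sectors. A standalone sketch of that rounding, with a made-up dirty range in a 64 KiB block:

#include <stdio.h>

#define DM_BUFIO_WRITE_ALIGN	4096U
#define SECTOR_SHIFT		9

int main(void)
{
	unsigned block_size = 65536;			/* hypothetical 64 KiB buffer */
	unsigned dirty_start = 5000, dirty_end = 9000;	/* bytes touched by the client */
	unsigned offset = dirty_start;
	unsigned end = dirty_end;

	offset &= -DM_BUFIO_WRITE_ALIGN;	/* round start down: 4096 */
	end += DM_BUFIO_WRITE_ALIGN - 1;	/* round end up: 12288 */
	end &= -DM_BUFIO_WRITE_ALIGN;
	if (end > block_size)
		end = block_size;

	printf("write %u sectors starting %u sectors into the block\n",
	       (end - offset) >> SECTOR_SHIFT, offset >> SECTOR_SHIFT);
	return 0;
}

Here the dirty bytes [5000, 9000) become a 16-sector write starting 8 sectors into the block, instead of rewriting all 128 sectors.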
drivers/md/dm-bufio.h: 9 changes (9 additions & 0 deletions)
@@ -93,6 +93,15 @@ void dm_bufio_release(struct dm_buffer *b);
*/
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);

/*
* Mark a part of the buffer dirty.
*
* The specified part of the buffer is scheduled to be written. dm-bufio may
* write the specified part of the buffer or it may write a larger superset.
*/
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
unsigned start, unsigned end);

/*
* Initiate writing of dirty buffers, without waiting for completion.
*/
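The new dm_bufio_mark_partial_buffer_dirty() lets a client (dm-integrity in this series) report only the byte range it actually modified; dm-bufio may still write a larger, alignment-rounded superset, as the submit_io() change above shows. A hypothetical caller sketch, assuming the existing dm_bufio_read()/dm_bufio_release() API; the client c, the block number and the 128-byte region at offset 100 are made up for illustration:

/* Assumes "dm-bufio.h" and <linux/err.h>; not a real in-tree caller. */
static int update_small_region(struct dm_bufio_client *c, sector_t block,
			       const void *new_bytes)
{
	struct dm_buffer *b;
	void *data;

	data = dm_bufio_read(c, block, &b);	/* read the block via the bufio cache */
	if (IS_ERR(data))
		return PTR_ERR(data);

	memcpy((char *)data + 100, new_bytes, 128);	/* modify only part of the block */

	/* Only bytes [100, 228) changed; dm-bufio may round the write out to 4k. */
	dm_bufio_mark_partial_buffer_dirty(b, 100, 100 + 128);

	dm_bufio_release(b);
	return 0;
}

Calling dm_bufio_mark_buffer_dirty(b) instead would dirty the whole block, which is all the old interface allowed.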
drivers/md/dm-cache-target.c: 4 changes (2 additions & 2 deletions)
@@ -2306,7 +2306,7 @@ static void init_features(struct cache_features *cf)
static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{
static struct dm_arg _args[] = {
static const struct dm_arg _args[] = {
{0, 2, "Invalid number of cache feature arguments"},
};

@@ -2348,7 +2348,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{
static struct dm_arg _args[] = {
static const struct dm_arg _args[] = {
{0, 1024, "Invalid number of policy arguments"},
};

drivers/md/dm-crypt.c: 2 changes (1 addition & 1 deletion)
@@ -2529,7 +2529,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
{
struct crypt_config *cc = ti->private;
struct dm_arg_set as;
static struct dm_arg _args[] = {
static const struct dm_arg _args[] = {
{0, 6, "Invalid number of feature args"},
};
unsigned int opt_params, val;
drivers/md/dm-flakey.c: 4 changes (2 additions & 2 deletions)
@@ -51,7 +51,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
unsigned argc;
const char *arg_name;

static struct dm_arg _args[] = {
static const struct dm_arg _args[] = {
{0, 6, "Invalid number of feature args"},
{1, UINT_MAX, "Invalid corrupt bio byte"},
{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
@@ -178,7 +178,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
*/
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
static struct dm_arg _args[] = {
static const struct dm_arg _args[] = {
{0, UINT_MAX, "Invalid up interval"},
{0, UINT_MAX, "Invalid down interval"},
};
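The dm-cache, dm-crypt and dm-flakey hunks above are all instances of the "dm: constify argument arrays" change: the {min, max, error} descriptor tables handed to dm_read_arg() become static const, so they live in read-only data instead of being writable per-call arrays. A tiny standalone sketch of the same table-driven range check (the struct and function names here are illustrative, not the device-mapper API):

#include <stdio.h>

/* Illustrative stand-in for struct dm_arg: an allowed range plus an error string. */
struct arg_range {
	unsigned min;
	unsigned max;
	const char *error;
};

/* static const: one read-only copy shared by every constructor invocation. */
static const struct arg_range feature_args = {0, 6, "Invalid number of feature args"};

static int read_arg(const struct arg_range *r, unsigned value, const char **error)
{
	if (value < r->min || value > r->max) {
		*error = r->error;
		return -1;
	}
	return 0;
}

int main(void)
{
	const char *error = NULL;

	if (read_arg(&feature_args, 9, &error))
		printf("rejected: %s\n", error);
	return 0;
}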