Merge tag 'for-5.5/drivers-20191121' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
 "Here are the main block driver updates for 5.5. Nothing major in here,
  mostly just fixes. This contains:

   - a set of bcache changes via Coly

   - MD changes from Song

   - loop unmap write-zeroes fix (Darrick)

   - spelling fixes (Geert)

   - zoned additions/cleanups to null_blk/dm (Ajay)

   - allow null_blk online submit queue changes (Bart)

   - NVMe changes via Keith, nothing major here either"

* tag 'for-5.5/drivers-20191121' of git://git.kernel.dk/linux-block: (56 commits)
  Revert "bcache: fix fifo index swapping condition in journal_pin_cmp()"
  drivers/md/raid5-ppl.c: use the new spelling of RWH_WRITE_LIFE_NOT_SET
  drivers/md/raid5.c: use the new spelling of RWH_WRITE_LIFE_NOT_SET
  bcache: don't export symbols
  bcache: remove the extra cflags for request.o
  bcache: at least try to shrink 1 node in bch_mca_scan()
  bcache: add idle_max_writeback_rate sysfs interface
  bcache: add code comments in bch_btree_leaf_dirty()
  bcache: fix deadlock in bcache_allocator
  bcache: add code comment bch_keylist_pop() and bch_keylist_pop_front()
  bcache: deleted code comments for dead code in bch_data_insert_keys()
  bcache: add more accurate error messages in read_super()
  bcache: fix static checker warning in bcache_device_free()
  bcache: fix a lost wake-up problem caused by mca_cannibalize_lock
  bcache: fix fifo index swapping condition in journal_pin_cmp()
  md/raid10: prevent access of uninitialized resync_pages offset
  md: avoid invalid memory access for array sb->dev_roles
  md/raid1: avoid soft lockup under high load
  null_blk: add zone open, close, and finish support
  dm: add zone open, close and finish support
  ...
torvalds committed Nov 25, 2019
2 parents ff6814b + 00b8989 commit 2d53943
Showing 48 changed files with 783 additions and 400 deletions.
26 changes: 18 additions & 8 deletions drivers/block/loop.c
@@ -417,18 +417,20 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq,
return ret;
}

static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
int mode)
{
/*
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
* We use fallocate to manipulate the space mappings used by the image
* a.k.a. discard/zerorange. However we do not support this if
* encryption is enabled, because it may give an attacker useful
* information.
*/
struct file *file = lo->lo_backing_file;
int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
int ret;

mode |= FALLOC_FL_KEEP_SIZE;

if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
ret = -EOPNOTSUPP;
goto out;
@@ -596,9 +598,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
switch (req_op(rq)) {
case REQ_OP_FLUSH:
return lo_req_flush(lo, rq);
case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
return lo_discard(lo, rq, pos);
/*
* If the caller doesn't want deallocation, call zeroout to
* write zeroes the range. Otherwise, punch them out.
*/
return lo_fallocate(lo, rq, pos,
(rq->cmd_flags & REQ_NOUNMAP) ?
FALLOC_FL_ZERO_RANGE :
FALLOC_FL_PUNCH_HOLE);
case REQ_OP_DISCARD:
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
if (lo->transfer)
return lo_write_transfer(lo, rq, pos);
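Note on the loop change above: REQ_OP_DISCARD and unmapping write-zeroes requests are now both punched out, while a write-zeroes request carrying REQ_NOUNMAP is zeroed in place. A minimal user-space sketch of the same two fallocate modes; the backing file name and sizes are illustrative assumptions, not part of the patch:

#define _GNU_SOURCE
#include <fcntl.h>	/* fallocate() and FALLOC_FL_* with _GNU_SOURCE */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* "backing.img" is a placeholder loop backing file (assumption). */
	int fd = open("backing.img", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Deallocate 1 MiB at offset 0: what REQ_OP_DISCARD maps to. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 1 << 20))
		perror("punch hole");

	/* Zero 1 MiB at offset 1 MiB while keeping blocks allocated,
	 * matching the REQ_NOUNMAP write-zeroes path. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE, 1 << 20, 1 << 20))
		perror("zero range");

	close(fd);
	return 0;
}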
2 changes: 1 addition & 1 deletion drivers/block/mtip32xx/mtip32xx.c
@@ -129,7 +129,7 @@ struct mtip_compat_ide_task_request_s {
/*
* This function check_for_surprise_removal is called
* while card is removed from the system and it will
* read the vendor id from the configration space
* read the vendor id from the configuration space
*
* @pdev Pointer to the pci_dev structure.
*
8 changes: 8 additions & 0 deletions drivers/block/null_blk.h
@@ -96,6 +96,8 @@ int null_zone_report(struct gendisk *disk, sector_t sector,
blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
enum req_opf op, sector_t sector,
sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len);
#else
static inline int null_zone_init(struct nullb_device *dev)
{
@@ -115,5 +117,11 @@ static inline blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
{
return BLK_STS_NOTSUPP;
}
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector,
unsigned int len)
{
return len;
}
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __NULL_BLK_H */
104 changes: 77 additions & 27 deletions drivers/block/null_blk_main.c
@@ -227,7 +227,7 @@ static ssize_t nullb_device_uint_attr_store(unsigned int *val,
int result;

result = kstrtouint(page, 0, &tmp);
if (result)
if (result < 0)
return result;

*val = tmp;
@@ -241,7 +241,7 @@ static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
unsigned long tmp;

result = kstrtoul(page, 0, &tmp);
if (result)
if (result < 0)
return result;

*val = tmp;
@@ -255,15 +255,15 @@ static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
int result;

result = kstrtobool(page, &tmp);
if (result)
if (result < 0)
return result;

*val = tmp;
return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE) \
#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
static ssize_t \
nullb_device_##NAME##_show(struct config_item *item, char *page) \
{ \
@@ -274,31 +274,57 @@ static ssize_t \
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
size_t count) \
{ \
if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
return -EBUSY; \
return nullb_device_##TYPE##_attr_store( \
&to_nullb_device(item)->NAME, page, count); \
int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY; \
struct nullb_device *dev = to_nullb_device(item); \
TYPE new_value; \
int ret; \
\
ret = nullb_device_##TYPE##_attr_store(&new_value, page, count); \
if (ret < 0) \
return ret; \
if (apply_fn) \
ret = apply_fn(dev, new_value); \
else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
ret = -EBUSY; \
if (ret < 0) \
return ret; \
dev->NAME = new_value; \
return count; \
} \
CONFIGFS_ATTR(nullb_device_, NAME);

NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
NULLB_DEVICE_ATTR(zoned, bool);
NULLB_DEVICE_ATTR(zone_size, ulong);
NULLB_DEVICE_ATTR(zone_nr_conv, uint);
static int nullb_apply_submit_queues(struct nullb_device *dev,
unsigned int submit_queues)
{
struct nullb *nullb = dev->nullb;
struct blk_mq_tag_set *set;

if (!nullb)
return 0;

set = nullb->tag_set;
blk_mq_update_nr_hw_queues(set, submit_queues);
return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
}

NULLB_DEVICE_ATTR(size, ulong, NULL);
NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
NULLB_DEVICE_ATTR(irqmode, uint, NULL);
NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
NULLB_DEVICE_ATTR(index, uint, NULL);
NULLB_DEVICE_ATTR(blocking, bool, NULL);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
NULLB_DEVICE_ATTR(discard, bool, NULL);
NULLB_DEVICE_ATTR(mbps, uint, NULL);
NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
NULLB_DEVICE_ATTR(zoned, bool, NULL);
NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -996,6 +1022,16 @@ static int copy_from_nullb(struct nullb *nullb, struct page *dest,
return 0;
}

static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
unsigned int len, unsigned int off)
{
void *dst;

dst = kmap_atomic(page);
memset(dst + off, 0xFF, len);
kunmap_atomic(dst);
}

static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
size_t temp;
@@ -1036,10 +1072,24 @@ static int null_transfer(struct nullb *nullb, struct page *page,
unsigned int len, unsigned int off, bool is_write, sector_t sector,
bool is_fua)
{
struct nullb_device *dev = nullb->dev;
unsigned int valid_len = len;
int err = 0;

if (!is_write) {
err = copy_from_nullb(nullb, page, off, sector, len);
if (dev->zoned)
valid_len = null_zone_valid_read_len(nullb,
sector, len);

if (valid_len) {
err = copy_from_nullb(nullb, page, off,
sector, valid_len);
off += valid_len;
len -= valid_len;
}

if (len)
nullb_fill_pattern(nullb, page, len, off);
flush_dcache_page(page);
} else {
flush_dcache_page(page);
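The new APPLY hook above lets submit_queues be changed on a live nullb device: nullb_apply_submit_queues() resizes the hardware queue count via blk_mq_update_nr_hw_queues() instead of returning -EBUSY. A rough sketch of driving this from user space through configfs; the mount point /sys/kernel/config and the device name nullb0 are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Path assumes configfs is mounted at /sys/kernel/config and a
	 * null_blk device named "nullb0" already exists (hypothetical). */
	const char *attr = "/sys/kernel/config/nullb/nullb0/submit_queues";
	const char *val = "8";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* With the APPLY hook this write is accepted even after power=1,
	 * and the live device's hardware queue count is updated. */
	if (write(fd, val, strlen(val)) < 0)
		perror("write");

	close(fd);
	return 0;
}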
54 changes: 49 additions & 5 deletions drivers/block/null_blk_zoned.c
@@ -84,6 +84,24 @@ int null_zone_report(struct gendisk *disk, sector_t sector,
return 0;
}

size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len)
{
struct nullb_device *dev = nullb->dev;
struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
unsigned int nr_sectors = len >> SECTOR_SHIFT;

/* Read must be below the write pointer position */
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
sector + nr_sectors <= zone->wp)
return len;

if (sector > zone->wp)
return 0;

return (zone->wp - sector) << SECTOR_SHIFT;
}

static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors)
{
@@ -118,14 +136,14 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
return BLK_STS_OK;
}

static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
unsigned int zno = null_zone_no(dev, sector);
struct blk_zone *zone = &dev->zones[zno];
struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
size_t i;

switch (req_op(cmd->rq)) {
switch (op) {
case REQ_OP_ZONE_RESET_ALL:
for (i = 0; i < dev->nr_zones; i++) {
if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
@@ -141,6 +159,29 @@ static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
break;
case REQ_OP_ZONE_OPEN:
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
if (zone->cond == BLK_ZONE_COND_FULL)
return BLK_STS_IOERR;

zone->cond = BLK_ZONE_COND_EXP_OPEN;
break;
case REQ_OP_ZONE_CLOSE:
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
if (zone->cond == BLK_ZONE_COND_FULL)
return BLK_STS_IOERR;

zone->cond = BLK_ZONE_COND_CLOSED;
break;
case REQ_OP_ZONE_FINISH:
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;

zone->cond = BLK_ZONE_COND_FULL;
zone->wp = zone->start + zone->len;
break;
default:
return BLK_STS_NOTSUPP;
}
@@ -155,7 +196,10 @@ blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
return null_zone_write(cmd, sector, nr_sectors);
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
return null_zone_reset(cmd, sector);
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
return null_zone_mgmt(cmd, op, sector);
default:
return BLK_STS_OK;
}
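null_zone_mgmt() above emulates explicit zone open, close, and finish. One way to exercise those paths from user space is the zone management ioctls in <linux/blkzoned.h>; the sketch below assumes a zoned nullb device at /dev/nullb0 with 256 MiB zones, and that the running kernel exposes the BLKOPENZONE/BLKCLOSEZONE/BLKFINISHZONE ioctls added in this same cycle:

#include <fcntl.h>
#include <linux/blkzoned.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	/* Device path and zone size are illustrative assumptions. */
	int fd = open("/dev/nullb0", O_RDWR);
	struct blk_zone_range range = {
		.sector = 0,			/* start of the first zone */
		.nr_sectors = 256 * 2048,	/* 256 MiB in 512-byte sectors */
	};

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, BLKOPENZONE, &range))	/* zone -> explicitly open */
		perror("BLKOPENZONE");
	if (ioctl(fd, BLKCLOSEZONE, &range))	/* zone -> closed */
		perror("BLKCLOSEZONE");
	if (ioctl(fd, BLKFINISHZONE, &range))	/* zone -> full, wp at zone end */
		perror("BLKFINISHZONE");

	close(fd);
	return 0;
}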
2 changes: 0 additions & 2 deletions drivers/md/bcache/Makefile
@@ -5,5 +5,3 @@ obj-$(CONFIG_BCACHE) += bcache.o
bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\
io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
util.o writeback.o

CFLAGS_request.o += -Iblock
5 changes: 4 additions & 1 deletion drivers/md/bcache/alloc.c
@@ -377,7 +377,10 @@ static int bch_allocator_thread(void *arg)
if (!fifo_full(&ca->free_inc))
goto retry_invalidate;

bch_prio_write(ca);
if (bch_prio_write(ca, false) < 0) {
ca->invalidate_needs_gc = 1;
wake_up_gc(ca->set);
}
}
}
out:
4 changes: 3 additions & 1 deletion drivers/md/bcache/bcache.h
@@ -582,6 +582,7 @@ struct cache_set {
*/
wait_queue_head_t btree_cache_wait;
struct task_struct *btree_cache_alloc_lock;
spinlock_t btree_cannibalize_lock;

/*
* When we free a btree node, we increment the gen of the bucket the
@@ -723,6 +724,7 @@ struct cache_set {
unsigned int gc_always_rewrite:1;
unsigned int shrinker_disabled:1;
unsigned int copy_gc_enabled:1;
unsigned int idle_max_writeback_rate_enabled:1;

#define BUCKET_HASH_BITS 12
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
@@ -977,7 +979,7 @@ bool bch_cached_dev_error(struct cached_dev *dc);
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);

void bch_prio_write(struct cache *ca);
int bch_prio_write(struct cache *ca, bool wait);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);

extern struct workqueue_struct *bcache_wq;