Merge branch 'for-4.8/drivers' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
 "This branch also contains core changes.  I've come to the conclusion
  that from 4.9 and forward, I'll be doing just a single branch.  We
  often have dependencies between core and drivers, and it's hard to
  always split them up appropriately without pulling core into drivers
  when that happens.

  That said, this contains:

   - separate secure erase type for the core block layer, from
     Christoph.

   - set of discard fixes, from Christoph.

   - bio shrinking fixes from Christoph, as a follow-up to the
     op/flags change in the core branch.

   - map and append request fixes from Christoph.

   - NVMeF (NVMe over Fabrics) code from Christoph.  This is pretty
     exciting!

   - nvme-loop fixes from Arnd.

   - removal of ->driverfs_dev from Dan, after providing a
     device_add_disk() helper.

   - bcache fixes from Bhaktipriya and Yijing.

   - cdrom subchannel read fix from Vchannaiah.

   - set of lightnvm updates from Wenwei, Matias, Johannes, and Javier.

   - set of drbd updates and fixes from Fabian, Lars, and Philipp.

   - mg_disk error path fix from Bart.

   - user notification for failed device add for loop, from Minfei.

   - NVMe in general:
        + NVMe delay quirk from Guilherme.
        + SR-IOV support and command retry limits from Keith.
        + fix for memory-less NUMA node from Masayoshi.
        + use UINT_MAX for discard sectors, from Minfei.
        + cancel IO fixes from Ming.
        + don't allocate unused major, from Neil.
        + error code fixup from Dan.
        + use constants for PSDT/FUSE from James.
        + variable init fix from Jay.
        + fabrics fixes from Ming, Sagi, and Wei.
        + various fixes"

* 'for-4.8/drivers' of git://git.kernel.dk/linux-block: (115 commits)
  nvme/pci: Provide SR-IOV support
  nvme: initialize variable before logical OR'ing it
  block: unexport various bio mapping helpers
  scsi/osd: open code blk_make_request
  target: stop using blk_make_request
  block: simplify and export blk_rq_append_bio
  block: ensure bios return from blk_get_request are properly initialized
  virtio_blk: use blk_rq_map_kern
  memstick: don't allow REQ_TYPE_BLOCK_PC requests
  block: shrink bio size again
  block: simplify and cleanup bvec pool handling
  block: get rid of bio_rw and READA
  block: don't ignore -EOPNOTSUPP blkdev_issue_write_same
  block: introduce BLKDEV_DISCARD_ZERO to fix zeroout
  NVMe: don't allocate unused nvme_major
  nvme: avoid crashes when node 0 is memoryless node.
  nvme: Limit command retries
  loop: Make user notify for adding loop device failed
  nvme-loop: fix nvme-loop Kconfig dependencies
  nvmet: fix return value check in nvmet_subsys_alloc()
  ...
torvalds committed Jul 26, 2016
2 parents d05d7f4 + 13880f5 commit 3fc9d69
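The first item in the pull message above, the separate secure erase type, does not change the caller-visible helper: a secure erase is still requested through blkdev_issue_discard() with BLKDEV_DISCARD_SECURE. What changes is that the block layer now carries it as its own REQ_OP_SECURE_ERASE operation, gated on blk_queue_secure_erase() instead of a REQ_SECURE modifier bolted onto a discard (the generic_make_request_checks() hunk in block/blk-core.c below shows the new check). A minimal caller-side sketch, assuming the long-standing blkdev_issue_discard() signature; bdev, sector and nr_sects are placeholders:

	int err;

	/* plain discard */
	err = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);

	/*
	 * Secure erase: same helper, but the block layer now submits a
	 * distinct REQ_OP_SECURE_ERASE and fails it with -EOPNOTSUPP
	 * unless the queue advertises blk_queue_secure_erase().
	 */
	err = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				   BLKDEV_DISCARD_SECURE);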
Showing 129 changed files with 11,613 additions and 1,279 deletions.
Documentation/ioctl/cdrom.txt: 3 changes (2 additions, 1 deletion)
@@ -340,7 +340,8 @@ CDROMSUBCHNL Read subchannel data (struct cdrom_subchnl)
EINVAL format not CDROM_MSF or CDROM_LBA

notes:
Format is converted to CDROM_MSF on return
Format is converted to CDROM_MSF or CDROM_LBA
as per user request on return



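The documentation change above matches the "cdrom subchannel read fix" named in the pull message: the subchannel position is now returned in whichever format the caller asked for instead of always being converted to MSF. A small user-space sketch of the ioctl being documented; "/dev/cdrom" and the minimal error handling are illustrative:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/cdrom.h>

	int main(void)
	{
		struct cdrom_subchnl sc = { .cdsc_format = CDROM_LBA };
		int fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);

		if (fd < 0 || ioctl(fd, CDROMSUBCHNL, &sc) < 0)
			return 1;

		/*
		 * With the fix, cdsc_format is honoured on return: the
		 * position stays in LBA form instead of being converted
		 * to CDROM_MSF.
		 */
		printf("track %d, absolute LBA %d\n",
		       sc.cdsc_trk, sc.cdsc_absaddr.lba);
		return 0;
	}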
MAINTAINERS: 7 changes (7 additions, 0 deletions)
@@ -8184,6 +8184,13 @@ S: Supported
F: drivers/nvme/host/
F: include/linux/nvme.h

NVM EXPRESS TARGET DRIVER
M: Christoph Hellwig <[email protected]>
M: Sagi Grimberg <[email protected]>
L: [email protected]
S: Supported
F: drivers/nvme/target/

NVMEM FRAMEWORK
M: Srinivas Kandagatla <[email protected]>
M: Maxime Ripard <[email protected]>
arch/powerpc/sysdev/axonram.c: 3 changes (1 addition, 2 deletions)
@@ -223,7 +223,6 @@ static int axon_ram_probe(struct platform_device *device)
bank->disk->first_minor = azfs_minor;
bank->disk->fops = &axon_ram_devops;
bank->disk->private_data = bank;
bank->disk->driverfs_dev = &device->dev;

sprintf(bank->disk->disk_name, "%s%d",
AXON_RAM_DEVICE_NAME, axon_ram_bank_id);
@@ -238,7 +237,7 @@ static int axon_ram_probe(struct platform_device *device)
set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
add_disk(bank->disk);
device_add_disk(&device->dev, bank->disk);

bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0);
if (bank->irq_id == NO_IRQ) {
arch/um/drivers/ubd_kern.c: 5 changes (3 additions, 2 deletions)
@@ -801,6 +801,7 @@ static void ubd_device_release(struct device *dev)
static int ubd_disk_register(int major, u64 size, int unit,
struct gendisk **disk_out)
{
struct device *parent = NULL;
struct gendisk *disk;

disk = alloc_disk(1 << UBD_SHIFT);
@@ -823,12 +824,12 @@ static int ubd_disk_register(int major, u64 size, int unit,
ubd_devs[unit].pdev.dev.release = ubd_device_release;
dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]);
platform_device_register(&ubd_devs[unit].pdev);
disk->driverfs_dev = &ubd_devs[unit].pdev.dev;
parent = &ubd_devs[unit].pdev.dev;
}

disk->private_data = &ubd_devs[unit];
disk->queue = ubd_devs[unit].queue;
add_disk(disk);
device_add_disk(parent, disk);

*disk_out = disk;
return 0;
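The axonram and ubd hunks above show the whole pattern behind the ->driverfs_dev removal mentioned in the pull message: rather than stashing the parent device in the gendisk and then calling add_disk(), drivers hand the parent straight to the new device_add_disk() helper. A minimal before/after sketch, with pdev standing in for whatever parent device a given driver has (a NULL parent keeps the old add_disk() behaviour, which is what the ubd hunk relies on):

	/* before this series */
	disk->driverfs_dev = &pdev->dev;
	add_disk(disk);

	/* after: the parent device is passed directly */
	device_add_disk(&pdev->dev, disk);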
block/bio-integrity.c: 9 changes (4 additions, 5 deletions)
@@ -54,7 +54,6 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
{
struct bio_integrity_payload *bip;
struct bio_set *bs = bio->bi_pool;
unsigned long idx = BIO_POOL_NONE;
unsigned inline_vecs;

if (!bs || !bs->bio_integrity_pool) {
@@ -72,17 +71,19 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
memset(bip, 0, sizeof(*bip));

if (nr_vecs > inline_vecs) {
unsigned long idx = 0;

bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
bs->bvec_integrity_pool);
if (!bip->bip_vec)
goto err;
bip->bip_max_vcnt = bvec_nr_vecs(idx);
bip->bip_slab = idx;
} else {
bip->bip_vec = bip->bip_inline_vecs;
bip->bip_max_vcnt = inline_vecs;
}

bip->bip_slab = idx;
bip->bip_bio = bio;
bio->bi_integrity = bip;
bio->bi_rw |= REQ_INTEGRITY;
@@ -111,9 +112,7 @@ void bio_integrity_free(struct bio *bio)
bip->bip_vec->bv_offset);

if (bs && bs->bio_integrity_pool) {
if (bip->bip_slab != BIO_POOL_NONE)
bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
bip->bip_slab);
bvec_free(bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab);

mempool_free(bip, bs->bio_integrity_pool);
} else {
block/bio.c: 35 changes (18 additions, 17 deletions)
@@ -43,7 +43,7 @@
* unsigned short
*/
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
@@ -160,11 +160,15 @@ unsigned int bvec_nr_vecs(unsigned short idx)

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
if (!idx)
return;
idx--;

BIO_BUG_ON(idx >= BVEC_POOL_NR);

if (idx == BIOVEC_MAX_IDX)
if (idx == BVEC_POOL_MAX) {
mempool_free(bv, pool);
else {
} else {
struct biovec_slab *bvs = bvec_slabs + idx;

kmem_cache_free(bvs->slab, bv);
@@ -206,7 +210,7 @@ struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
* idx now points to the pool we want to allocate from. only the
* 1-vec entry pool is mempool backed.
*/
if (*idx == BIOVEC_MAX_IDX) {
if (*idx == BVEC_POOL_MAX) {
fallback:
bvl = mempool_alloc(pool, gfp_mask);
} else {
@@ -226,11 +230,12 @@ struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
*/
bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
*idx = BIOVEC_MAX_IDX;
*idx = BVEC_POOL_MAX;
goto fallback;
}
}

(*idx)++;
return bvl;
}

Expand All @@ -250,8 +255,7 @@ static void bio_free(struct bio *bio)
__bio_free(bio);

if (bs) {
if (bio_flagged(bio, BIO_OWNS_VEC))
bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));
bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

/*
* If we have front padding, adjust the bio pointer before freeing
@@ -420,7 +424,6 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
gfp_t saved_gfp = gfp_mask;
unsigned front_pad;
unsigned inline_vecs;
unsigned long idx = BIO_POOL_NONE;
struct bio_vec *bvl = NULL;
struct bio *bio;
void *p;
@@ -480,6 +483,8 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
bio_init(bio);

if (nr_iovecs > inline_vecs) {
unsigned long idx = 0;

bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
if (!bvl && gfp_mask != saved_gfp) {
punt_bios_to_rescuer(bs);
@@ -490,13 +495,12 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
if (unlikely(!bvl))
goto err_free;

bio_set_flag(bio, BIO_OWNS_VEC);
bio->bi_flags |= idx << BVEC_POOL_OFFSET;
} else if (nr_iovecs) {
bvl = bio->bi_inline_vecs;
}

bio->bi_pool = bs;
bio->bi_flags |= idx << BIO_POOL_OFFSET;
bio->bi_max_vecs = nr_iovecs;
bio->bi_io_vec = bvl;
return bio;
@@ -568,7 +572,7 @@ EXPORT_SYMBOL(bio_phys_segments);
*/
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

/*
* most users will be overriding ->bi_bdev with a new target,
@@ -1097,7 +1101,6 @@ int bio_uncopy_user(struct bio *bio)
bio_put(bio);
return ret;
}
EXPORT_SYMBOL(bio_uncopy_user);

/**
* bio_copy_user_iov - copy user data to bio
@@ -1392,7 +1395,6 @@ void bio_unmap_user(struct bio *bio)
__bio_unmap_user(bio);
bio_put(bio);
}
EXPORT_SYMBOL(bio_unmap_user);

static void bio_map_kern_endio(struct bio *bio)
{
@@ -1538,7 +1540,6 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
bio_put(bio);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_copy_kern);

/*
* bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
@@ -1832,7 +1833,7 @@ EXPORT_SYMBOL_GPL(bio_trim);
*/
mempool_t *biovec_create_pool(int pool_entries)
{
struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

return mempool_create_slab_pool(pool_entries, bp->slab);
}
@@ -2009,7 +2010,7 @@ static void __init biovec_init_slabs(void)
{
int i;

for (i = 0; i < BIOVEC_NR_POOLS; i++) {
for (i = 0; i < BVEC_POOL_NR; i++) {
int size;
struct biovec_slab *bvs = bvec_slabs + i;

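The bio.c and bio-integrity.c hunks above are the "simplify and cleanup bvec pool handling" change from the shortlog: bvec_alloc() now hands back a 1-based pool index, with 0 meaning "inline vecs, nothing pool-backed", the index is stored straight into bi_flags, and the BIO_OWNS_VEC flag plus the BIO_POOL_NONE sentinel go away. A condensed sketch of the allocate/free pairing, using only names that appear in the diff, with error paths abbreviated:

	unsigned long idx = 0;		/* 0 == inline vecs, nothing to free */
	struct bio_vec *bvl;

	if (nr_iovecs > inline_vecs) {
		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl)
			goto err_free;
		bio->bi_flags |= idx << BVEC_POOL_OFFSET;	/* remember which pool */
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	/*
	 * On the free side BVEC_POOL_IDX(bio) recovers idx; bvec_free()
	 * simply returns when it is 0, so no separate ownership flag is
	 * needed any more.
	 */
	bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));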
block/blk-core.c: 97 changes (25 additions, 72 deletions)
@@ -1294,10 +1294,15 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,

spin_lock_irq(q->queue_lock);
rq = get_request(q, rw, 0, NULL, gfp_mask);
if (IS_ERR(rq))
if (IS_ERR(rq)) {
spin_unlock_irq(q->queue_lock);
/* q->queue_lock is unlocked at this point */
return rq;
}

/* q->queue_lock is unlocked at this point */
rq->__data_len = 0;
rq->__sector = (sector_t) -1;
rq->bio = rq->biotail = NULL;
return rq;
}

@@ -1312,63 +1317,6 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
}
EXPORT_SYMBOL(blk_get_request);

/**
* blk_make_request - given a bio, allocate a corresponding struct request.
* @q: target request queue
* @bio: The bio describing the memory mappings that will be submitted for IO.
* It may be a chained-bio properly constructed by block/bio layer.
* @gfp_mask: gfp flags to be used for memory allocation
*
* blk_make_request is the parallel of generic_make_request for BLOCK_PC
* type commands. Where the struct request needs to be farther initialized by
* the caller. It is passed a &struct bio, which describes the memory info of
* the I/O transfer.
*
* The caller of blk_make_request must make sure that bi_io_vec
* are set to describe the memory buffers. That bio_data_dir() will return
* the needed direction of the request. (And all bio's in the passed bio-chain
* are properly set accordingly)
*
* If called under none-sleepable conditions, mapped bio buffers must not
* need bouncing, by calling the appropriate masked or flagged allocator,
* suitable for the target device. Otherwise the call to blk_queue_bounce will
* BUG.
*
* WARNING: When allocating/cloning a bio-chain, careful consideration should be
* given to how you allocate bios. In particular, you cannot use
* __GFP_DIRECT_RECLAIM for anything but the first bio in the chain. Otherwise
* you risk waiting for IO completion of a bio that hasn't been submitted yet,
* thus resulting in a deadlock. Alternatively bios should be allocated using
* bio_kmalloc() instead of bio_alloc(), as that avoids the mempool deadlock.
* If possible a big IO should be split into smaller parts when allocation
* fails. Partial allocation should not be an error, or you risk a live-lock.
*/
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
gfp_t gfp_mask)
{
struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

if (IS_ERR(rq))
return rq;

blk_rq_set_block_pc(rq);

for_each_bio(bio) {
struct bio *bounce_bio = bio;
int ret;

blk_queue_bounce(q, &bounce_bio);
ret = blk_rq_append_bio(q, rq, bounce_bio);
if (unlikely(ret)) {
blk_put_request(rq);
return ERR_PTR(ret);
}
}

return rq;
}
EXPORT_SYMBOL(blk_make_request);

/**
* blk_rq_set_block_pc - initialize a request to type BLOCK_PC
* @rq: request to be initialized
Expand All @@ -1377,9 +1325,6 @@ EXPORT_SYMBOL(blk_make_request);
void blk_rq_set_block_pc(struct request *rq)
{
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->__data_len = 0;
rq->__sector = (sector_t) -1;
rq->bio = rq->biotail = NULL;
memset(rq->__cmd, 0, sizeof(rq->__cmd));
}
EXPORT_SYMBOL(blk_rq_set_block_pc);
@@ -1982,16 +1927,21 @@ generic_make_request_checks(struct bio *bio)
}
}

if ((bio_op(bio) == REQ_OP_DISCARD) &&
(!blk_queue_discard(q) ||
((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
err = -EOPNOTSUPP;
goto end_io;
}

if (bio_op(bio) == REQ_OP_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
err = -EOPNOTSUPP;
goto end_io;
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
if (!blk_queue_discard(q))
goto not_supported;
break;
case REQ_OP_SECURE_ERASE:
if (!blk_queue_secure_erase(q))
goto not_supported;
break;
case REQ_OP_WRITE_SAME:
if (!bdev_write_same(bio->bi_bdev))
goto not_supported;
break;
default:
break;
}

/*
Expand All @@ -2008,6 +1958,8 @@ generic_make_request_checks(struct bio *bio)
trace_block_bio_queue(q, bio);
return true;

not_supported:
err = -EOPNOTSUPP;
end_io:
bio->bi_error = err;
bio_endio(bio);
@@ -3383,6 +3335,7 @@ bool blk_poll(struct request_queue *q, blk_qc_t cookie)

return false;
}
EXPORT_SYMBOL_GPL(blk_poll);

#ifdef CONFIG_PM
/**
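With blk_make_request() removed above, its former users (scsi/osd and the SCSI target, per the "open code blk_make_request" and "stop using blk_make_request" entries in the shortlog) open-code the same loop themselves: get a request, mark it BLOCK_PC, and append each possibly-bounced bio with the now-exported blk_rq_append_bio(). The hunks above also move the __data_len/__sector/bio initialisation from blk_rq_set_block_pc() into blk_old_get_request(), so freshly allocated requests come back properly initialised. A hedged caller-side sketch; the two-argument blk_rq_append_bio() form is assumed from the "simplify and export blk_rq_append_bio" commit, and error handling is abbreviated:

	struct request *rq;
	int ret;

	rq = blk_get_request(q, bio_data_dir(bio), GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);	/* now only sets cmd_type and clears __cmd */

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(rq, bounce_bio);	/* assumed (rq, bio) form */
		if (ret) {
			blk_put_request(rq);
			return ret;
		}
	}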