Merge tag 'md/3.20' of git://neil.brown.name/md
Pull md updates from Neil Brown:

 - assorted locking changes so that access to /proc/mdstat
   and much of /sys/block/mdXX/md/* is protected by a spinlock
   rather than a mutex and will never block indefinitely.

 - Make an 'if' condition in RAID5 - which has been implicated
   in recent bugs - more readable.

 - misc minor fixes

* tag 'md/3.20' of git://neil.brown.name/md: (28 commits)
  md/raid10: fix conversion from RAID0 to RAID10
  md: wakeup thread upon rdev_dec_pending()
  md: make reconfig_mutex optional for writes to md sysfs files.
  md: move mddev_lock and related to md.h
  md: use mddev->lock to protect updates to resync_{min,max}.
  md: minor cleanup in safe_delay_store.
  md: move GET_BITMAP_FILE ioctl out from mddev_lock.
  md: tidy up set_bitmap_file
  md: remove unnecessary 'buf' from get_bitmap_file.
  md: remove mddev_lock from rdev_attr_show()
  md: remove mddev_lock() from md_attr_show()
  md/raid5: use ->lock to protect accessing raid5 sysfs attributes.
  md: remove need for mddev_lock() in md_seq_show()
  md/bitmap: protect clearing of ->bitmap by mddev->lock
  md: protect ->pers changes with mddev->lock
  md: level_store: group all important changes into one place.
  md: rename ->stop to ->free
  md: split detach operation out from ->stop.
  md/linear: remove rcu protections in favour of suspend/resume
  md: make merge_bvec_fn more robust in face of personality changes.
  ...
torvalds committed Feb 12, 2015
2 parents 87c9172 + 53a6ab4 commit 5d8e7fb
Showing 18 changed files with 867 additions and 608 deletions.
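The first bullet of the pull message above is the heart of the series: md sysfs and /proc/mdstat readers now take the mddev->lock spinlock for the short window in which shared state may be torn down, instead of reconfig_mutex, which could block them indefinitely. A minimal sketch of the resulting show-path pattern, modelled on the bitmap.c hunks below (the example_show name is invented for illustration; the real instances are can_clear_show() and behind_writes_used_show()):

static ssize_t example_show(struct mddev *mddev, char *page)
{
	ssize_t ret;

	/* Short, non-blocking spinlock instead of reconfig_mutex:
	 * ->bitmap may be cleared concurrently (see bitmap_destroy()),
	 * so only dereference it while holding mddev->lock.
	 */
	spin_lock(&mddev->lock);
	if (mddev->bitmap)
		ret = sprintf(page, "%lu\n",
			      mddev->bitmap->behind_writes_used);
	else
		ret = sprintf(page, "0\n");
	spin_unlock(&mddev->lock);
	return ret;
}
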
1 change: 1 addition & 0 deletions arch/x86/Makefile
@@ -148,6 +148,7 @@ cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTI

# does binutils support specific instructions?
asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
15 changes: 12 additions & 3 deletions drivers/md/bitmap.c
@@ -1619,7 +1619,9 @@ void bitmap_destroy(struct mddev *mddev)
return;

mutex_lock(&mddev->bitmap_info.mutex);
spin_lock(&mddev->lock);
mddev->bitmap = NULL; /* disconnect from the md device */
spin_unlock(&mddev->lock);
mutex_unlock(&mddev->bitmap_info.mutex);
if (mddev->thread)
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
@@ -2209,11 +2211,13 @@ __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
int len;
spin_lock(&mddev->lock);
if (mddev->bitmap)
len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
"false" : "true"));
else
len = sprintf(page, "\n");
spin_unlock(&mddev->lock);
return len;
}

@@ -2238,10 +2242,15 @@ __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
ssize_t ret;
spin_lock(&mddev->lock);
if (mddev->bitmap == NULL)
return sprintf(page, "0\n");
return sprintf(page, "%lu\n",
mddev->bitmap->behind_writes_used);
ret = sprintf(page, "0\n");
else
ret = sprintf(page, "%lu\n",
mddev->bitmap->behind_writes_used);
spin_unlock(&mddev->lock);
return ret;
}

static ssize_t
8 changes: 1 addition & 7 deletions drivers/md/dm-raid.c
@@ -746,13 +746,7 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

if (rs->raid_type->level == 1)
return md_raid1_congested(&rs->md, bits);

if (rs->raid_type->level == 10)
return md_raid10_congested(&rs->md, bits);

return md_raid5_congested(&rs->md, bits);
return mddev_congested(&rs->md, bits);
}

/*
8 changes: 3 additions & 5 deletions drivers/md/faulty.c
@@ -332,13 +332,11 @@ static int run(struct mddev *mddev)
return 0;
}

static int stop(struct mddev *mddev)
static void faulty_free(struct mddev *mddev, void *priv)
{
struct faulty_conf *conf = mddev->private;
struct faulty_conf *conf = priv;

kfree(conf);
mddev->private = NULL;
return 0;
}

static struct md_personality faulty_personality =
@@ -348,7 +346,7 @@ static struct md_personality faulty_personality =
.owner = THIS_MODULE,
.make_request = make_request,
.run = run,
.stop = stop,
.free = faulty_free,
.status = status,
.check_reshape = reshape,
.size = faulty_size,
67 changes: 20 additions & 47 deletions drivers/md/linear.c
@@ -34,7 +34,7 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)

lo = 0;
hi = mddev->raid_disks - 1;
conf = rcu_dereference(mddev->private);
conf = mddev->private;

/*
* Binary Search
@@ -60,18 +60,16 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
*
* Return amount of bytes we can take at this offset
*/
static int linear_mergeable_bvec(struct request_queue *q,
static int linear_mergeable_bvec(struct mddev *mddev,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
struct mddev *mddev = q->queuedata;
struct dev_info *dev0;
unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int maxbytes = biovec->bv_len;
struct request_queue *subq;

rcu_read_lock();
dev0 = which_dev(mddev, sector);
maxsectors = dev0->end_sector - sector;
subq = bdev_get_queue(dev0->rdev->bdev);
@@ -81,7 +79,6 @@ static int linear_mergeable_bvec(struct request_queue *q,
maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm,
biovec));
}
rcu_read_unlock();

if (maxsectors < bio_sectors)
maxsectors = 0;
@@ -97,24 +94,18 @@ static int linear_mergeable_bvec(struct request_queue *q,
return maxsectors << 9;
}

static int linear_congested(void *data, int bits)
static int linear_congested(struct mddev *mddev, int bits)
{
struct mddev *mddev = data;
struct linear_conf *conf;
int i, ret = 0;

if (mddev_congested(mddev, bits))
return 1;

rcu_read_lock();
conf = rcu_dereference(mddev->private);
conf = mddev->private;

for (i = 0; i < mddev->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}

rcu_read_unlock();
return ret;
}

@@ -123,12 +114,10 @@ static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disk
struct linear_conf *conf;
sector_t array_sectors;

rcu_read_lock();
conf = rcu_dereference(mddev->private);
conf = mddev->private;
WARN_ONCE(sectors || raid_disks,
"%s does not support generic reshape\n", __func__);
array_sectors = conf->array_sectors;
rcu_read_unlock();

return array_sectors;
}
@@ -217,10 +206,6 @@ static int linear_run (struct mddev *mddev)
mddev->private = conf;
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));

blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
mddev->queue->backing_dev_info.congested_fn = linear_congested;
mddev->queue->backing_dev_info.congested_data = mddev;

ret = md_integrity_register(mddev);
if (ret) {
kfree(conf);
@@ -252,38 +237,23 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
if (!newconf)
return -ENOMEM;

oldconf = rcu_dereference_protected(mddev->private,
lockdep_is_held(
&mddev->reconfig_mutex));
mddev_suspend(mddev);
oldconf = mddev->private;
mddev->raid_disks++;
rcu_assign_pointer(mddev->private, newconf);
mddev->private = newconf;
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors);
mddev_resume(mddev);
revalidate_disk(mddev->gendisk);
kfree_rcu(oldconf, rcu);
kfree(oldconf);
return 0;
}

static int linear_stop (struct mddev *mddev)
static void linear_free(struct mddev *mddev, void *priv)
{
struct linear_conf *conf =
rcu_dereference_protected(mddev->private,
lockdep_is_held(
&mddev->reconfig_mutex));
struct linear_conf *conf = priv;

/*
* We do not require rcu protection here since
* we hold reconfig_mutex for both linear_add and
* linear_stop, so they cannot race.
* We should make sure any old 'conf's are properly
* freed though.
*/
rcu_barrier();
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
kfree(conf);
mddev->private = NULL;

return 0;
}

static void linear_make_request(struct mddev *mddev, struct bio *bio)
@@ -299,16 +269,12 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
}

do {
rcu_read_lock();

tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
end_sector = tmp_dev->end_sector;
data_offset = tmp_dev->rdev->data_offset;
bio->bi_bdev = tmp_dev->rdev->bdev;

rcu_read_unlock();

if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
bio->bi_iter.bi_sector < start_sector))
goto out_of_bounds;
@@ -355,17 +321,24 @@ static void linear_status (struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}

static void linear_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality linear_personality =
{
.name = "linear",
.level = LEVEL_LINEAR,
.owner = THIS_MODULE,
.make_request = linear_make_request,
.run = linear_run,
.stop = linear_stop,
.free = linear_free,
.status = linear_status,
.hot_add_disk = linear_add,
.size = linear_size,
.quiesce = linear_quiesce,
.congested = linear_congested,
.mergeable_bvec = linear_mergeable_bvec,
};

static int __init linear_init (void)
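
Taken together, the faulty.c and linear.c hunks above show the reworked personality interface behind the "->stop to ->free" and "split detach operation out from ->stop" commits and the related merge_bvec cleanup: teardown receives the private data directly and returns nothing, and the congested and mergeable_bvec hooks hang off struct md_personality rather than being registered on the request queue. A rough skeleton of how a personality wires this up (every example_* identifier is invented for this sketch, and the remaining callbacks are assumed to be defined elsewhere):

static void example_free(struct mddev *mddev, void *priv)
{
	struct example_conf *conf = priv;

	/* md core now detaches the queue hooks and clears
	 * mddev->private itself; the personality only frees
	 * what it allocated in ->run().
	 */
	kfree(conf);
}

static struct md_personality example_personality = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.make_request	= example_make_request,
	.run		= example_run,
	.free		= example_free,		/* was .stop */
	.congested	= example_congested,	/* was the queue's congested_fn */
	.mergeable_bvec	= example_mergeable_bvec, /* now takes struct mddev * */
};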
