Merge branch 'md-next' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/song/md into for-5.17/drivers

Pull MD updates from Song:

"The major changes are:

- REQ_NOWAIT support, by Vishal Verma
- raid6 benchmark optimization, by Dirk Müller
- Fix for acct bioset, by Xiao Ni
- Clean up max_queued_requests, by Mariusz Tkaczyk
- PREEMPT_RT optimization, by Davidlohr Bueso
- Use default_groups in kobj_type, by Greg Kroah-Hartman"

* 'md-next' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/song/md:
  md: use default_groups in kobj_type
  md: Move alloc/free acct bioset in to personality
  lib/raid6: Use strict priority ranking for pq gen() benchmarking
  lib/raid6: skip benchmark of non-chosen xor_syndrome functions
  md: fix spelling of "its"
  md: raid456 add nowait support
  md: raid10 add nowait support
  md: raid1 add nowait support
  md: add support for REQ_NOWAIT
  md: drop queue limitation for RAID1 and RAID10
  md/raid5: play nice with PREEMPT_RT
axboe committed Jan 6, 2022
2 parents 050f461 + 1745e85 commit d85bd82
Showing 13 changed files with 295 additions and 161 deletions.
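
A note on the headline feature before the diffs: REQ_NOWAIT is the kernel-internal flag behind RWF_NOWAIT (preadv2()/pwritev2()) and io_uring's nowait submissions. With this series, md completes such bios with BLK_STS_AGAIN instead of sleeping when the array would block. A minimal userspace sketch of the resulting behavior, assuming a kernel with this series applied; the device path is a placeholder:

/* Minimal sketch: nowait write to an md array. /dev/md0 is a
 * placeholder. With QUEUE_FLAG_NOWAIT set on the array, a suspended
 * array or a raised resync barrier now yields EAGAIN here instead of
 * blocking the caller.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	struct iovec iov;
	int fd = open("/dev/md0", O_WRONLY | O_DIRECT);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* O_DIRECT requires an aligned buffer */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0, 4096);
	iov.iov_base = buf;
	iov.iov_len = 4096;

	/* RWF_NOWAIT: fail fast with EAGAIN rather than block */
	if (pwritev2(fd, &iov, 1, 0, RWF_NOWAIT) < 0 && errno == EAGAIN)
		fprintf(stderr, "would block; submit again later\n");

	close(fd);
	return 0;
}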
2 changes: 1 addition & 1 deletion drivers/md/md-cluster.c
@@ -574,7 +574,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
 	int ret = 0;
 
 	if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
-		 "node %d received it's own msg\n", le32_to_cpu(msg->slot)))
+		 "node %d received its own msg\n", le32_to_cpu(msg->slot)))
 		return -1;
 	switch (le32_to_cpu(msg->type)) {
 	case METADATA_UPDATED:
53 changes: 41 additions & 12 deletions drivers/md/md.c
@@ -418,6 +418,12 @@ void md_handle_request(struct mddev *mddev, struct bio *bio)
 	rcu_read_lock();
 	if (is_suspended(mddev, bio)) {
 		DEFINE_WAIT(__wait);
+		/* Bail out if REQ_NOWAIT is set for the bio */
+		if (bio->bi_opf & REQ_NOWAIT) {
+			rcu_read_unlock();
+			bio_wouldblock_error(bio);
+			return;
+		}
 		for (;;) {
 			prepare_to_wait(&mddev->sb_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
@@ -3602,6 +3608,7 @@ static struct attribute *rdev_default_attrs[] = {
 	&rdev_ppl_size.attr,
 	NULL,
 };
+ATTRIBUTE_GROUPS(rdev_default);
 static ssize_t
 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
@@ -3651,7 +3658,7 @@ static const struct sysfs_ops rdev_sysfs_ops = {
 static struct kobj_type rdev_ktype = {
 	.release = rdev_free,
 	.sysfs_ops = &rdev_sysfs_ops,
-	.default_attrs = rdev_default_attrs,
+	.default_groups = rdev_default_groups,
 };
 
 int md_rdev_init(struct md_rdev *rdev)
@@ -5787,6 +5794,7 @@ int md_run(struct mddev *mddev)
 	int err;
 	struct md_rdev *rdev;
 	struct md_personality *pers;
+	bool nowait = true;
 
 	if (list_empty(&mddev->disks))
 		/* cannot run an array with no devices.. */
@@ -5857,8 +5865,13 @@
 			}
 		}
 		sysfs_notify_dirent_safe(rdev->sysfs_state);
+		nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
 	}
 
+	/* Set the NOWAIT flags if all underlying devices support it */
+	if (nowait)
+		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
+
 	if (!bioset_initialized(&mddev->bio_set)) {
 		err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 		if (err)
@@ -5869,13 +5882,6 @@
 		if (err)
 			goto exit_bio_set;
 	}
-	if (mddev->level != 1 && mddev->level != 10 &&
-	    !bioset_initialized(&mddev->io_acct_set)) {
-		err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
-				  offsetof(struct md_io_acct, bio_clone), 0);
-		if (err)
-			goto exit_sync_set;
-	}
 
 	spin_lock(&pers_lock);
 	pers = find_pers(mddev->level, mddev->clevel);
@@ -6052,9 +6058,6 @@
 	module_put(pers->owner);
 	md_bitmap_destroy(mddev);
 abort:
-	if (mddev->level != 1 && mddev->level != 10)
-		bioset_exit(&mddev->io_acct_set);
-exit_sync_set:
 	bioset_exit(&mddev->sync_set);
 exit_bio_set:
 	bioset_exit(&mddev->bio_set);
@@ -7002,6 +7005,15 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	if (!mddev->thread)
 		md_update_sb(mddev, 1);
+	/*
+	 * If the new disk does not support REQ_NOWAIT,
+	 * disable on the whole MD.
+	 */
+	if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
+		pr_info("%s: Disabling nowait because %s does not support nowait\n",
+			mdname(mddev), bdevname(rdev->bdev, b));
+		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
+	}
 	/*
 	 * Kick recovery, maybe this spare has to be added to the
 	 * array immediately.
@@ -8400,7 +8412,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
 	spin_lock(&pers_lock);
 	/* ensure module won't be unloaded */
 	if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
-		pr_warn("can't find md-cluster module or get it's reference.\n");
+		pr_warn("can't find md-cluster module or get its reference.\n");
 		spin_unlock(&pers_lock);
 		return -ENOENT;
 	}
@@ -8587,6 +8599,23 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
 }
 EXPORT_SYMBOL_GPL(md_submit_discard_bio);
 
+int acct_bioset_init(struct mddev *mddev)
+{
+	int err = 0;
+
+	if (!bioset_initialized(&mddev->io_acct_set))
+		err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
+				  offsetof(struct md_io_acct, bio_clone), 0);
+	return err;
+}
+EXPORT_SYMBOL_GPL(acct_bioset_init);
+
+void acct_bioset_exit(struct mddev *mddev)
+{
+	bioset_exit(&mddev->io_acct_set);
+}
+EXPORT_SYMBOL_GPL(acct_bioset_exit);
+
 static void md_end_io_acct(struct bio *bio)
 {
 	struct md_io_acct *md_io_acct = bio->bi_private;
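For readers following the default_attrs to default_groups conversion above: ATTRIBUTE_GROUPS(rdev_default) generates roughly the following (simplified from the helper macro in include/linux/sysfs.h), which is what rdev_ktype.default_groups now points at:

/* Roughly what ATTRIBUTE_GROUPS(rdev_default) expands to, wrapping
 * the existing attribute array in a group so it can be plugged into
 * kobj_type.default_groups:
 */
static const struct attribute_group rdev_default_group = {
	.attrs = rdev_default_attrs,
};

static const struct attribute_group *rdev_default_groups[] = {
	&rdev_default_group,
	NULL,
};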
2 changes: 2 additions & 0 deletions drivers/md/md.h
@@ -721,6 +721,8 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
 extern void md_finish_reshape(struct mddev *mddev);
 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
 			struct bio *bio, sector_t start, sector_t size);
+int acct_bioset_init(struct mddev *mddev);
+void acct_bioset_exit(struct mddev *mddev);
 void md_account_bio(struct mddev *mddev, struct bio **bio);
 
 extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
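Sketch of how a personality is meant to pair these two hooks: init in the personality's run path, exit in its free path and on any run-path failure. example_run()/example_free() and struct example_conf are hypothetical; acct_bioset_init()/acct_bioset_exit() are the real API declared above (raid0's conversion below follows this shape):

/* Illustrative sketch only, not kernel source. */
struct example_conf {
	int placeholder;	/* hypothetical per-array state */
};

static int example_run(struct mddev *mddev)
{
	struct example_conf *conf;

	if (acct_bioset_init(mddev))
		return -ENOMEM;

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		/* unwind the bioset on any later failure in run() */
		acct_bioset_exit(mddev);
		return -ENOMEM;
	}
	mddev->private = conf;
	return 0;
}

static void example_free(struct mddev *mddev, void *priv)
{
	kfree(priv);
	acct_bioset_exit(mddev);
}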
38 changes: 28 additions & 10 deletions drivers/md/raid0.c
@@ -356,7 +356,21 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
 	return array_sectors;
 }
 
-static void raid0_free(struct mddev *mddev, void *priv);
+static void free_conf(struct mddev *mddev, struct r0conf *conf)
+{
+	kfree(conf->strip_zone);
+	kfree(conf->devlist);
+	kfree(conf);
+	mddev->private = NULL;
+}
+
+static void raid0_free(struct mddev *mddev, void *priv)
+{
+	struct r0conf *conf = priv;
+
+	free_conf(mddev, conf);
+	acct_bioset_exit(mddev);
+}
 
 static int raid0_run(struct mddev *mddev)
 {
@@ -370,11 +384,16 @@ static int raid0_run(struct mddev *mddev)
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
 
+	if (acct_bioset_init(mddev)) {
+		pr_err("md/raid0:%s: alloc acct bioset failed.\n", mdname(mddev));
+		return -ENOMEM;
+	}
+
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
 		ret = create_strip_zones(mddev, &conf);
 		if (ret < 0)
-			return ret;
+			goto exit_acct_set;
 		mddev->private = conf;
 	}
 	conf = mddev->private;
@@ -413,17 +432,16 @@ static int raid0_run(struct mddev *mddev)
 	dump_zones(mddev);
 
 	ret = md_integrity_register(mddev);
+	if (ret)
+		goto free;
 
 	return ret;
-}
-
-static void raid0_free(struct mddev *mddev, void *priv)
-{
-	struct r0conf *conf = priv;
 
-	kfree(conf->strip_zone);
-	kfree(conf->devlist);
-	kfree(conf);
+free:
+	free_conf(mddev, conf);
+exit_acct_set:
+	acct_bioset_exit(mddev);
+	return ret;
 }
 
 static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
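The last hunk above interleaves deletions of the old raid0_free() body with the new error labels, which is hard to read in place; reconstructed, the tail of raid0_run() after this change looks roughly like this (a reading aid, not verbatim source):

	ret = md_integrity_register(mddev);
	if (ret)
		goto free;

	return ret;

free:
	free_conf(mddev, conf);
exit_acct_set:
	acct_bioset_exit(mddev);
	return ret;
}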
6 changes: 0 additions & 6 deletions drivers/md/raid1-10.c
@@ -22,12 +22,6 @@
 
 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
 
-/* When there are this many requests queue to be written by
- * the raid thread, we become 'congested' to provide back-pressure
- * for writeback.
- */
-static int max_queued_requests = 1024;
-
 /* for managing resync I/O pages */
 struct resync_pages {
 	void *raid_bio;
83 changes: 56 additions & 27 deletions drivers/md/raid1.c
@@ -929,8 +929,10 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
 	wake_up(&conf->wait_barrier);
 }
 
-static void _wait_barrier(struct r1conf *conf, int idx)
+static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
 {
+	bool ret = true;
+
 	/*
 	 * We need to increase conf->nr_pending[idx] very early here,
 	 * then raise_barrier() can be blocked when it waits for
@@ -961,7 +963,7 @@ static void _wait_barrier(struct r1conf *conf, int idx)
 	 */
 	if (!READ_ONCE(conf->array_frozen) &&
 	    !atomic_read(&conf->barrier[idx]))
-		return;
+		return ret;
 
 	/*
 	 * After holding conf->resync_lock, conf->nr_pending[idx]
@@ -979,18 +981,27 @@
 	 */
 	wake_up(&conf->wait_barrier);
 	/* Wait for the barrier in same barrier unit bucket to drop. */
-	wait_event_lock_irq(conf->wait_barrier,
-			    !conf->array_frozen &&
-			    !atomic_read(&conf->barrier[idx]),
-			    conf->resync_lock);
-	atomic_inc(&conf->nr_pending[idx]);
+
+	/* Return false when nowait flag is set */
+	if (nowait) {
+		ret = false;
+	} else {
+		wait_event_lock_irq(conf->wait_barrier,
+				!conf->array_frozen &&
+				!atomic_read(&conf->barrier[idx]),
+				conf->resync_lock);
+		atomic_inc(&conf->nr_pending[idx]);
+	}
+
 	atomic_dec(&conf->nr_waiting[idx]);
 	spin_unlock_irq(&conf->resync_lock);
+	return ret;
 }
 
-static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
+static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
 {
 	int idx = sector_to_idx(sector_nr);
+	bool ret = true;
 
 	/*
 	 * Very similar to _wait_barrier(). The difference is, for read
@@ -1002,7 +1013,7 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
 	atomic_inc(&conf->nr_pending[idx]);
 
 	if (!READ_ONCE(conf->array_frozen))
-		return;
+		return ret;
 
 	spin_lock_irq(&conf->resync_lock);
 	atomic_inc(&conf->nr_waiting[idx]);
@@ -1013,19 +1024,28 @@
 	 */
 	wake_up(&conf->wait_barrier);
 	/* Wait for array to be unfrozen */
-	wait_event_lock_irq(conf->wait_barrier,
-			    !conf->array_frozen,
-			    conf->resync_lock);
-	atomic_inc(&conf->nr_pending[idx]);
+
+	/* Return false when nowait flag is set */
+	if (nowait) {
+		/* Return false when nowait flag is set */
+		ret = false;
+	} else {
+		wait_event_lock_irq(conf->wait_barrier,
+				!conf->array_frozen,
+				conf->resync_lock);
+		atomic_inc(&conf->nr_pending[idx]);
+	}
+
 	atomic_dec(&conf->nr_waiting[idx]);
 	spin_unlock_irq(&conf->resync_lock);
+	return ret;
 }
 
-static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
+static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
 {
 	int idx = sector_to_idx(sector_nr);
 
-	_wait_barrier(conf, idx);
+	return _wait_barrier(conf, idx, nowait);
 }
 
 static void _allow_barrier(struct r1conf *conf, int idx)
@@ -1236,7 +1256,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 	 * Still need barrier for READ in case that whole
 	 * array is frozen.
 	 */
-	wait_read_barrier(conf, bio->bi_iter.bi_sector);
+	if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
+				bio->bi_opf & REQ_NOWAIT)) {
+		bio_wouldblock_error(bio);
+		return;
+	}
 
 	if (!r1_bio)
 		r1_bio = alloc_r1bio(mddev, bio);
@@ -1336,6 +1360,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		     bio->bi_iter.bi_sector, bio_end_sector(bio))) {
 
 		DEFINE_WAIT(w);
+		if (bio->bi_opf & REQ_NOWAIT) {
+			bio_wouldblock_error(bio);
+			return;
+		}
 		for (;;) {
 			prepare_to_wait(&conf->wait_barrier,
 					&w, TASK_IDLE);
@@ -1353,17 +1381,15 @@
 	 * thread has put up a bar for new requests.
 	 * Continue immediately if no resync is active currently.
 	 */
-	wait_barrier(conf, bio->bi_iter.bi_sector);
+	if (!wait_barrier(conf, bio->bi_iter.bi_sector,
+				bio->bi_opf & REQ_NOWAIT)) {
+		bio_wouldblock_error(bio);
+		return;
+	}
 
 	r1_bio = alloc_r1bio(mddev, bio);
 	r1_bio->sectors = max_write_sectors;
 
-	if (conf->pending_count >= max_queued_requests) {
-		md_wakeup_thread(mddev->thread);
-		raid1_log(mddev, "wait queued");
-		wait_event(conf->wait_barrier,
-			   conf->pending_count < max_queued_requests);
-	}
 	/* first select target devices under rcu_lock and
 	 * inc refcount on their rdev. Record them by setting
 	 * bios[x] to bio
@@ -1458,9 +1484,14 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			rdev_dec_pending(conf->mirrors[j].rdev, mddev);
 		r1_bio->state = 0;
 		allow_barrier(conf, bio->bi_iter.bi_sector);
+
+		if (bio->bi_opf & REQ_NOWAIT) {
+			bio_wouldblock_error(bio);
+			return;
+		}
 		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
-		wait_barrier(conf, bio->bi_iter.bi_sector);
+		wait_barrier(conf, bio->bi_iter.bi_sector, false);
 		goto retry_write;
 	}
 
@@ -1687,7 +1718,7 @@ static void close_sync(struct r1conf *conf)
 	int idx;
 
 	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
-		_wait_barrier(conf, idx);
+		_wait_barrier(conf, idx, false);
 		_allow_barrier(conf, idx);
 	}
 
@@ -3409,5 +3440,3 @@ MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
 MODULE_ALIAS("md-personality-3"); /* RAID1 */
 MODULE_ALIAS("md-raid1");
 MODULE_ALIAS("md-level-1");
-
-module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
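
For reference, bio_wouldblock_error(), which every nowait bail-out path added in this file calls, completes the bio immediately with BLK_STS_AGAIN; the block layer surfaces that to an RWF_NOWAIT submitter as -EAGAIN. Simplified from include/linux/bio.h:

/* Simplified sketch: complete the bio at once with BLK_STS_AGAIN
 * instead of queueing it, so a REQ_NOWAIT submitter sees -EAGAIN
 * and can retry from a context that is allowed to block.
 */
static inline void bio_wouldblock_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}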