bcache: add backing_request_endio() for bi_end_io
In order to catch I/O errors on the backing device, a separate bi_end_io
callback is required. A per-backing-device counter can then record the
number of I/O errors and retire the backing device once the counter
reaches a per-backing-device I/O error limit.

This patch adds backing_request_endio() to the bcache backing device I/O
code path, as preparation for more complicated backing device failure
handling. So far there is no real change in code logic; making this a
separate patch ensures it is stable and reliable for further work.
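
For orientation, the follow-up failure handling this prepares for could
look roughly like the sketch below: backing_request_endio() bumps a
per-backing-device error counter, and the backing device is retired once
a configurable limit is reached. This is a minimal, hypothetical sketch,
not code from this series; the names backing_dev_errors, io_errors,
error_limit, and retire_backing_device() are illustrative assumptions.

#include <linux/atomic.h>
#include <linux/printk.h>

/* Hypothetical per-backing-device error accounting (illustrative only). */
struct backing_dev_errors {
	atomic_t io_errors;       /* I/O errors observed so far */
	unsigned int error_limit; /* retire the device at this count */
};

static void retire_backing_device(struct backing_dev_errors *e)
{
	/* stop forwarding I/O to the failed backing device; details TBD */
	pr_err("bcache: too many backing device I/O errors, retiring device\n");
}

static void count_backing_io_error(struct backing_dev_errors *e)
{
	unsigned int errors = atomic_add_return(1, &e->io_errors);

	if (errors >= e->error_limit)
		retire_backing_device(e);
}

The counter must be bumped from the endio path, which is why a
backing-device-specific bi_end_io callback is needed in the first place:
request_endio() completions cannot tell cache device errors from backing
device errors.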

Changelog:
v2: Fix typo in code comments, remove a redundant bch_writeback_add()
    line added in the v4 patch set.
v1: This patch is newly added in this patch set.

[mlyle: truncated commit subject]

Signed-off-by: Coly Li <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Reviewed-by: Michael Lyle <[email protected]>
Cc: Junhui Tang <[email protected]>
Cc: Michael Lyle <[email protected]>
Coly Li authored and mlyle committed Mar 16, 2018
Parent: ea12e23 · Commit: c6059b2
Showing 3 changed files with 79 additions and 16 deletions.
drivers/md/bcache/request.c: 77 additions, 16 deletions
@@ -139,6 +139,7 @@ static void bch_data_invalidate(struct closure *cl)
}

op->insert_data_done = true;
+/* get in bch_data_insert() */
bio_put(bio);
out:
continue_at(cl, bch_data_insert_keys, op->wq);
@@ -630,6 +631,38 @@ static void request_endio(struct bio *bio)
closure_put(cl);
}

+static void backing_request_endio(struct bio *bio)
+{
+struct closure *cl = bio->bi_private;
+
+if (bio->bi_status) {
+struct search *s = container_of(cl, struct search, cl);
+/*
+ * If a bio has REQ_PREFLUSH for writeback mode, it is
+ * specially assembled in cached_dev_write() for a non-zero
+ * write request which has REQ_PREFLUSH. We don't set
+ * s->iop.status for this failure; the status will be decided
+ * by the result of the bch_data_insert() operation.
+ */
+if (unlikely(s->iop.writeback &&
+bio->bi_opf & REQ_PREFLUSH)) {
+char buf[BDEVNAME_SIZE];
+
+bio_devname(bio, buf);
+pr_err("Can't flush %s: returned bi_status %i",
+buf, bio->bi_status);
+} else {
+/* set to orig_bio->bi_status in bio_complete() */
+s->iop.status = bio->bi_status;
+}
+s->recoverable = false;
+/* should count I/O error for backing device here */
+}
+
+bio_put(bio);
+closure_put(cl);
+}

static void bio_complete(struct search *s)
{
if (s->orig_bio) {
@@ -644,13 +677,21 @@ static void bio_complete(struct search *s)
}
}

-static void do_bio_hook(struct search *s, struct bio *orig_bio)
+static void do_bio_hook(struct search *s,
+struct bio *orig_bio,
+bio_end_io_t *end_io_fn)
{
struct bio *bio = &s->bio.bio;

bio_init(bio, NULL, 0);
__bio_clone_fast(bio, orig_bio);
-bio->bi_end_io = request_endio;
+/*
+ * bi_end_io can be set separately somewhere else, e.g. the
+ * variants in:
+ * - cache_bio->bi_end_io from cached_dev_cache_miss()
+ * - n->bi_end_io from cache_lookup_fn()
+ */
+bio->bi_end_io = end_io_fn;
bio->bi_private = &s->cl;

bio_cnt_set(bio, 3);
Expand All @@ -676,7 +717,7 @@ static inline struct search *search_alloc(struct bio *bio,
s = mempool_alloc(d->c->search, GFP_NOIO);

closure_init(&s->cl, NULL);
-do_bio_hook(s, bio);
+do_bio_hook(s, bio, request_endio);

s->orig_bio = bio;
s->cache_miss = NULL;
@@ -743,10 +784,11 @@ static void cached_dev_read_error(struct closure *cl)
trace_bcache_read_retry(s->orig_bio);

s->iop.status = 0;
-do_bio_hook(s, s->orig_bio);
+do_bio_hook(s, s->orig_bio, backing_request_endio);

/* XXX: invalidate cache */

+/* I/O request sent to backing device */
closure_bio_submit(s->iop.c, bio, cl);
}

@@ -859,7 +901,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
bio_copy_dev(cache_bio, miss);
cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

-cache_bio->bi_end_io = request_endio;
+cache_bio->bi_end_io = backing_request_endio;
cache_bio->bi_private = &s->cl;

bch_bio_map(cache_bio, NULL);
@@ -872,14 +914,16 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->cache_miss = miss;
s->iop.bio = cache_bio;
bio_get(cache_bio);
+/* I/O request sent to backing device */
closure_bio_submit(s->iop.c, cache_bio, &s->cl);

return ret;
out_put:
bio_put(cache_bio);
out_submit:
-miss->bi_end_io = request_endio;
+miss->bi_end_io = backing_request_endio;
miss->bi_private = &s->cl;
+/* I/O request sent to backing device */
closure_bio_submit(s->iop.c, miss, &s->cl);
return ret;
}
@@ -943,31 +987,46 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
s->iop.bio = s->orig_bio;
bio_get(s->iop.bio);

-if ((bio_op(bio) != REQ_OP_DISCARD) ||
-blk_queue_discard(bdev_get_queue(dc->bdev)))
-closure_bio_submit(s->iop.c, bio, cl);
+if (bio_op(bio) == REQ_OP_DISCARD &&
+!blk_queue_discard(bdev_get_queue(dc->bdev)))
+goto insert_data;
+
+/* I/O request sent to backing device */
+bio->bi_end_io = backing_request_endio;
+closure_bio_submit(s->iop.c, bio, cl);

} else if (s->iop.writeback) {
bch_writeback_add(dc);
s->iop.bio = bio;

if (bio->bi_opf & REQ_PREFLUSH) {
-/* Also need to send a flush to the backing device */
-struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
-dc->disk.bio_split);
+/*
+ * Also need to send a flush to the backing
+ * device.
+ */
+struct bio *flush;
+
+flush = bio_alloc_bioset(GFP_NOIO, 0,
+dc->disk.bio_split);
+if (!flush) {
+s->iop.status = BLK_STS_RESOURCE;
+goto insert_data;
+}
bio_copy_dev(flush, bio);
-flush->bi_end_io = request_endio;
+flush->bi_end_io = backing_request_endio;
flush->bi_private = cl;
flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

+/* I/O request sent to backing device */
closure_bio_submit(s->iop.c, flush, cl);
}
} else {
s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

+/* I/O request sent to backing device */
+bio->bi_end_io = backing_request_endio;
closure_bio_submit(s->iop.c, bio, cl);
}

+insert_data:
closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
continue_at(cl, cached_dev_write_complete, NULL);
}
@@ -981,6 +1040,7 @@ static void cached_dev_nodata(struct closure *cl)
bch_journal_meta(s->iop.c, cl);

/* If it's a flush, we send the flush to the backing device too */
+bio->bi_end_io = backing_request_endio;
closure_bio_submit(s->iop.c, bio, cl);

continue_at(cl, cached_dev_bio_complete, NULL);
@@ -1078,6 +1138,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
cached_dev_read(dc, s);
}
} else
+/* I/O request sent to backing device */
detached_dev_do_request(d, bio);

return BLK_QC_T_NONE;
drivers/md/bcache/super.c: 1 addition, 0 deletions
@@ -273,6 +273,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
bio->bi_private = dc;

closure_get(cl);
+/* I/O request sent to backing device */
__write_super(&dc->sb, bio);

closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
drivers/md/bcache/writeback.c: 1 addition, 0 deletions
@@ -289,6 +289,7 @@ static void write_dirty(struct closure *cl)
bio_set_dev(&io->bio, io->dc->bdev);
io->bio.bi_end_io = dirty_endio;

+/* I/O request sent to backing device */
closure_bio_submit(io->dc->disk.c, &io->bio, cl);
}

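One detail worth noting across all of the closure_bio_submit() call
sites marked above: the submit path takes a reference on the closure,
and the endio handler must drop it, along with the cloned bio's
reference. backing_request_endio() preserves exactly the same pairing
as request_endio(). A schematic of that contract, assuming
closure_bio_submit() wraps generic_make_request() as bcache's helper
did at this time (sketch_submit/sketch_endio are illustrative names,
not bcache code):

#include <linux/bio.h>
#include "closure.h" /* bcache's closure API */

static void sketch_submit(struct bio *bio, struct closure *cl)
{
	closure_get(cl); /* matched by closure_put() in the endio */
	generic_make_request(bio);
}

static void sketch_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	bio_put(bio);    /* drop the cloned bio's reference */
	closure_put(cl); /* matches closure_get() at submit time */
}

Getting this pairing wrong in the new callback would leak closures or
bios on the backing device path, which is why the patch keeps the endio
bodies identical apart from the error accounting hook.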
