bcache: add backing_request_endio() for bi_end_io
authorColy Li <colyli@suse.de>
Mon, 19 Mar 2018 00:36:24 +0000 (17:36 -0700)
committerJens Axboe <axboe@kernel.dk>
Mon, 19 Mar 2018 02:15:20 +0000 (20:15 -0600)
In order to catch I/O errors on the backing device, a separate bi_end_io
callback is required. A per-backing-device counter can then record the
number of I/O errors and retire the backing device once the counter
reaches a per-backing-device I/O error limit.
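
As a rough sketch of where this is heading, the error accounting could
look something like the snippet below. This is illustrative only: the
io_errors counter, the error_limit threshold, and the retire step are
assumptions made for this sketch, not part of this patch; the real
fields and helpers arrive in later patches of the series.

    /* Illustrative sketch only -- not part of this patch. */
    static void count_backing_io_error(struct cached_dev *dc)
    {
            /* io_errors: assumed atomic error counter in struct cached_dev */
            unsigned int errors = atomic_add_return(1, &dc->io_errors);

            /* error_limit: assumed per-device threshold in struct cached_dev */
            if (errors >= dc->error_limit) {
                    pr_err("bcache: too many backing device I/O errors (%u)",
                           errors);
                    /* stop/retire the bcache device here */
            }
    }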

This patch adds backing_request_endio() to the bcache backing device I/O
code path, as preparation for more elaborate backing device failure
handling. So far there is no real change in code logic; making this a
separate patch ensures it is stable and reliable before further work is
built on top of it.

Changelog:
v2: Fix a typo in the code comments, remove a redundant
    bch_writeback_add() line added in the v4 patch set.
v1: This is newly added in this patch set.

[mlyle: truncated commit subject]

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Michael Lyle <mlyle@lyle.org>
Cc: Junhui Tang <tang.junhui@zte.com.cn>
Cc: Michael Lyle <mlyle@lyle.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/md/bcache/writeback.c

diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 5c8ae69c85029b058353b0e783e2dd725b781468..b4a5768afbe913854fc72fcec921dd92bb92a003 100644
@@ -139,6 +139,7 @@ static void bch_data_invalidate(struct closure *cl)
        }
 
        op->insert_data_done = true;
+       /* get in bch_data_insert() */
        bio_put(bio);
 out:
        continue_at(cl, bch_data_insert_keys, op->wq);
@@ -630,6 +631,38 @@ static void request_endio(struct bio *bio)
        closure_put(cl);
 }
 
+static void backing_request_endio(struct bio *bio)
+{
+       struct closure *cl = bio->bi_private;
+
+       if (bio->bi_status) {
+               struct search *s = container_of(cl, struct search, cl);
+               /*
+                * If a bio has REQ_PREFLUSH for writeback mode, it is
+                * specially assembled in cached_dev_write() for a non-zero
+                * write request which has REQ_PREFLUSH. We don't set
+                * s->iop.status for this failure; the status will be decided
+                * by the result of the bch_data_insert() operation.
+                */
+               if (unlikely(s->iop.writeback &&
+                            bio->bi_opf & REQ_PREFLUSH)) {
+                       char buf[BDEVNAME_SIZE];
+
+                       bio_devname(bio, buf);
+                       pr_err("Can't flush %s: returned bi_status %i",
+                               buf, bio->bi_status);
+               } else {
+                       /* set to orig_bio->bi_status in bio_complete() */
+                       s->iop.status = bio->bi_status;
+               }
+               s->recoverable = false;
+               /* should count I/O error for backing device here */
+       }
+
+       bio_put(bio);
+       closure_put(cl);
+}
+
 static void bio_complete(struct search *s)
 {
        if (s->orig_bio) {
@@ -644,13 +677,21 @@ static void bio_complete(struct search *s)
        }
 }
 
-static void do_bio_hook(struct search *s, struct bio *orig_bio)
+static void do_bio_hook(struct search *s,
+                       struct bio *orig_bio,
+                       bio_end_io_t *end_io_fn)
 {
        struct bio *bio = &s->bio.bio;
 
        bio_init(bio, NULL, 0);
        __bio_clone_fast(bio, orig_bio);
-       bio->bi_end_io          = request_endio;
+       /*
+        * bi_end_io can be set separately somewhere else, e.g. the
+        * variants in:
+        * - cache_bio->bi_end_io from cached_dev_cache_miss()
+        * - n->bi_end_io from cache_lookup_fn()
+        */
+       bio->bi_end_io          = end_io_fn;
        bio->bi_private         = &s->cl;
 
        bio_cnt_set(bio, 3);
@@ -676,7 +717,7 @@ static inline struct search *search_alloc(struct bio *bio,
        s = mempool_alloc(d->c->search, GFP_NOIO);
 
        closure_init(&s->cl, NULL);
-       do_bio_hook(s, bio);
+       do_bio_hook(s, bio, request_endio);
 
        s->orig_bio             = bio;
        s->cache_miss           = NULL;
@@ -743,10 +784,11 @@ static void cached_dev_read_error(struct closure *cl)
                trace_bcache_read_retry(s->orig_bio);
 
                s->iop.status = 0;
-               do_bio_hook(s, s->orig_bio);
+               do_bio_hook(s, s->orig_bio, backing_request_endio);
 
                /* XXX: invalidate cache */
 
+               /* I/O request sent to backing device */
                closure_bio_submit(s->iop.c, bio, cl);
        }
 
@@ -859,7 +901,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        bio_copy_dev(cache_bio, miss);
        cache_bio->bi_iter.bi_size      = s->insert_bio_sectors << 9;
 
-       cache_bio->bi_end_io    = request_endio;
+       cache_bio->bi_end_io    = backing_request_endio;
        cache_bio->bi_private   = &s->cl;
 
        bch_bio_map(cache_bio, NULL);
@@ -872,14 +914,16 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        s->cache_miss   = miss;
        s->iop.bio      = cache_bio;
        bio_get(cache_bio);
+       /* I/O request sent to backing device */
        closure_bio_submit(s->iop.c, cache_bio, &s->cl);
 
        return ret;
 out_put:
        bio_put(cache_bio);
 out_submit:
-       miss->bi_end_io         = request_endio;
+       miss->bi_end_io         = backing_request_endio;
        miss->bi_private        = &s->cl;
+       /* I/O request sent to backing device */
        closure_bio_submit(s->iop.c, miss, &s->cl);
        return ret;
 }
@@ -943,31 +987,46 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                s->iop.bio = s->orig_bio;
                bio_get(s->iop.bio);
 
-               if ((bio_op(bio) != REQ_OP_DISCARD) ||
-                   blk_queue_discard(bdev_get_queue(dc->bdev)))
-                       closure_bio_submit(s->iop.c, bio, cl);
+               if (bio_op(bio) == REQ_OP_DISCARD &&
+                   !blk_queue_discard(bdev_get_queue(dc->bdev)))
+                       goto insert_data;
+
+               /* I/O request sent to backing device */
+               bio->bi_end_io = backing_request_endio;
+               closure_bio_submit(s->iop.c, bio, cl);
+
        } else if (s->iop.writeback) {
                bch_writeback_add(dc);
                s->iop.bio = bio;
 
                if (bio->bi_opf & REQ_PREFLUSH) {
-                       /* Also need to send a flush to the backing device */
-                       struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
-                                                            dc->disk.bio_split);
-
+                       /*
+                        * Also need to send a flush to the backing
+                        * device.
+                        */
+                       struct bio *flush;
+
+                       flush = bio_alloc_bioset(GFP_NOIO, 0,
+                                                dc->disk.bio_split);
+                       if (!flush) {
+                               s->iop.status = BLK_STS_RESOURCE;
+                               goto insert_data;
+                       }
                        bio_copy_dev(flush, bio);
-                       flush->bi_end_io = request_endio;
+                       flush->bi_end_io = backing_request_endio;
                        flush->bi_private = cl;
                        flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
+                       /* I/O request sent to backing device */
                        closure_bio_submit(s->iop.c, flush, cl);
                }
        } else {
                s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
-
+               /* I/O request sent to backing device */
+               bio->bi_end_io = backing_request_endio;
                closure_bio_submit(s->iop.c, bio, cl);
        }
 
+insert_data:
        closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        continue_at(cl, cached_dev_write_complete, NULL);
 }
@@ -981,6 +1040,7 @@ static void cached_dev_nodata(struct closure *cl)
                bch_journal_meta(s->iop.c, cl);
 
        /* If it's a flush, we send the flush to the backing device too */
+       bio->bi_end_io = backing_request_endio;
        closure_bio_submit(s->iop.c, bio, cl);
 
        continue_at(cl, cached_dev_bio_complete, NULL);
@@ -1078,6 +1138,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
                                cached_dev_read(dc, s);
                }
        } else
+               /* I/O request sent to backing device */
                detached_dev_do_request(d, bio);
 
        return BLK_QC_T_NONE;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f1f64853114b92268cbc1b4133e06cde9d6e37da..2f8e70aefc90624c1ed47533bffd6473389e1001 100644
@@ -273,6 +273,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
        bio->bi_private = dc;
 
        closure_get(cl);
+       /* I/O request sent to backing device */
        __write_super(&dc->sb, bio);
 
        closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 70092ada68e68a909193f27396626443658c4814..4a9547cdcdc538fe979a1d3287930b148b711c17 100644
@@ -289,6 +289,7 @@ static void write_dirty(struct closure *cl)
                bio_set_dev(&io->bio, io->dc->bdev);
                io->bio.bi_end_io       = dirty_endio;
 
+               /* I/O request sent to backing device */
                closure_bio_submit(io->dc->disk.c, &io->bio, cl);
        }