block: use bio_for_each_bvec() to map sg
Author:     Ming Lei <ming.lei@redhat.com>
AuthorDate: Fri, 15 Feb 2019 11:13:13 +0000 (19:13 +0800)
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: Fri, 15 Feb 2019 15:40:11 +0000 (08:40 -0700)
It is more efficient to use bio_for_each_bvec() to map the sg list; at the
same time, multi-page bvecs have to be split according to the queue's segment
limits during the mapping, as blk_bio_segment_split() already does.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
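
As background, a minimal user-space sketch of why the iterator change pays off
(hypothetical names and a 4 KiB page size, for illustration only; this is not
kernel code): bio_for_each_segment() yields at most one page worth of data per
iteration, while bio_for_each_bvec() yields each multi-page bvec whole, so the
mapping loop runs once per bvec instead of once per page.

#include <stdio.h>

#define PAGE_SIZE 4096u

/* One contiguous buffer range, as an (offset, length) pair. */
struct fake_bvec {
	unsigned bv_offset;
	unsigned bv_len;
};

/* Iterations a per-page walk (like bio_for_each_segment()) would take. */
static unsigned count_per_page(const struct fake_bvec *v, unsigned n)
{
	unsigned i, iters = 0;

	for (i = 0; i < n; i++) {
		unsigned first = v[i].bv_offset / PAGE_SIZE;
		unsigned last = (v[i].bv_offset + v[i].bv_len - 1) / PAGE_SIZE;

		iters += last - first + 1;
	}
	return iters;
}

int main(void)
{
	/* Two page-aligned bvecs: 64 KiB and 16 KiB. */
	struct fake_bvec vecs[] = {
		{ 0, 16 * PAGE_SIZE },
		{ 0,  4 * PAGE_SIZE },
	};

	/* Per-page walk: 20 iterations; per-bvec walk: 2 iterations. */
	printf("per-page iterations: %u\n",
	       count_per_page(vecs, sizeof(vecs) / sizeof(vecs[0])));
	printf("per-bvec iterations: %u\n",
	       (unsigned)(sizeof(vecs) / sizeof(vecs[0])));
	return 0;
}
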
block/blk-merge.c

index 4ef56b2d2aa563bfebc24f398bfeca8e46d3b776..1912499b08b71045deb3f335b653b8354222b241 100644
@@ -464,6 +464,54 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
 }
 
+static struct scatterlist *blk_next_sg(struct scatterlist **sg,
+               struct scatterlist *sglist)
+{
+       if (!*sg)
+               return sglist;
+
+       /*
+        * If the driver previously mapped a shorter list, we could see a
+        * termination bit prematurely unless it fully inits the sg table
+        * on each mapping. We KNOW that there must be more entries here
+        * or the driver would be buggy, so force clear the termination bit
+        * to avoid doing a full sg_init_table() in drivers for each command.
+        */
+       sg_unmark_end(*sg);
+       return sg_next(*sg);
+}
+
+static unsigned blk_bvec_map_sg(struct request_queue *q,
+               struct bio_vec *bvec, struct scatterlist *sglist,
+               struct scatterlist **sg)
+{
+       unsigned nbytes = bvec->bv_len;
+       unsigned nsegs = 0, total = 0, offset = 0;
+
+       while (nbytes > 0) {
+               unsigned seg_size;
+               struct page *pg;
+               unsigned idx;
+
+               *sg = blk_next_sg(sg, sglist);
+
+               seg_size = get_max_segment_size(q, bvec->bv_offset + total);
+               seg_size = min(nbytes, seg_size);
+
+               offset = (total + bvec->bv_offset) % PAGE_SIZE;
+               idx = (total + bvec->bv_offset) / PAGE_SIZE;
+               pg = nth_page(bvec->bv_page, idx);
+
+               sg_set_page(*sg, pg, seg_size, offset);
+
+               total += seg_size;
+               nbytes -= seg_size;
+               nsegs++;
+       }
+
+       return nsegs;
+}
+
 static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec *bvprv,
@@ -481,25 +529,7 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                (*sg)->length += nbytes;
        } else {
 new_segment:
-               if (!*sg)
-                       *sg = sglist;
-               else {
-                       /*
-                        * If the driver previously mapped a shorter
-                        * list, we could see a termination bit
-                        * prematurely unless it fully inits the sg
-                        * table on each mapping. We KNOW that there
-                        * must be more entries here or the driver
-                        * would be buggy, so force clear the
-                        * termination bit to avoid doing a full
-                        * sg_init_table() in drivers for each command.
-                        */
-                       sg_unmark_end(*sg);
-                       *sg = sg_next(*sg);
-               }
-
-               sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
-               (*nsegs)++;
+               (*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
        }
        *bvprv = *bvec;
 }
@@ -521,7 +551,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
        int nsegs = 0;
 
        for_each_bio(bio)
-               bio_for_each_segment(bvec, bio, iter)
+               bio_for_each_bvec(bvec, bio, iter)
                        __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
                                             &nsegs);
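
To make the splitting arithmetic in the new blk_bvec_map_sg() concrete, here
is a user-space sketch under stated assumptions (hypothetical names, a 4 KiB
page size, and a fixed per-segment cap; the real code recomputes the cap per
offset via get_max_segment_size(), which also honors the queue's segment
boundary mask). Note that an entry may straddle a page boundary, since only
the segment-size cap limits it:

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned min_u(unsigned a, unsigned b)
{
	return a < b ? a : b;
}

/* Carve one multi-page bvec into sg-entry-sized pieces. */
static unsigned map_bvec(unsigned bv_offset, unsigned bv_len,
			 unsigned max_seg_size)
{
	unsigned nbytes = bv_len, total = 0, nsegs = 0;

	while (nbytes > 0) {
		/* Cap each entry at the (assumed fixed) segment size. */
		unsigned seg_size = min_u(nbytes, max_seg_size);
		/* Page index and in-page offset, as in blk_bvec_map_sg(). */
		unsigned idx = (total + bv_offset) / PAGE_SIZE;
		unsigned offset = (total + bv_offset) % PAGE_SIZE;

		printf("entry %u: page %u, offset %u, len %u\n",
		       nsegs, idx, offset, seg_size);

		total += seg_size;
		nbytes -= seg_size;
		nsegs++;
	}
	return nsegs;
}

int main(void)
{
	/* An 8 KiB bvec starting 512 bytes into its first page, with a
	 * 4 KiB cap: two entries, each crossing a page boundary. */
	map_bvec(512, 2 * PAGE_SIZE, 4096);
	return 0;
}
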