scsi: block: remove the cluster flag
author    Christoph Hellwig <hch@lst.de>
          Thu, 13 Dec 2018 15:17:10 +0000 (16:17 +0100)
committer Martin K. Petersen <martin.petersen@oracle.com>
          Wed, 19 Dec 2018 04:39:26 +0000 (23:39 -0500)
Now that the SCSI layer has replaced its use of the cluster flag with
segment size limits and the DMA boundary, we can remove the cluster flag
from the block layer.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
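
For drivers that relied on the old non-clustering behavior, the same
constraint is now expressed through the generic queue limits. Below is a
hedged sketch, not part of this patch: the helper and its probe-time
context are hypothetical, while blk_queue_max_segment_size() and
blk_queue_segment_boundary() are the existing block layer setters (the
latter is what the SCSI midlayer uses to apply a host's dma_boundary).

#include <linux/blkdev.h>

/* Hypothetical probe-time helper: keep every scatter/gather segment
 * within a single page, which is what cluster == 0 used to guarantee. */
static void example_disable_segment_merging(struct request_queue *q)
{
	/* Cap each segment at one page... */
	blk_queue_max_segment_size(q, PAGE_SIZE);

	/* ...and forbid segments from crossing a page boundary, so
	 * physically adjacent bvecs are never merged across pages. */
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
}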
block/blk-merge.c
block/blk-settings.c
block/blk-sysfs.c
include/linux/blkdev.h

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6b5ad275ed565de274746b1ef473f46a3a20a621..4478d53cc6eecd74c15b41cdcb33bd6329d36353 100644
@@ -194,7 +194,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
                        goto split;
                }
 
-               if (bvprvp && blk_queue_cluster(q)) {
+               if (bvprvp) {
                        if (seg_size + bv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
                        if (!biovec_phys_mergeable(q, bvprvp, &bv))
@@ -294,7 +294,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             bool no_sg_merge)
 {
        struct bio_vec bv, bvprv = { NULL };
-       int cluster, prev = 0;
+       int prev = 0;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;
@@ -312,7 +312,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
        }
 
        fbio = bio;
-       cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
@@ -324,7 +323,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                        if (no_sg_merge)
                                goto new_segment;
 
-                       if (prev && cluster) {
+                       if (prev) {
                                if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
@@ -395,9 +394,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 {
        struct bio_vec end_bv = { NULL }, nxt_bv;
 
-       if (!blk_queue_cluster(q))
-               return 0;
-
        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;
@@ -414,12 +410,12 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec *bvprv,
-                    struct scatterlist **sg, int *nsegs, int *cluster)
+                    struct scatterlist **sg, int *nsegs)
 {
 
        int nbytes = bvec->bv_len;
 
-       if (*sg && *cluster) {
+       if (*sg) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;
                if (!biovec_phys_mergeable(q, bvprv, bvec))
@@ -465,12 +461,12 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 {
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
-       int cluster = blk_queue_cluster(q), nsegs = 0;
+       int nsegs = 0;
 
        for_each_bio(bio)
                bio_for_each_segment(bvec, bio, iter)
                        __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
-                                            &nsegs, &cluster);
+                                            &nsegs);
 
        return nsegs;
 }
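
With the flag gone, segment merging is gated by two unconditional checks.
The following is a hedged sketch condensing the logic visible in the hunks
above; the helper name is hypothetical, while queue_max_segment_size() and
the block-internal biovec_phys_mergeable() are the real functions the
patched code calls:

static bool example_can_merge(struct request_queue *q,
			      struct scatterlist *sg,
			      struct bio_vec *bvprv, struct bio_vec *bv)
{
	/* Never let a merge exceed the per-segment size limit... */
	if (sg->length + bv->bv_len > queue_max_segment_size(q))
		return false;

	/* ...and require physical contiguity within the queue's
	 * boundary masks, which biovec_phys_mergeable() verifies. */
	return biovec_phys_mergeable(q, bvprv, bv);
}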
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 696c04c1ab6c1f24a426345e2f13a9fc2f848e58..9c8b62f8c1807504669ae4376c31cd45ce2ded83 100644
@@ -109,7 +109,6 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
-       lim->cluster = 1;
        lim->zoned = BLK_ZONED_NONE;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
@@ -602,8 +601,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
-       t->cluster &= b->cluster;
-
        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 844a454a7b3a60a0c3186b589ab12daad0e9b3b8..5144707f25eafcf99ad736d42470f8c9325bad4b 100644
@@ -136,10 +136,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-       if (blk_queue_cluster(q))
-               return queue_var_show(queue_max_segment_size(q), (page));
-
-       return queue_var_show(PAGE_SIZE, (page));
+       return queue_var_show(queue_max_segment_size(q), (page));
 }
 
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4293dc1cd16053334e3db347967ec911ed7742ef..653ae90eec0be5ba7fe00bd8a0099d589ee955fa 100644
@@ -389,7 +389,6 @@ struct queue_limits {
 
        unsigned char           misaligned;
        unsigned char           discard_misaligned;
-       unsigned char           cluster;
        unsigned char           raid_partial_stripes_expensive;
        enum blk_zoned_model    zoned;
 };
@@ -785,11 +784,6 @@ static inline bool queue_is_rq_based(struct request_queue *q)
        return q->request_fn || q->mq_ops;
 }
 
-static inline unsigned int blk_queue_cluster(struct request_queue *q)
-{
-       return q->limits.cluster;
-}
-
 static inline enum blk_zoned_model
 blk_queue_zoned_model(struct request_queue *q)
 {