nvme: don't pass struct nvme_ns to nvme_config_discard
author Christoph Hellwig <hch@lst.de>
Thu, 2 Nov 2017 18:28:54 +0000 (21:28 +0300)
committer Jens Axboe <axboe@kernel.dk>
Sat, 11 Nov 2017 02:53:25 +0000 (19:53 -0700)
Pass the controller, stream alignment and request queue explicitly instead of deriving them from a struct nvme_ns, to allow reusing this function for the multipath node.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
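
For illustration, the new signature lets a caller that owns only a request queue, with no struct nvme_ns behind it, set up discard. A minimal sketch of such a caller, assuming a hypothetical multipath setup helper that is not part of this patch (passing 0 for stream_alignment when no streams data is available):

	/* Hypothetical caller: configure discard on a namespace-less queue,
	 * e.g. the multipath node's queue this change prepares for.
	 */
	static void example_mpath_setup(struct nvme_ctrl *ctrl,
			struct request_queue *q)
	{
		/* Same ONCS gate as the existing call site below. */
		if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
			nvme_config_discard(ctrl, 0, q);
	}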
drivers/nvme/host/core.c

index 88d886b390f41327c48cd802214384371b286d89..551ec5df6a97f119c494237df8c220ea8e4ee880 100644
@@ -1118,29 +1118,26 @@ static void nvme_set_chunk_size(struct nvme_ns *ns)
        blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
 }
 
-static void nvme_config_discard(struct nvme_ns *ns)
+static void nvme_config_discard(struct nvme_ctrl *ctrl,
+               unsigned stream_alignment, struct request_queue *queue)
 {
-       struct nvme_ctrl *ctrl = ns->ctrl;
-       u32 logical_block_size = queue_logical_block_size(ns->queue);
+       u32 size = queue_logical_block_size(queue);
+
+       if (stream_alignment)
+               size *= stream_alignment;
 
        BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
                        NVME_DSM_MAX_RANGES);
 
-       if (ctrl->nr_streams && ns->sws && ns->sgs) {
-               unsigned int sz = logical_block_size * ns->sws * ns->sgs;
+       queue->limits.discard_alignment = size;
+       queue->limits.discard_granularity = size;
 
-               ns->queue->limits.discard_alignment = sz;
-               ns->queue->limits.discard_granularity = sz;
-       } else {
-               ns->queue->limits.discard_alignment = logical_block_size;
-               ns->queue->limits.discard_granularity = logical_block_size;
-       }
-       blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
-       blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+       blk_queue_max_discard_sectors(queue, UINT_MAX);
+       blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
+       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
 
        if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
-               blk_queue_max_write_zeroes_sectors(ns->queue, UINT_MAX);
+               blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
 }
 
 static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
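
The streams math is unchanged, only hoisted to the caller: stream_alignment is ns->sws * ns->sgs, so size ends up as logical_block_size * sws * sgs, exactly what the removed if/else computed. A worked example with invented values (the sws and sgs figures here are illustrative, not from the patch):

	u32 size = 512;				/* queue_logical_block_size(queue) */
	unsigned stream_alignment = 16 * 4;	/* ns->sws * ns->sgs = 64 */

	if (stream_alignment)
		size *= stream_alignment;	/* 512 * 64 = 32768-byte granularity */
	/* With no streams (stream_alignment == 0), size stays at 512. */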
@@ -1164,6 +1161,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 {
        struct nvme_ns *ns = disk->private_data;
        struct nvme_ctrl *ctrl = ns->ctrl;
+       unsigned stream_alignment = 0;
        u16 bs;
 
        /*
@@ -1183,6 +1181,9 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
        else
                ns->pi_type = 0;
 
+       if (ctrl->nr_streams && ns->sws && ns->sgs)
+               stream_alignment = ns->sws * ns->sgs;
+
        blk_mq_freeze_queue(disk->queue);
        blk_integrity_unregister(disk);
 
@@ -1198,7 +1199,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
                set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
 
        if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
-               nvme_config_discard(ns);
+               nvme_config_discard(ctrl, stream_alignment, disk->queue);
        blk_mq_unfreeze_queue(disk->queue);
 }
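
As a side note, the unchanged BUILD_BUG_ON asserts that one page can hold a full DSM range list; a worked check, assuming 4 KiB pages:

	/* struct nvme_dsm_range is 16 bytes (__le32 cattr, __le32 nlb,
	 * __le64 slba), so PAGE_SIZE / sizeof(struct nvme_dsm_range)
	 * = 4096 / 16 = 256, which satisfies >= NVME_DSM_MAX_RANGES (256),
	 * the maximum number of ranges a single DSM command can carry.
	 */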