blk-mq: abstract out queue map
author     Jens Axboe <axboe@kernel.dk>    Mon, 29 Oct 2018 19:06:14 +0000 (13:06 -0600)
committer  Jens Axboe <axboe@kernel.dk>    Wed, 7 Nov 2018 20:44:59 +0000 (13:44 -0700)
This is in preparation for allowing multiple sets of maps per
queue, if so desired.
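
The CPU-to-hardware-queue table now lives in its own struct
blk_mq_queue_map (mq_map plus nr_queues), and struct blk_mq_tag_set
carries an array of those maps sized HCTX_MAX_TYPES (currently 1).
Callers that used to hand over the whole tag set now hand over one map,
typically &set->map[0]. A minimal sketch of a converted ->map_queues
callback follows; the foo_* names are made up for illustration, and only
the blk-mq types and helpers are from this patch:

  #include <linux/blk-mq.h>
  #include <linux/blk-mq-pci.h>

  /* Hypothetical driver state; only the blk-mq calls are from this patch. */
  struct foo_dev {
          struct pci_dev *pdev;
  };

  static int foo_map_queues(struct blk_mq_tag_set *set)
  {
          struct foo_dev *foo = set->driver_data;
          /* One map per hctx type; only type 0 exists so far. */
          struct blk_mq_queue_map *qmap = &set->map[0];

          /* PCI devices can spread queues by irq affinity ... */
          if (foo->pdev)
                  return blk_mq_pci_map_queues(qmap, foo->pdev, 0);

          /* ... otherwise fall back to the default software spread. */
          return blk_mq_map_queues(qmap);
  }

blk_mq_alloc_tag_set() fills in map[0].nr_queues from set->nr_hw_queues
before calling into the driver, so the helpers only walk the map they are
handed.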

Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
15 files changed:
block/blk-mq-cpumap.c
block/blk-mq-pci.c
block/blk-mq-rdma.c
block/blk-mq-virtio.c
block/blk-mq.c
block/blk-mq.h
drivers/block/virtio_blk.c
drivers/nvme/host/pci.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_lib.c
drivers/scsi/smartpqi/smartpqi_init.c
drivers/scsi/virtio_scsi.c
include/linux/blk-mq-pci.h
include/linux/blk-mq-virtio.h
include/linux/blk-mq.h

diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 3eb169f15842c04723b51cbe9e63d47b9f5d156e..6e6686c559842a979a10c3eadb599cd7d5751f8f 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -30,10 +30,10 @@ static int get_first_sibling(unsigned int cpu)
        return cpu;
 }
 
-int blk_mq_map_queues(struct blk_mq_tag_set *set)
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
-       unsigned int *map = set->mq_map;
-       unsigned int nr_queues = set->nr_hw_queues;
+       unsigned int *map = qmap->mq_map;
+       unsigned int nr_queues = qmap->nr_queues;
        unsigned int cpu, first_sibling;
 
        for_each_possible_cpu(cpu) {
@@ -62,12 +62,12 @@ EXPORT_SYMBOL_GPL(blk_mq_map_queues);
  * We have no quick way of doing reverse lookups. This is only used at
  * queue init time, so runtime isn't important.
  */
-int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
+int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
 {
        int i;
 
        for_each_possible_cpu(i) {
-               if (index == mq_map[i])
+               if (index == qmap->mq_map[i])
                        return local_memory_node(cpu_to_node(i));
        }
 
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index db644ec624f501ee63f7d90d35412e0dbabf603a..40333d60a850d899f06a01943a59371b0c4c50ac 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
                            int offset)
 {
        const struct cpumask *mask;
        unsigned int queue, cpu;
 
-       for (queue = 0; queue < set->nr_hw_queues; queue++) {
+       for (queue = 0; queue < qmap->nr_queues; queue++) {
                mask = pci_irq_get_affinity(pdev, queue + offset);
                if (!mask)
                        goto fallback;
 
                for_each_cpu(cpu, mask)
-                       set->mq_map[cpu] = queue;
+                       qmap->mq_map[cpu] = queue;
        }
 
        return 0;
 
 fallback:
-       WARN_ON_ONCE(set->nr_hw_queues > 1);
-       blk_mq_clear_mq_map(set);
+       WARN_ON_ONCE(qmap->nr_queues > 1);
+       blk_mq_clear_mq_map(qmap);
        return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
index 996167f1de18dbd16696520cfdad96fc12500ae5..a71576aff3a59337b69ddd42c317ed6b6dcd07a7 100644
--- a/block/blk-mq-rdma.c
+++ b/block/blk-mq-rdma.c
@@ -41,12 +41,12 @@ int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
                        goto fallback;
 
                for_each_cpu(cpu, mask)
-                       set->mq_map[cpu] = queue;
+                       set->map[0].mq_map[cpu] = queue;
        }
 
        return 0;
 
 fallback:
-       return blk_mq_map_queues(set);
+       return blk_mq_map_queues(&set->map[0]);
 }
 EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index c3afbca1129956348389c3e6d26b1402c03fd6c3..661fbfef480f0b9361c6bc3426063177dc40c05e 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -29,7 +29,7 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
                struct virtio_device *vdev, int first_vec)
 {
        const struct cpumask *mask;
@@ -38,17 +38,17 @@ int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
        if (!vdev->config->get_vq_affinity)
                goto fallback;
 
-       for (queue = 0; queue < set->nr_hw_queues; queue++) {
+       for (queue = 0; queue < qmap->nr_queues; queue++) {
                mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);
                if (!mask)
                        goto fallback;
 
                for_each_cpu(cpu, mask)
-                       set->mq_map[cpu] = queue;
+                       qmap->mq_map[cpu] = queue;
        }
 
        return 0;
 fallback:
-       return blk_mq_map_queues(set);
+       return blk_mq_map_queues(qmap);
 }
 EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 766facfa1f081500bdc1c2c4a6f6117234171d15..fac88d16988bffa0deaea5e2d378813b34660480 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1975,7 +1975,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
        struct blk_mq_tags *tags;
        int node;
 
-       node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+       node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
        if (node == NUMA_NO_NODE)
                node = set->numa_node;
 
@@ -2031,7 +2031,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
        size_t rq_size, left;
        int node;
 
-       node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+       node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
        if (node == NUMA_NO_NODE)
                node = set->numa_node;
 
@@ -2322,7 +2322,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
         * If the cpu isn't present, the cpu is mapped to first hctx.
         */
        for_each_possible_cpu(i) {
-               hctx_idx = set->mq_map[i];
+               hctx_idx = set->map[0].mq_map[i];
                /* unmapped hw queue can be remapped after CPU topo changed */
                if (!set->tags[hctx_idx] &&
                    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2332,7 +2332,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                         * case, remap the current ctx to hctx[0] which
                         * is guaranteed to always have tags allocated
                         */
-                       set->mq_map[i] = 0;
+                       set->map[0].mq_map[i] = 0;
                }
 
                ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2585,7 +2585,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                int node;
                struct blk_mq_hw_ctx *hctx;
 
-               node = blk_mq_hw_queue_to_node(set->mq_map, i);
+               node = blk_mq_hw_queue_to_node(&set->map[0], i);
                /*
                 * If the hw queue has been mapped to another numa node,
                 * we need to realloc the hctx. If allocation fails, fallback
@@ -2791,18 +2791,18 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
                 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
                 *      mask = get_cpu_mask(queue)
                 *      for_each_cpu(cpu, mask)
-                *              set->mq_map[cpu] = queue;
+                *              set->map.mq_map[cpu] = queue;
                 * }
                 *
                 * When we need to remap, the table has to be cleared for
                 * killing stale mapping since one CPU may not be mapped
                 * to any hw queue.
                 */
-               blk_mq_clear_mq_map(set);
+               blk_mq_clear_mq_map(&set->map[0]);
 
                return set->ops->map_queues(set);
        } else
-               return blk_mq_map_queues(set);
+               return blk_mq_map_queues(&set->map[0]);
 }
 
 /*
@@ -2857,10 +2857,12 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                return -ENOMEM;
 
        ret = -ENOMEM;
-       set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
-                                  GFP_KERNEL, set->numa_node);
-       if (!set->mq_map)
+       set->map[0].mq_map = kcalloc_node(nr_cpu_ids,
+                                         sizeof(*set->map[0].mq_map),
+                                         GFP_KERNEL, set->numa_node);
+       if (!set->map[0].mq_map)
                goto out_free_tags;
+       set->map[0].nr_queues = set->nr_hw_queues;
 
        ret = blk_mq_update_queue_map(set);
        if (ret)
@@ -2876,8 +2878,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
        return 0;
 
 out_free_mq_map:
-       kfree(set->mq_map);
-       set->mq_map = NULL;
+       kfree(set->map[0].mq_map);
+       set->map[0].mq_map = NULL;
 out_free_tags:
        kfree(set->tags);
        set->tags = NULL;
@@ -2892,8 +2894,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
        for (i = 0; i < nr_cpu_ids; i++)
                blk_mq_free_map_and_requests(set, i);
 
-       kfree(set->mq_map);
-       set->mq_map = NULL;
+       kfree(set->map[0].mq_map);
+       set->map[0].mq_map = NULL;
 
        kfree(set->tags);
        set->tags = NULL;
@@ -3054,7 +3056,7 @@ fallback:
                        pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
                                        nr_hw_queues, prev_nr_hw_queues);
                        set->nr_hw_queues = prev_nr_hw_queues;
-                       blk_mq_map_queues(set);
+                       blk_mq_map_queues(&set->map[0]);
                        goto fallback;
                }
                blk_mq_map_swqueue(q);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9536be06d0224f549ab737e63a3a74052b3272a5..889f0069dd805f8e3a88b06ff7c67d2e76b04bbc 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -70,14 +70,14 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 /*
  * CPU -> queue mappings
  */
-extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
+extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
 
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                int cpu)
 {
        struct blk_mq_tag_set *set = q->tag_set;
 
-       return q->queue_hw_ctx[set->mq_map[cpu]];
+       return q->queue_hw_ctx[set->map[0].mq_map[cpu]];
 }
 
 /*
@@ -206,12 +206,12 @@ static inline void blk_mq_put_driver_tag(struct request *rq)
        __blk_mq_put_driver_tag(hctx, rq);
 }
 
-static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
+static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
 {
        int cpu;
 
        for_each_possible_cpu(cpu)
-               set->mq_map[cpu] = 0;
+               qmap->mq_map[cpu] = 0;
 }
 
 #endif
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 086c6bb12baaa696fe9be0f33e77c5962d10cd03..6e869d05f91e2f1d0620f654f17cb3c8f6bb1c9c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -624,7 +624,7 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
 {
        struct virtio_blk *vblk = set->driver_data;
 
-       return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
+       return blk_mq_virtio_map_queues(&set->map[0], vblk->vdev, 0);
 }
 
 #ifdef CONFIG_VIRTIO_BLK_SCSI
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c33bb201b8846fae0319e1c575f9737af79a1944..49ad854d1b9119ced816d117e711aac5d1b9dfee 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -435,7 +435,7 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 {
        struct nvme_dev *dev = set->driver_data;
 
-       return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev),
+       return blk_mq_pci_map_queues(&set->map[0], to_pci_dev(dev->dev),
                        dev->num_vecs > 1 ? 1 /* admin queue */ : 0);
 }
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 29dfd1bd164dcc9ecc49a0115150feeeda1e229f..fdf3e52ee908200d2e6011e4f9a1147313c85074 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -6934,11 +6934,12 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
 {
        int rc;
        scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+       struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
 
        if (USER_CTRL_IRQ(vha->hw))
-               rc = blk_mq_map_queues(&shost->tag_set);
+               rc = blk_mq_map_queues(qmap);
        else
-               rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev, 0);
+               rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0);
        return rc;
 }
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 651be30ba96a3e41d42c413835f5def294dc2c38..ed81b8e74cfee9a1bb07e6208222035fb7ad7b4d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1812,7 +1812,7 @@ static int scsi_map_queues(struct blk_mq_tag_set *set)
 
        if (shost->hostt->map_queues)
                return shost->hostt->map_queues(shost);
-       return blk_mq_map_queues(set);
+       return blk_mq_map_queues(&set->map[0]);
 }
 
 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index a25a07a0b7f0afdfb4121be2053d8158874bbf6e..bac084260d80e87295e5618e72726491110ed0ad 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -5319,7 +5319,8 @@ static int pqi_map_queues(struct Scsi_Host *shost)
 {
        struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
-       return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
+       return blk_mq_pci_map_queues(&shost->tag_set.map[0],
+                                       ctrl_info->pci_dev, 0);
 }
 
 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 1c72db94270e89990b59e9c2a410110580c50827..c3c95b3142868efac137856517fca495ee3374ee 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -719,8 +719,9 @@ static void virtscsi_target_destroy(struct scsi_target *starget)
 static int virtscsi_map_queues(struct Scsi_Host *shost)
 {
        struct virtio_scsi *vscsi = shost_priv(shost);
+       struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
 
-       return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
+       return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
 }
 
 /*
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
index 9f4c17f0d2d8f9bb08552e8c0d2b1cf49d7d8750..0b1f45c62623891ac1743a3f8ded7a3da12a80b1 100644
--- a/include/linux/blk-mq-pci.h
+++ b/include/linux/blk-mq-pci.h
@@ -2,10 +2,10 @@
 #ifndef _LINUX_BLK_MQ_PCI_H
 #define _LINUX_BLK_MQ_PCI_H
 
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
 struct pci_dev;
 
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
                          int offset);
 
 #endif /* _LINUX_BLK_MQ_PCI_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
index 69b4da262c4508ba98560e79fb986013723fa824..687ae287e1dc2c447add315f1d1abe8e90e4153b 100644
--- a/include/linux/blk-mq-virtio.h
+++ b/include/linux/blk-mq-virtio.h
@@ -2,10 +2,10 @@
 #ifndef _LINUX_BLK_MQ_VIRTIO_H
 #define _LINUX_BLK_MQ_VIRTIO_H
 
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
 struct virtio_device;
 
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
                struct virtio_device *vdev, int first_vec);
 
 #endif /* _LINUX_BLK_MQ_VIRTIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index d83a26fb37e5bda1426d0a8b29f990165a7fe38a..1761648886285fedf935ac99dfac2c74da52870c 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -74,10 +74,19 @@ struct blk_mq_hw_ctx {
        struct srcu_struct      srcu[0];
 };
 
+struct blk_mq_queue_map {
+       unsigned int *mq_map;
+       unsigned int nr_queues;
+};
+
+enum {
+       HCTX_MAX_TYPES = 1,
+};
+
 struct blk_mq_tag_set {
-       unsigned int            *mq_map;
+       struct blk_mq_queue_map map[HCTX_MAX_TYPES];
        const struct blk_mq_ops *ops;
-       unsigned int            nr_hw_queues;
+       unsigned int            nr_hw_queues;   /* nr hw queues across maps */
        unsigned int            queue_depth;    /* max hw supported */
        unsigned int            reserved_tags;
        unsigned int            cmd_size;       /* per-request extra data */
@@ -295,7 +304,7 @@ void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout);
 
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);