scsi: lpfc: Adjust default value of lpfc_nvmet_mrq
authorJames Smart <jsmart2021@gmail.com>
Tue, 21 Nov 2017 00:00:36 +0000 (16:00 -0800)
committerMartin K. Petersen <martin.petersen@oracle.com>
Tue, 5 Dec 2017 01:32:54 +0000 (20:32 -0500)
The current default for async hw receive queues is 1, which presents
issues under heavy load as number of queues influence the available
async receive buffer limits.

Raise the default to either the current hw limit (16) or the number
of hw qs configured (io channel value), whichever is smaller.

Revise the attribute definition for mrq to better reflect what we do for
hw queues. E.g. 0 means default to optimal (# of cpus), non-zero
specifies a specific limit. Before this change, mrq=0 meant target mode
was disabled. As 0 now has a different meaning, rework the if tests to
use the better nvmet_support check.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nvmet.h

index 82f6e219ee3490354bca791dfd2d80866bb7e857..5d83734f6c680b6de3f237011b0e94bc3298ae77 100644 (file)
@@ -3366,12 +3366,13 @@ LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
 
 /*
  * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
+ * lpfc_nvmet_mrq = 0  driver will calcualte optimal number of RQ pairs
  * lpfc_nvmet_mrq = 1  use a single RQ pair
  * lpfc_nvmet_mrq >= 2  use specified RQ pairs for MRQ
  *
  */
 LPFC_ATTR_R(nvmet_mrq,
-           1, 1, 16,
+           LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
            "Specify number of RQ pairs for processing NVMET cmds");
 
 /*
@@ -6362,6 +6363,9 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
                                phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
                }
 
+               if (!phba->cfg_nvmet_mrq)
+                       phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+
                /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
                if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
                        phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
@@ -6369,10 +6373,13 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
                                        "6018 Adjust lpfc_nvmet_mrq to %d\n",
                                        phba->cfg_nvmet_mrq);
                }
+               if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+                       phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
+
        } else {
                /* Not NVME Target mode.  Turn off Target parameters. */
                phba->nvmet_support = 0;
-               phba->cfg_nvmet_mrq = 0;
+               phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
                phba->cfg_nvmet_fb_size = 0;
        }
 
index 4df5a21bd93b8e3cf4a6073efd632a295cc7c43b..b7f57492aefcf4929d23bcd953b67ecda8457239 100644 (file)
@@ -3213,7 +3213,7 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
                        return 1;
        }
 
-       if (eqidx < phba->cfg_nvmet_mrq) {
+       if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
                /* NVMET CQset */
                qp = phba->sli4_hba.nvmet_cqset[eqidx];
                *len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len);
index dc7a5adc156d8f4561a99f25c220f76916b2799b..a6111c60393a693cc8ea96e470c26ea659782751 100644 (file)
@@ -7933,8 +7933,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
                phba->cfg_fcp_io_channel = io_channel;
        if (phba->cfg_nvme_io_channel > io_channel)
                phba->cfg_nvme_io_channel = io_channel;
-       if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
-               phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+       if (phba->nvmet_support) {
+               if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+                       phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+       }
+       if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+               phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
 
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
@@ -8448,13 +8452,15 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
        /* Release NVME CQ mapping array */
        lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
 
-       lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
-                                       phba->cfg_nvmet_mrq);
+       if (phba->nvmet_support) {
+               lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+                                        phba->cfg_nvmet_mrq);
 
-       lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
-                                       phba->cfg_nvmet_mrq);
-       lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
-                                       phba->cfg_nvmet_mrq);
+               lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+                                        phba->cfg_nvmet_mrq);
+               lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+                                        phba->cfg_nvmet_mrq);
+       }
 
        /* Release mailbox command work queue */
        __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
@@ -9009,19 +9015,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
                for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
                        lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
 
-       /* Unset NVMET MRQ queue */
-       if (phba->sli4_hba.nvmet_mrq_hdr) {
-               for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
-                       lpfc_rq_destroy(phba,
+       if (phba->nvmet_support) {
+               /* Unset NVMET MRQ queue */
+               if (phba->sli4_hba.nvmet_mrq_hdr) {
+                       for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+                               lpfc_rq_destroy(
+                                       phba,
                                        phba->sli4_hba.nvmet_mrq_hdr[qidx],
                                        phba->sli4_hba.nvmet_mrq_data[qidx]);
-       }
+               }
 
-       /* Unset NVMET CQ Set complete queue */
-       if (phba->sli4_hba.nvmet_cqset) {
-               for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
-                       lpfc_cq_destroy(phba,
-                                       phba->sli4_hba.nvmet_cqset[qidx]);
+               /* Unset NVMET CQ Set complete queue */
+               if (phba->sli4_hba.nvmet_cqset) {
+                       for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+                               lpfc_cq_destroy(
+                                       phba, phba->sli4_hba.nvmet_cqset[qidx]);
+               }
        }
 
        /* Unset FCP response complete queue */
@@ -10397,7 +10406,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
            !phba->nvme_support) {
                phba->nvme_support = 0;
                phba->nvmet_support = 0;
-               phba->cfg_nvmet_mrq = 0;
+               phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
                phba->cfg_nvme_io_channel = 0;
                phba->io_channel_irqs = phba->cfg_fcp_io_channel;
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
index 25a65b0bb7f33a8f764c8d0ac816fa98fcd85434..6723e7b81946798b829fa698d4c4927b75959267 100644 (file)
 #define LPFC_NVMET_RQE_DEF_COUNT       512
 #define LPFC_NVMET_SUCCESS_LEN 12
 
+#define LPFC_NVMET_MRQ_OFF             0xffff
+#define LPFC_NVMET_MRQ_AUTO            0
+#define LPFC_NVMET_MRQ_MAX             16
+
 /* Used for NVME Target */
 struct lpfc_nvmet_tgtport {
        struct lpfc_hba *phba;