scsi: qla2xxx: Return busy if rport going away
author     Darren Trapp <darren.trapp@cavium.com>
Wed, 21 Mar 2018 06:09:35 +0000 (23:09 -0700)
committer  Martin K. Petersen <martin.petersen@oracle.com>
Wed, 21 Mar 2018 22:38:54 +0000 (18:38 -0400)
This patch adds a mechanism to return EBUSY if the rport is going away,
to prevent exhausting the FC-NVMe layer's retry counter.

Signed-off-by: Darren Trapp <darren.trapp@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_nvme.c
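
For context, the following is a minimal standalone sketch (not part of the patch) of the flag-gating pattern the diff below introduces: completion statuses that indicate the remote port is going away set NVME_FLAG_RESETTING, the submission path then returns -EBUSY so the FC-NVMe transport stalls the queue instead of consuming its retry budget, and re-registering the remote port clears the flag again. Only the NVME_FLAG_* values come from the patch; struct fc_port_stub and the helper names are simplified stand-ins, not the driver's real types.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NVME_FLAG_REGISTERED	4
#define NVME_FLAG_DELETING	2
#define NVME_FLAG_RESETTING	1

struct fc_port_stub {
	uint8_t nvme_flag;
};

/* Completion path: firmware reported CS_RESET / CS_PORT_UNAVAILABLE. */
static void mark_port_resetting(struct fc_port_stub *fcport)
{
	fcport->nvme_flag |= NVME_FLAG_RESETTING;
}

/* Registration path: remote port is usable again, clear the gate. */
static void mark_port_registered(struct fc_port_stub *fcport)
{
	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
}

/* Submission path: refuse new commands with -EBUSY while resetting. */
static int post_cmd(struct fc_port_stub *fcport)
{
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;	/* stall the I/O queue, do not burn a retry */
	return 0;		/* real driver would build and ring the IOCB */
}

int main(void)
{
	struct fc_port_stub fcport = { .nvme_flag = 0 };

	mark_port_resetting(&fcport);
	printf("while resetting: %d\n", post_cmd(&fcport));	/* -EBUSY */

	mark_port_registered(&fcport);
	printf("after re-register: %d\n", post_cmd(&fcport));	/* 0 */
	return 0;
}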

diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index cba749d27154148500b9ffc8a7c8816b83d4cb47..59c449b141cde3f993f852b4a7c76183bd4b32f0 100644
@@ -2356,6 +2356,7 @@ typedef struct fc_port {
        uint8_t nvme_flag;
 #define NVME_FLAG_REGISTERED 4
 #define NVME_FLAG_DELETING 2
+#define NVME_FLAG_RESETTING 1
 
        struct fc_port *conflict;
        unsigned char logout_completed;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 913cd6cf5907fd5ad5605ea1d0a54f9289fa4145..bc2c7ded6949f29624d78fabee9caa06a2e92fdc 100644
@@ -1910,9 +1910,11 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
                                ret = QLA_SUCCESS;
                        break;
 
-                       case CS_ABORTED:
                        case CS_RESET:
                        case CS_PORT_UNAVAILABLE:
+                               fcport->nvme_flag |= NVME_FLAG_RESETTING;
+                               /* fall through */
+                       case CS_ABORTED:
                        case CS_PORT_LOGGED_OUT:
                        case CS_PORT_BUSY:
                                ql_log(ql_log_warn, fcport->vha, 0x5060,
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 951fbbab961f619fc0754e1ca384a41c4b08451a..adeda6a4e4fd3a6ceff725eb961e23d03afb0519 100644
@@ -36,6 +36,7 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
                return 0;
 
        INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
+       fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
 
        memset(&req, 0, sizeof(struct nvme_fc_port_info));
        req.port_name = wwn_to_u64(fcport->port_name);
@@ -193,9 +194,9 @@ static void qla_nvme_abort_work(struct work_struct *work)
        rval = ha->isp_ops->abort_command(sp);
 
        ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
-           "%s: %s command for sp=%p on fcport=%p rval=%x\n", __func__,
-           (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
-           sp, fcport, rval);
+           "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
+           __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
+           sp, sp->handle, fcport, rval);
 }
 
 static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
@@ -327,7 +328,7 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
        }
 
        if (index == req->num_outstanding_cmds) {
-               rval = -1;
+               rval = -EBUSY;
                goto queuing_error;
        }
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
@@ -341,7 +342,7 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
                        req->cnt = req->length - (req->ring_index - cnt);
 
                if (req->cnt < (req_cnt + 2)){
-                       rval = -1;
+                       rval = -EBUSY;
                        goto queuing_error;
                }
        }
@@ -476,14 +477,15 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
        fc_port_t *fcport;
        struct srb_iocb *nvme;
        struct scsi_qla_host *vha;
-       int rval = QLA_FUNCTION_FAILED;
+       int rval = -ENODEV;
        srb_t *sp;
        struct qla_qpair *qpair = hw_queue_handle;
        struct nvme_private *priv;
        struct qla_nvme_rport *qla_rport = rport->private;
 
-       if (!fd) {
-               ql_log(ql_log_warn, NULL, 0x2134, "NO NVMe FCP request\n");
+       if (!fd || !qpair) {
+               ql_log(ql_log_warn, NULL, 0x2134,
+                   "NO NVMe request or Queue Handle\n");
                return rval;
        }
 
@@ -495,13 +497,21 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
        }
 
        vha = fcport->vha;
-       if (!qpair)
+
+       /*
+        * If we know the dev is going away while the transport is still sending
+        * IO's return busy back to stall the IO Q.  This happens when the
+        * link goes away and fw hasn't notified us yet, but IO's are being
+        * returned. If the dev comes back quickly we won't exhaust the IO
+        * retry count at the core.
+        */
+       if (fcport->nvme_flag & NVME_FLAG_RESETTING)
                return -EBUSY;
 
        /* Alloc SRB structure */
        sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
        if (!sp)
-               return -EIO;
+               return -EBUSY;
 
        atomic_set(&sp->ref_count, 1);
        init_waitqueue_head(&sp->nvme_ls_waitq);
@@ -519,7 +529,6 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
                    "qla2x00_start_nvme_mq failed = %d\n", rval);
                atomic_dec(&sp->ref_count);
                wake_up(&sp->nvme_ls_waitq);
-               return -EIO;
        }
 
        return rval;
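
A note on the error-code choice: returning -EBUSY (instead of -EIO or a bare -1) signals a transient, retry-later condition to the caller, which is what lets the core avoid counting these attempts against its retry limit. A rough caller-side sketch of that distinction, using hypothetical names rather than the FC-NVMe transport's actual code:

#include <errno.h>

enum dispatch_status { DISPATCH_OK, DISPATCH_REQUEUE, DISPATCH_ERROR };

/*
 * Hypothetical caller-side policy: -EBUSY means "port in transition or
 * queue full, resubmit shortly" and is not charged against the command's
 * retry budget; any other failure is terminal for this attempt.
 */
static enum dispatch_status map_post_cmd_result(int rval)
{
	if (rval == 0)
		return DISPATCH_OK;
	if (rval == -EBUSY)
		return DISPATCH_REQUEUE;
	return DISPATCH_ERROR;
}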