bnxt_en: Send PF driver unload notification to all VFs.
author    Jeffrey Huang <huangjw@broadcom.com>
          Fri, 26 Feb 2016 09:00:00 +0000 (04:00 -0500)
committer David S. Miller <davem@davemloft.net>
          Tue, 1 Mar 2016 20:37:00 +0000 (15:37 -0500)
During remove_one(), when SRIOV is enabled, the PF driver
should broadcast a PF driver unload notification to all
VFs that are attached to VMs. Upon receiving the PF
driver unload notification, the VF driver should print
a warning message to the message log.  Certain operations
on the VF may not succeed after the PF has unloaded.
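
In outline, the flow added by this patch is as follows (a
simplified sketch of the functions in the diff below; locking
and error handling omitted):

	/* PF side: called from remove_one() via bnxt_sriov_disable()
	 * while VFs are still assigned to VMs.  Passing a NULL vf
	 * broadcasts the event to all VFs (target_id 0xffff).
	 */
	bnxt_hwrm_fwd_async_event_cmpl(bp, NULL,
			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);

	/* VF side: bnxt_async_event_process() sets
	 * BNXT_HWRM_PF_UNLOAD_SP_EVENT and schedules sp_task, whose
	 * handler logs the notification.
	 */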

Signed-off-by: Jeffrey Huang <huangjw@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ff1507f3e226290ae343e6a750052d7c678185f2..80c441298e16cac48f63bf6bacff486c21076d69 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1239,13 +1239,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
        switch (event_id) {
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
                set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
-               schedule_work(&bp->sp_task);
+               break;
+       case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+               set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
                break;
        default:
                netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
                           event_id);
-               break;
+               goto async_event_process_exit;
        }
+       schedule_work(&bp->sp_task);
+async_event_process_exit:
        return 0;
 }
 
@@ -5559,6 +5563,8 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
                        }
                }
        }
+       if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
+               netdev_info(bp->dev, "Received PF driver unload event!\n");
 }
 
 #else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2be51b332652b0d0fd63629795599c10e6c25dea..cc798d5a3527ea0077fffde7b5258b801036e04f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -986,6 +986,7 @@ struct bnxt {
 #define BNXT_VXLAN_DEL_PORT_SP_EVENT   5
 #define BNXT_RESET_TASK_SP_EVENT       6
 #define BNXT_RST_RING_SP_EVENT         7
+#define BNXT_HWRM_PF_UNLOAD_SP_EVENT   8
 
        struct bnxt_pf_info     pf;
 #ifdef CONFIG_BNXT_SRIOV
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 0b4ca35cee9bd3fbb8931978fd906e474287708a..9f6b4cc6bd0ed7a42ec23a5d1bc9667149879172 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -522,6 +522,46 @@ err_out1:
        return rc;
 }
 
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+                                         struct bnxt_vf_info *vf,
+                                         u16 event_id)
+{
+       int rc = 0;
+       struct hwrm_fwd_async_event_cmpl_input req = {0};
+       struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_async_event_cmpl *async_cmpl;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+       if (vf)
+               req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+       else
+               /* broadcast this async event to all VFs */
+               req.encap_async_event_target_id = cpu_to_le16(0xffff);
+       async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+       async_cmpl->type =
+               cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+       async_cmpl->event_id = cpu_to_le16(event_id);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+                          rc);
+               goto fwd_async_event_cmpl_exit;
+       }
+
+       if (resp->error_code) {
+               netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+                          resp->error_code);
+               rc = -1;
+       }
+
+fwd_async_event_cmpl_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
 void bnxt_sriov_disable(struct bnxt *bp)
 {
        u16 num_vfs = pci_num_vf(bp->pdev);
@@ -530,6 +570,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
                return;
 
        if (pci_vfs_assigned(bp->pdev)) {
+               bnxt_hwrm_fwd_async_event_cmpl(
+                       bp, NULL,
+                       HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
                netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
                            num_vfs);
        } else {