net: hns3: Fix the back pressure setting when SRIOV is enabled
author Yunsheng Lin <linyunsheng@huawei.com>
Tue, 15 May 2018 18:20:11 +0000 (19:20 +0100)
committer David S. Miller <davem@davemloft.net>
Wed, 16 May 2018 15:33:08 +0000 (11:33 -0400)
When SRIOV is enabled, the Qset to TC mapping is no longer a
one-to-one relation.

This patch fixes it by mapping every PF's and VF's Qset to its TC.
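
For reference, below is a minimal standalone sketch (illustrative
only, not driver code) of the qs_id decomposition this relies on,
assuming the GENMASK-style field layout added in hclge_tm.h; the
qs_offset value is hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int qs_offset = 40;         /* hypothetical per-vport Qset base */
	unsigned int tc = 3;
	unsigned int qs_id = qs_offset + tc; /* 43 */

	/* bits 9:5 select one of the 32 groups (HCLGE_BP_GRP_ID_M) */
	unsigned int grp = (qs_id >> 5) & 0x1f;
	/* bits 4:0 select the bit inside that group (HCLGE_BP_SUB_GRP_ID_M) */
	unsigned int sub_grp = qs_id & 0x1f;
	unsigned int qs_bitmap = 1u << sub_grp;

	printf("qs_id %u -> group %u, bitmap 0x%08x\n", qs_id, grp, qs_bitmap);
	return 0;
}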

Fixes: 848440544b41 ("net: hns3: Add support of TX Scheduler & Shaper to HNS3 driver")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h

index c69ecab460f9a42174c8e4ce3f1a754587809166..262c125f81375a8f91f9bccc899144ad01f2605a 100644
@@ -500,7 +500,8 @@ static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
-static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
+static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
+                             u32 bit_map)
 {
        struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
        struct hclge_desc desc;
@@ -511,9 +512,8 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
        bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
 
        bp_to_qs_map_cmd->tc_id = tc;
-
-       /* Qset and tc is one by one mapping */
-       bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);
+       bp_to_qs_map_cmd->qs_group_id = grp_id;
+       bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
 
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
@@ -1167,6 +1167,41 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
                                      hdev->tm_info.hw_pfc_map);
 }
 
+/* Each TC has up to 1024 queue sets to back-pressure. They are
+ * divided into 32 groups of 32 queue sets each, so each group
+ * can be represented by a u32 bitmap.
+ */
+static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
+{
+       struct hclge_vport *vport = hdev->vport;
+       u32 i, k, qs_bitmap;
+       int ret;
+
+       for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
+               qs_bitmap = 0;
+
+               for (k = 0; k < hdev->num_alloc_vport; k++) {
+                       u16 qs_id = vport->qs_offset + tc;
+                       u8 grp, sub_grp;
+
+                       grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M,
+                                            HCLGE_BP_GRP_ID_S);
+                       sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+                                                HCLGE_BP_SUB_GRP_ID_S);
+                       if (i == grp)
+                               qs_bitmap |= (1 << sub_grp);
+
+                       vport++;
+               }
+
+               ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
 {
        bool tx_en, rx_en;
@@ -1218,7 +1253,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
                dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);
 
        for (i = 0; i < hdev->tm_info.num_tc; i++) {
-               ret = hclge_tm_qs_bp_cfg(hdev, i);
+               ret = hclge_bp_setup_hw(hdev, i);
                if (ret)
                        return ret;
        }
index 2dbe177581e982c1ecf3e115a6cf9d078f680df3..c2b6e8a6700f067fa38511fdc34b27a1eb1dc75e 100644
@@ -89,6 +89,11 @@ struct hclge_pg_shapping_cmd {
        __le32 pg_shapping_para;
 };
 
+#define HCLGE_BP_GRP_NUM               32
+#define HCLGE_BP_SUB_GRP_ID_S          0
+#define HCLGE_BP_SUB_GRP_ID_M          GENMASK(4, 0)
+#define HCLGE_BP_GRP_ID_S              5
+#define HCLGE_BP_GRP_ID_M              GENMASK(9, 5)
 struct hclge_bp_to_qs_map_cmd {
        u8 tc_id;
        u8 rsvd[2];
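
For illustration, the following userspace simulation mirrors the
per-group bitmap accumulation that hclge_bp_setup_hw() performs;
the vport count, TC count and per-vport Qset layout below are
assumptions for the example, not values taken from the driver:

#include <stdio.h>

#define BP_GRP_NUM 32  /* mirrors HCLGE_BP_GRP_NUM */
#define NUM_VPORT  4   /* hypothetical: 1 PF + 3 VFs */
#define NUM_TC     2   /* hypothetical TC count */

int main(void)
{
	unsigned int tc = 1; /* configure back pressure for TC 1 */
	unsigned int i, k;

	for (i = 0; i < BP_GRP_NUM; i++) {
		unsigned int qs_bitmap = 0;

		for (k = 0; k < NUM_VPORT; k++) {
			/* hypothetical layout: vport k's Qset for this TC */
			unsigned int qs_id = k * NUM_TC + tc;
			unsigned int grp = (qs_id >> 5) & 0x1f; /* bits 9:5 */
			unsigned int sub_grp = qs_id & 0x1f;    /* bits 4:0 */

			if (i == grp)
				qs_bitmap |= 1u << sub_grp;
		}

		/* with 4 vports and 2 TCs, only group 0 has bits set */
		if (qs_bitmap)
			printf("tc %u grp %u bitmap 0x%08x\n", tc, i, qs_bitmap);
	}
	return 0;
}

This prints "tc 1 grp 0 bitmap 0x000000aa": the Qsets of all four
vports for TC 1 (qs_ids 1, 3, 5 and 7) land in group 0, and one
hclge_tm_qs_bp_cfg() command per group programs the hardware.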