bnxt_en: Introduce bnxt_get_hwrm_resp_addr & bnxt_get_hwrm_seq_id routines.
author		Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
		Thu, 20 Dec 2018 08:38:47 +0000 (03:38 -0500)
committer	David S. Miller <davem@davemloft.net>
		Thu, 20 Dec 2018 16:26:16 +0000 (08:26 -0800)
These routines will be enhanced in a subsequent patch to return the
HWRM response address and sequence ID of the second firmware
communication channel, respectively.

Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c

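For context, a minimal sketch of how these helpers might be extended once a
second firmware channel is wired in is shown below.  The names used here
(bnxt_req_uses_second_chnl(), bp->hwrm_cmd_resp2_addr, bp->hwrm_cmd_seq2) are
illustrative placeholders, not part of this patch, and the actual follow-up
may also change the helper signatures (e.g. pass a channel selector to
bnxt_get_hwrm_seq_id()).

	/* Illustrative sketch only -- not part of this patch. */
	static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req)
	{
		/* Hypothetical predicate: does this request go over the
		 * second firmware channel?
		 */
		if (bnxt_req_uses_second_chnl(bp, req))
			return bp->hwrm_cmd_resp2_addr;	/* 2nd channel's response buffer */
		return bp->hwrm_cmd_resp_addr;		/* default channel's response buffer */
	}

	static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, bool second_chnl)
	{
		/* Each channel keeps its own HWRM sequence counter. */
		return second_chnl ? bp->hwrm_cmd_seq2++ : bp->hwrm_cmd_seq++;
	}
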
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 079e1eefe33b757972877c21470f2d5196efd15a..46bbe42412d3f94f05f231da551be332bcd10fea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3759,7 +3759,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
        u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
 
-       req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
+       req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp));
        memset(resp, 0, PAGE_SIZE);
        cp_ring_id = le16_to_cpu(req->cmpl_ring);
        intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
@@ -4143,12 +4143,11 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
                                             struct bnxt_ntuple_filter *fltr)
 {
-       int rc = 0;
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
        struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
-       struct hwrm_cfa_ntuple_filter_alloc_output *resp =
-               bp->hwrm_cmd_resp_addr;
+       struct hwrm_cfa_ntuple_filter_alloc_output *resp;
        struct flow_keys *keys = &fltr->fkeys;
-       struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
+       int rc = 0;
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
        req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
@@ -4194,8 +4193,10 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
        req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-       if (!rc)
+       if (!rc) {
+               resp = bnxt_get_hwrm_resp_addr(bp, &req);
                fltr->filter_id = resp->ntuple_filter_id;
+       }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 57598164830c229b90c0f6cf3c3dc1df486e0d87..214af8fe84c2574dd220e030cf6863b6b671ffd4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1673,6 +1673,19 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
        }
 }
 
+static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req)
+{
+       return bp->hwrm_cmd_resp_addr;
+}
+
+static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp)
+{
+       u16 seq_id;
+
+       seq_id = bp->hwrm_cmd_seq++;
+       return seq_id;
+}
+
 extern const u16 bnxt_lhint_arr[];
 
 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 749f63beddd8d4131e65448890b1d2788fece1a7..b39584896392fbe5311782718f5c62c559830a63 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -420,11 +420,11 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    __le16 ref_flow_handle,
                                    __le32 tunnel_handle, __le16 *flow_handle)
 {
-       struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_tc_actions *actions = &flow->actions;
        struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
        struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
        struct hwrm_cfa_flow_alloc_input req = { 0 };
+       struct hwrm_cfa_flow_alloc_output *resp;
        u16 flow_flags = 0, action_flags = 0;
        int rc;
 
@@ -527,8 +527,10 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
 
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-       if (!rc)
+       if (!rc) {
+               resp = bnxt_get_hwrm_resp_addr(bp, &req);
                *flow_handle = resp->flow_handle;
+       }
        mutex_unlock(&bp->hwrm_cmd_lock);
 
        if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
@@ -544,9 +546,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
                                       __le32 ref_decap_handle,
                                       __le32 *decap_filter_handle)
 {
-       struct hwrm_cfa_decap_filter_alloc_output *resp =
-                                               bp->hwrm_cmd_resp_addr;
        struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
+       struct hwrm_cfa_decap_filter_alloc_output *resp;
        struct ip_tunnel_key *tun_key = &flow->tun_key;
        u32 enables = 0;
        int rc;
@@ -599,10 +600,12 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
 
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-       if (!rc)
+       if (!rc) {
+               resp = bnxt_get_hwrm_resp_addr(bp, &req);
                *decap_filter_handle = resp->decap_filter_id;
-       else
+       } else {
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+       }
        mutex_unlock(&bp->hwrm_cmd_lock);
 
        if (rc)
@@ -633,9 +636,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
                                       struct bnxt_tc_l2_key *l2_info,
                                       __le32 *encap_record_handle)
 {
-       struct hwrm_cfa_encap_record_alloc_output *resp =
-                                               bp->hwrm_cmd_resp_addr;
        struct hwrm_cfa_encap_record_alloc_input req = { 0 };
+       struct hwrm_cfa_encap_record_alloc_output *resp;
        struct hwrm_cfa_encap_data_vxlan *encap =
                        (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
        struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
@@ -667,10 +669,12 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
 
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-       if (!rc)
+       if (!rc) {
+               resp = bnxt_get_hwrm_resp_addr(bp, &req);
                *encap_record_handle = resp->encap_record_id;
-       else
+       } else {
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+       }
        mutex_unlock(&bp->hwrm_cmd_lock);
 
        if (rc)
@@ -1401,8 +1405,8 @@ static int
 bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
                             struct bnxt_tc_stats_batch stats_batch[])
 {
-       struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_cfa_flow_stats_input req = { 0 };
+       struct hwrm_cfa_flow_stats_output *resp;
        __le16 *req_flow_handles = &req.flow_handle_0;
        int rc, i;
 
@@ -1417,8 +1421,12 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
-               __le64 *resp_packets = &resp->packet_0;
-               __le64 *resp_bytes = &resp->byte_0;
+               __le64 *resp_packets;
+               __le64 *resp_bytes;
+
+               resp = bnxt_get_hwrm_resp_addr(bp, &req);
+               resp_packets = &resp->packet_0;
+               resp_bytes = &resp->byte_0;
 
                for (i = 0; i < num_flows; i++) {
                        stats_batch[i].hw_stats.packets =