net: hns3: fix error handling for desc filling
author    Yunsheng Lin <linyunsheng@huawei.com>
          Mon, 6 May 2019 02:48:48 +0000 (10:48 +0800)
committer David S. Miller <davem@davemloft.net>
          Tue, 7 May 2019 17:37:13 +0000 (10:37 -0700)
When desc filling fails in hns3_nic_net_xmit, it calls
hns3_clear_desc to undo the DMA mappings. But currently
ring->next_to_use points to the desc for which the desc filling
or DMA mapping returned the error, which means the desc that
ring->next_to_use points to has not been DMA mapped yet; the
descs that need unmapping sit before ring->next_to_use. For
example, if descs 5..7 were mapped and mapping desc 8 failed,
next_to_use is 8, and descs 7, 6 and 5 are the ones that must
be unmapped.

This patch fixes it by calling ring_ptr_move_bw(next_to_use)
before doing the unmapping, and sets desc_cb->dma to zero so
that the same mapping is not freed again when the driver is
unloaded.
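
For reference, the ring pointer helpers are plain modulo arithmetic
over the circular descriptor ring. Below is a sketch matching their
definitions in hns3_enet.h (quoted from memory, not from this diff):

	/* The ring is a circular buffer of desc_num descriptors, so
	 * moving a ring pointer backward one slot wraps around via
	 * modulo arithmetic.
	 */
	#define ring_ptr_move_fw(ring, p) \
		((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
	#define ring_ptr_move_bw(ring, p) \
		((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)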

Also, when filling the skb head or a frag fails, both cases need
to unmap all the way back to next_to_use_head, so merge the two
desc filling error labels into a single one.

Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c

index 7224b382273351198f6bf0b5477a7ef8ff898457..21eac68ed91cf27d020552d929043667895c5c44 100644
@@ -1224,6 +1224,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
                if (ring->next_to_use == next_to_use_orig)
                        break;
 
+               /* rollback one */
+               ring_ptr_move_bw(ring, next_to_use);
+
                /* unmap the descriptor dma address */
                if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
                        dma_unmap_single(dev,
@@ -1237,9 +1240,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
                                       DMA_TO_DEVICE);
 
                ring->desc_cb[ring->next_to_use].length = 0;
-
-               /* rollback one */
-               ring_ptr_move_bw(ring, next_to_use);
+               ring->desc_cb[ring->next_to_use].dma = 0;
        }
 }
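
With both hunks above applied, the unwind loop reads roughly as
below. This is a sketch: the loop framing and the dma_unmap_page()
branch are reconstructed from context rather than shown verbatim in
the hunks.

	static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
	{
		struct device *dev = ring_to_dev(ring);
		int i;

		for (i = 0; i < ring->desc_num; i++) {
			/* stop once we are back where filling started */
			if (ring->next_to_use == next_to_use_orig)
				break;

			/* rollback one first, so next_to_use points at a
			 * desc that really was mapped
			 */
			ring_ptr_move_bw(ring, next_to_use);

			/* unmap the descriptor dma address */
			if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
				dma_unmap_single(dev,
						 ring->desc_cb[ring->next_to_use].dma,
						 ring->desc_cb[ring->next_to_use].length,
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(dev,
					       ring->desc_cb[ring->next_to_use].dma,
					       ring->desc_cb[ring->next_to_use].length,
					       DMA_TO_DEVICE);

			ring->desc_cb[ring->next_to_use].length = 0;
			/* clear dma so an unload cannot unmap this address twice */
			ring->desc_cb[ring->next_to_use].dma = 0;
		}
	}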
 
@@ -1252,7 +1253,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
        struct netdev_queue *dev_queue;
        struct skb_frag_struct *frag;
        int next_to_use_head;
-       int next_to_use_frag;
        int buf_num;
        int seg_num;
        int size;
@@ -1291,9 +1291,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
        ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
                             DESC_TYPE_SKB);
        if (unlikely(ret))
-               goto head_fill_err;
+               goto fill_err;
 
-       next_to_use_frag = ring->next_to_use;
        /* Fill the fragments */
        for (i = 1; i < seg_num; i++) {
                frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1304,7 +1303,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
                                     DESC_TYPE_PAGE);
 
                if (unlikely(ret))
-                       goto frag_fill_err;
+                       goto fill_err;
        }
 
        /* Complete translate all packets */
@@ -1317,10 +1316,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        return NETDEV_TX_OK;
 
-frag_fill_err:
-       hns3_clear_desc(ring, next_to_use_frag);
-
-head_fill_err:
+fill_err:
        hns3_clear_desc(ring, next_to_use_head);
 
 out_err_tx_ok:
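
Putting the xmit-side hunks together, the transmit error handling now
funnels through one label. The sketch below is heavily abridged; the
elided parts (marked ...) and the exact frag fill call are
reconstructed from context and are unchanged by this patch.

	/* record the starting position so a failure anywhere below can
	 * unwind every descriptor mapped so far
	 */
	next_to_use_head = ring->next_to_use;

	/* fill the skb head */
	ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
			     DESC_TYPE_SKB);
	if (unlikely(ret))
		goto fill_err;

	/* fill the fragments; a failure here takes the same unwind path */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);

		ret = hns3_fill_desc(ring, frag, size,
				     seg_num - 1 == i ? 1 : 0,
				     DESC_TYPE_PAGE);
		if (unlikely(ret))
			goto fill_err;
	}

	...

	return NETDEV_TX_OK;

fill_err:
	hns3_clear_desc(ring, next_to_use_head);

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;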