From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri, 20 May 2022 20:11:30 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on txd_size in
 mtk_desc_to_tx_buf

This is a preliminary patch to add mt7986 ethernet support.

Tested-by: Sam Shih <sam.shih@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
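Note on the mtk_desc_to_tx_buf() change below: a minimal standalone sketch of why the ring index is now derived from a byte offset divided by the per-SoC descriptor size instead of pointer arithmetic on struct mtk_tx_dma. The demo_txd_v1/demo_txd_v2 structs and the 16/32-byte sizes are illustrative stand-ins, not the driver's definitions; the only assumption is that a newer SoC such as mt7986 can use a hardware TX descriptor larger than struct mtk_tx_dma.

#include <stdio.h>

/* Illustrative stand-ins for a 16-byte and a 32-byte hardware TX descriptor. */
struct demo_txd_v1 { unsigned int txd1, txd2, txd3, txd4; };
struct demo_txd_v2 { unsigned int txd1, txd2, txd3, txd4,
                     txd5, txd6, txd7, txd8; };

/* Old scheme: "txd - ring->dma" on struct mtk_tx_dma pointers.  The compiler
 * divides the byte distance by sizeof(struct mtk_tx_dma), so the result is a
 * valid ring index only while the hardware descriptor has exactly that size. */
static int idx_old_style(void *base, void *txd)
{
        return ((char *)txd - (char *)base) / sizeof(struct demo_txd_v1);
}

/* New scheme: divide the raw byte offset by the per-SoC descriptor size,
 * as mtk_desc_to_tx_buf() now does with soc->txrx.txd_size. */
static int idx_by_txd_size(void *base, void *txd, unsigned int txd_size)
{
        return ((char *)txd - (char *)base) / txd_size;
}

int main(void)
{
        struct demo_txd_v2 ring[8];
        void *third = &ring[2];         /* third descriptor in the ring */

        /* prints 4: wrong index once descriptors are 32 bytes */
        printf("pointer-style index: %d\n", idx_old_style(ring, third));
        /* prints 2: matches the descriptor's position in the ring */
        printf("txd_size index:      %d\n",
               idx_by_txd_size(ring, third, sizeof(ring[0])));
        return 0;
}

With the descriptor size baked into the pointer type, the old lookup drifts off the matching ring->buf[] entry as soon as the hardware descriptor outgrows struct mtk_tx_dma; dividing the raw offset by soc->txrx.txd_size keeps the two rings in step for any descriptor layout, which is why every call site now passes the per-SoC size.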
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -891,10 +891,11 @@ static inline void *mtk_qdma_phys_to_vir
         return ret + (desc - ring->phys);
 }
 
-static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
-                                                    struct mtk_tx_dma *txd)
+static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+                                             struct mtk_tx_dma *txd,
+                                             u32 txd_size)
 {
-        int idx = txd - ring->dma;
+        int idx = ((void *)txd - (void *)ring->dma) / txd_size;
 
         return &ring->buf[idx];
 }
@@ -1016,6 +1017,7 @@ static int mtk_tx_map(struct sk_buff *sk
         };
         struct mtk_mac *mac = netdev_priv(dev);
         struct mtk_eth *eth = mac->hw;
+        const struct mtk_soc_data *soc = eth->soc;
         struct mtk_tx_dma *itxd, *txd;
         struct mtk_tx_dma *itxd_pdma, *txd_pdma;
         struct mtk_tx_buf *itx_buf, *tx_buf;
@@ -1027,7 +1029,7 @@ static int mtk_tx_map(struct sk_buff *sk
         if (itxd == ring->last_free)
                 return -ENOMEM;
 
-        itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+        itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
         memset(itx_buf, 0, sizeof(*itx_buf));
 
         txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1055,7 +1057,7 @@ static int mtk_tx_map(struct sk_buff *sk
         while (frag_size) {
                 bool new_desc = true;
 
-                if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+                if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
                     (i & 0x1)) {
                         txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
                         txd_pdma = qdma_to_pdma(ring, txd);
@@ -1079,7 +1081,8 @@ static int mtk_tx_map(struct sk_buff *sk
 
                 mtk_tx_set_dma_desc(dev, txd, &txd_info);
 
-                tx_buf = mtk_desc_to_tx_buf(ring, txd);
+                tx_buf = mtk_desc_to_tx_buf(ring, txd,
+                                            soc->txrx.txd_size);
                 if (new_desc)
                         memset(tx_buf, 0, sizeof(*tx_buf));
                 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
@@ -1098,7 +1101,7 @@ static int mtk_tx_map(struct sk_buff *sk
         /* store skb to cleanup */
         itx_buf->skb = skb;
 
-        if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+        if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
                 if (k & 0x1)
                         txd_pdma->txd2 |= TX_DMA_LS0;
                 else
@@ -1116,7 +1119,7 @@ static int mtk_tx_map(struct sk_buff *sk
          */
         wmb();
 
-        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+        if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
                 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
                     !netdev_xmit_more())
                         mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
@@ -1130,13 +1133,13 @@ static int mtk_tx_map(struct sk_buff *sk
 
 err_dma:
         do {
-                tx_buf = mtk_desc_to_tx_buf(ring, itxd);
+                tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 
                 /* unmap dma */
                 mtk_tx_unmap(eth, tx_buf, false);
 
                 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
-                if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+                if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
                         itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
 
                 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -1450,7 +1453,8 @@ static int mtk_poll_tx_qdma(struct mtk_e
                 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
                         break;
 
-                tx_buf = mtk_desc_to_tx_buf(ring, desc);
+                tx_buf = mtk_desc_to_tx_buf(ring, desc,
+                                            eth->soc->txrx.txd_size);
                 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
                         mac = 1;
 