From a41d535855976838d246c079143c948dcf0f7931 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Tue, 25 Jul 2023 01:52:59 +0100
Subject: [PATCH 102/250] net: ethernet: mtk_eth_soc: add NETSYS_V3 version
 support

Introduce NETSYS_V3 chipset version support.
This is a preliminary patch to introduce support for MT7988 SoC.

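The new mtk_is_netsys_v3_or_greater() helper follows the existing
mtk_is_netsys_v2_or_greater() pattern and simply compares
eth->soc->version, so version-specific paths cascade from newest to
oldest. A minimal usage sketch (illustration only, mirroring the
mtk_hw_init() hunk below and assuming a driver context where a
struct mtk_eth *eth is in scope; MT7988 is the first NETSYS_V3 SoC,
while earlier MT7986-class SoCs stay on NETSYS_V2):

	if (mtk_is_netsys_v3_or_greater(eth)) {
		/* NETSYS_V3 (e.g. MT7988) register layout */
	} else if (mtk_is_netsys_v2_or_greater(eth)) {
		/* NETSYS_V2 register layout */
	} else {
		/* legacy NETSYS_V1 SoCs */
	}
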
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Link: https://lore.kernel.org/r/0db2260910755d76fa48e303b9f9bdf4e5a82340.1690246066.git.daniel@makrotopia.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 105 ++++++++++++++------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  48 +++++++--
 2 files changed, 116 insertions(+), 37 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -862,17 +862,32 @@ void mtk_stats_update_mac(struct mtk_mac
 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
 		hw_stats->rx_flow_control_packets +=
 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
-		hw_stats->tx_skip +=
-			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
-		hw_stats->tx_collisions +=
-			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
-		hw_stats->tx_bytes +=
-			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
-		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
-		if (stats)
-			hw_stats->tx_bytes += (stats << 32);
-		hw_stats->tx_packets +=
-			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
+
+		if (mtk_is_netsys_v3_or_greater(eth)) {
+			hw_stats->tx_skip +=
+				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
+			hw_stats->tx_collisions +=
+				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
+			hw_stats->tx_bytes +=
+				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
+			stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
+			if (stats)
+				hw_stats->tx_bytes += (stats << 32);
+			hw_stats->tx_packets +=
+				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
+		} else {
+			hw_stats->tx_skip +=
+				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
+			hw_stats->tx_collisions +=
+				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
+			hw_stats->tx_bytes +=
+				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
+			stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
+			if (stats)
+				hw_stats->tx_bytes += (stats << 32);
+			hw_stats->tx_packets +=
+				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
+		}
 	}
 
 	u64_stats_update_end(&hw_stats->syncp);
@@ -1176,7 +1191,10 @@ static void mtk_tx_set_dma_desc_v2(struc
 		data |= TX_DMA_LS0;
 	WRITE_ONCE(desc->txd3, data);
 
-	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
+	if (mac->id == MTK_GMAC3_ID)
+		data = PSE_GDM3_PORT;
+	else
+		data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
 	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
 	WRITE_ONCE(desc->txd4, data);
 
@@ -1187,6 +1205,8 @@ static void mtk_tx_set_dma_desc_v2(struc
 		/* tx checksum offload */
 		if (info->csum)
 			data |= TX_DMA_CHKSUM_V2;
+		if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
+			data |= TX_DMA_SPTAG_V3;
 	}
 	WRITE_ONCE(desc->txd5, data);
 
@@ -1252,8 +1272,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
 
 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
-			  MTK_TX_FLAGS_FPORT1;
+	itx_buf->mac_id = mac->id;
 	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
 		     k++);
 
@@ -1301,8 +1320,7 @@ static int mtk_tx_map(struct sk_buff *sk
 			memset(tx_buf, 0, sizeof(*tx_buf));
 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
-			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
-					 MTK_TX_FLAGS_FPORT1;
+			tx_buf->mac_id = mac->id;
 
 			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
 				     txd_info.size, k++);
@@ -1604,7 +1622,7 @@ static int mtk_xdp_frame_map(struct mtk_
 	}
 	mtk_tx_set_dma_desc(dev, txd, txd_info);
 
-	tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+	tx_buf->mac_id = mac->id;
 	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
 	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
 
@@ -1854,11 +1872,24 @@ static int mtk_poll_rx(struct napi_struc
 			break;
 
 		/* find out which mac the packet come from. values start at 1 */
-		if (mtk_is_netsys_v2_or_greater(eth))
-			mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
-		else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
-			 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
+		if (mtk_is_netsys_v2_or_greater(eth)) {
+			u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
+
+			switch (val) {
+			case PSE_GDM1_PORT:
+			case PSE_GDM2_PORT:
+				mac = val - 1;
+				break;
+			case PSE_GDM3_PORT:
+				mac = MTK_GMAC3_ID;
+				break;
+			default:
+				break;
+			}
+		} else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+			   !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
 			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
+		}
 
 		if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
 			     !eth->netdev[mac]))
@@ -2080,7 +2111,6 @@ static int mtk_poll_tx_qdma(struct mtk_e
 
 	while ((cpu != dma) && budget) {
 		u32 next_cpu = desc->txd2;
-		int mac = 0;
 
 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
@@ -2088,15 +2118,13 @@ static int mtk_poll_tx_qdma(struct mtk_e
 
 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
 					    eth->soc->txrx.txd_size);
-		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
-			mac = 1;
-
 		if (!tx_buf->data)
 			break;
 
 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
 			if (tx_buf->type == MTK_TYPE_SKB)
-				mtk_poll_tx_done(eth, state, mac, tx_buf->data);
+				mtk_poll_tx_done(eth, state, tx_buf->mac_id,
+						 tx_buf->data);
 
 			budget--;
 		}
@@ -3702,7 +3730,24 @@ static int mtk_hw_init(struct mtk_eth *e
 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
 
-	if (mtk_is_netsys_v2_or_greater(eth)) {
+	if (mtk_is_netsys_v3_or_greater(eth)) {
+		/* PSE should not drop port1, port8 and port9 packets */
+		mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
+
+		/* GDM and CDM Threshold */
+		mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
+		mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
+
+		/* Disable GDM1 RX CRC stripping */
+		mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
+
+		/* PSE GDM3 MIB counter has incorrect hw default values,
+		 * so the driver ought to read clear the values beforehand
+		 * in case ethtool retrieve wrong mib values.
+		 */
+		for (i = 0; i < 0x80; i += 0x4)
+			mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
+	} else if (!mtk_is_netsys_v1(eth)) {
 		/* PSE should not drop port8 and port9 packets from WDMA Tx */
 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
 
@@ -4264,7 +4309,11 @@ static int mtk_add_mac(struct mtk_eth *e
 	}
 	spin_lock_init(&mac->hw_stats->stats_lock);
 	u64_stats_init(&mac->hw_stats->syncp);
-	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
+
+	if (mtk_is_netsys_v3_or_greater(eth))
+		mac->hw_stats->reg_offset = id * 0x80;
+	else
+		mac->hw_stats->reg_offset = id * 0x40;
 
 	/* phylink create */
 	err = of_get_phy_mode(np, &phy_mode);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -122,6 +122,7 @@
 #define MTK_GDMA_ICS_EN		BIT(22)
 #define MTK_GDMA_TCS_EN		BIT(21)
 #define MTK_GDMA_UCS_EN		BIT(20)
+#define MTK_GDMA_STRP_CRC	BIT(16)
 #define MTK_GDMA_TO_PDMA	0x0
 #define MTK_GDMA_DROP_ALL	0x7777
 
@@ -287,8 +288,6 @@
 /* QDMA Interrupt grouping registers */
 #define MTK_RLS_DONE_INT	BIT(0)
 
-#define MTK_STAT_OFFSET		0x40
-
 /* QDMA TX NUM */
 #define QID_BITS_V2(x)		(((x) & 0x3f) << 16)
 #define MTK_QDMA_GMAC2_QID	8
@@ -301,6 +300,8 @@
 #define TX_DMA_CHKSUM_V2	(0x7 << 28)
 #define TX_DMA_TSO_V2		BIT(31)
 
+#define TX_DMA_SPTAG_V3		BIT(27)
+
 /* QDMA V2 descriptor txd4 */
 #define TX_DMA_FPORT_SHIFT_V2	8
 #define TX_DMA_FPORT_MASK_V2	0xf
@@ -634,12 +635,6 @@ enum mtk_tx_flags {
 	 */
 	MTK_TX_FLAGS_SINGLE0 = 0x01,
 	MTK_TX_FLAGS_PAGE0 = 0x02,
-
-	/* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
-	 * SKB out instead of looking up through hardware TX descriptor.
-	 */
-	MTK_TX_FLAGS_FPORT0 = 0x04,
-	MTK_TX_FLAGS_FPORT1 = 0x08,
 };
 
 /* This enum allows us to identify how the clock is defined on the array of the
@@ -725,6 +720,35 @@ enum mtk_dev_state {
 	MTK_RESETTING
 };
 
+/* PSE Port Definition */
+enum mtk_pse_port {
+	PSE_ADMA_PORT = 0,
+	PSE_GDM1_PORT,
+	PSE_GDM2_PORT,
+	PSE_PPE0_PORT,
+	PSE_PPE1_PORT,
+	PSE_QDMA_TX_PORT,
+	PSE_QDMA_RX_PORT,
+	PSE_DROP_PORT,
+	PSE_WDMA0_PORT,
+	PSE_WDMA1_PORT,
+	PSE_TDMA_PORT,
+	PSE_NONE_PORT,
+	PSE_PPE2_PORT,
+	PSE_WDMA2_PORT,
+	PSE_EIP197_PORT,
+	PSE_GDM3_PORT,
+	PSE_PORT_MAX
+};
+
+/* GMAC Identifier */
+enum mtk_gmac_id {
+	MTK_GMAC1_ID = 0,
+	MTK_GMAC2_ID,
+	MTK_GMAC3_ID,
+	MTK_GMAC_ID_MAX
+};
+
 enum mtk_tx_buf_type {
 	MTK_TYPE_SKB,
 	MTK_TYPE_XDP_TX,
@@ -743,7 +767,8 @@ struct mtk_tx_buf {
 	enum mtk_tx_buf_type type;
 	void *data;
 
-	u32 flags;
+	u16 mac_id;
+	u16 flags;
 	DEFINE_DMA_UNMAP_ADDR(dma_addr0);
 	DEFINE_DMA_UNMAP_LEN(dma_len0);
 	DEFINE_DMA_UNMAP_ADDR(dma_addr1);
@@ -1192,6 +1217,11 @@ static inline bool mtk_is_netsys_v2_or_g
 	return eth->soc->version > 1;
 }
 
+static inline bool mtk_is_netsys_v3_or_greater(struct mtk_eth *eth)
+{
+	return eth->soc->version > 2;
+}
+
 static inline struct mtk_foe_entry *
 mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
 {