1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Mon, 20 Mar 2023 11:44:30 +0100
3 Subject: [PATCH] net: ethernet: mtk_eth_soc: add code for offloading flows from wireless devices
6 WED version 2 (on MT7986 and later) can offload flows originating from wireless
7 devices. In order to make that work, ndo_setup_tc needs to be implemented on
8 the netdevs. This adds the required code to offload flows coming in from WED,
9 while keeping track of the incoming wed index used for selecting the correct PPE instance.
12 Signed-off-by: Felix Fietkau <nbd@nbd.name>
14 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 3 +
15 .../net/ethernet/mediatek/mtk_ppe_offload.c | 37 ++++---
16 drivers/net/ethernet/mediatek/mtk_wed.c | 101 ++++++++++++++++++
17 include/linux/soc/mediatek/mtk_wed.h | 6 ++
18 4 files changed, 133 insertions(+), 14 deletions(-)
20 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
21 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
22 @@ -1432,6 +1432,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk
23 int mtk_eth_offload_init(struct mtk_eth *eth);
24 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
26 +int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
28 +void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
29 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
32 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
33 +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
34 @@ -237,7 +237,8 @@ out:
38 -mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
39 +mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
42 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
43 struct flow_action_entry *act;
44 @@ -454,6 +455,7 @@ mtk_flow_offload_replace(struct mtk_eth
45 entry->cookie = f->cookie;
46 memcpy(&entry->data, &foe, sizeof(entry->data));
47 entry->wed_index = wed_index;
48 + entry->ppe_index = ppe_index;
50 err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
52 @@ -522,25 +524,15 @@ mtk_flow_offload_stats(struct mtk_eth *e
54 static DEFINE_MUTEX(mtk_flow_offload_mutex);
57 -mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
58 +int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
61 - struct flow_cls_offload *cls = type_data;
62 - struct net_device *dev = cb_priv;
63 - struct mtk_mac *mac = netdev_priv(dev);
64 - struct mtk_eth *eth = mac->hw;
67 - if (!tc_can_offload(dev))
70 - if (type != TC_SETUP_CLSFLOWER)
73 mutex_lock(&mtk_flow_offload_mutex);
74 switch (cls->command) {
75 case FLOW_CLS_REPLACE:
76 - err = mtk_flow_offload_replace(eth, cls);
77 + err = mtk_flow_offload_replace(eth, cls, ppe_index);
79 case FLOW_CLS_DESTROY:
80 err = mtk_flow_offload_destroy(eth, cls);
81 @@ -558,6 +550,23 @@ mtk_eth_setup_tc_block_cb(enum tc_setup_
85 +mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
87 + struct flow_cls_offload *cls = type_data;
88 + struct net_device *dev = cb_priv;
89 + struct mtk_mac *mac = netdev_priv(dev);
90 + struct mtk_eth *eth = mac->hw;
92 + if (!tc_can_offload(dev))
95 + if (type != TC_SETUP_CLSFLOWER)
98 + return mtk_flow_offload_cmd(eth, cls, 0);
102 mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
104 struct mtk_mac *mac = netdev_priv(dev);
105 --- a/drivers/net/ethernet/mediatek/mtk_wed.c
106 +++ b/drivers/net/ethernet/mediatek/mtk_wed.c
108 #include <linux/mfd/syscon.h>
109 #include <linux/debugfs.h>
110 #include <linux/soc/mediatek/mtk_wed.h>
111 +#include <net/flow_offload.h>
112 +#include <net/pkt_cls.h>
113 #include "mtk_eth_soc.h"
114 #include "mtk_wed_regs.h"
117 static struct mtk_wed_hw *hw_list[2];
118 static DEFINE_MUTEX(hw_lock);
120 +struct mtk_wed_flow_block_priv {
121 + struct mtk_wed_hw *hw;
122 + struct net_device *dev;
126 wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
128 @@ -1753,6 +1760,99 @@ out:
129 mutex_unlock(&hw_lock);
133 +mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
135 + struct mtk_wed_flow_block_priv *priv = cb_priv;
136 + struct flow_cls_offload *cls = type_data;
137 + struct mtk_wed_hw *hw = priv->hw;
139 + if (!tc_can_offload(priv->dev))
140 + return -EOPNOTSUPP;
142 + if (type != TC_SETUP_CLSFLOWER)
143 + return -EOPNOTSUPP;
145 + return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
149 +mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
150 + struct flow_block_offload *f)
152 + struct mtk_wed_flow_block_priv *priv;
153 + static LIST_HEAD(block_cb_list);
154 + struct flow_block_cb *block_cb;
155 + struct mtk_eth *eth = hw->eth;
156 + flow_setup_cb_t *cb;
158 + if (!eth->soc->offload_version)
159 + return -EOPNOTSUPP;
161 + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
162 + return -EOPNOTSUPP;
164 + cb = mtk_wed_setup_tc_block_cb;
165 + f->driver_block_list = &block_cb_list;
167 + switch (f->command) {
168 + case FLOW_BLOCK_BIND:
169 + block_cb = flow_block_cb_lookup(f->block, cb, dev);
171 + flow_block_cb_incref(block_cb);
175 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
181 + block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
182 + if (IS_ERR(block_cb)) {
184 + return PTR_ERR(block_cb);
187 + flow_block_cb_incref(block_cb);
188 + flow_block_cb_add(block_cb, f);
189 + list_add_tail(&block_cb->driver_list, &block_cb_list);
191 + case FLOW_BLOCK_UNBIND:
192 + block_cb = flow_block_cb_lookup(f->block, cb, dev);
196 + if (!flow_block_cb_decref(block_cb)) {
197 + flow_block_cb_remove(block_cb, f);
198 + list_del(&block_cb->driver_list);
199 + kfree(block_cb->cb_priv);
203 + return -EOPNOTSUPP;
208 +mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
209 + enum tc_setup_type type, void *type_data)
211 + struct mtk_wed_hw *hw = wed->hw;
213 + if (hw->version < 2)
214 + return -EOPNOTSUPP;
217 + case TC_SETUP_BLOCK:
219 + return mtk_wed_setup_tc_block(hw, dev, type_data);
221 + return -EOPNOTSUPP;
225 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
226 void __iomem *wdma, phys_addr_t wdma_phy,
228 @@ -1772,6 +1872,7 @@ void mtk_wed_add_hw(struct device_node *
229 .irq_set_mask = mtk_wed_irq_set_mask,
230 .detach = mtk_wed_detach,
231 .ppe_check = mtk_wed_ppe_check,
232 + .setup_tc = mtk_wed_setup_tc,
234 struct device_node *eth_np = eth->dev->of_node;
235 struct platform_device *pdev;
236 --- a/include/linux/soc/mediatek/mtk_wed.h
237 +++ b/include/linux/soc/mediatek/mtk_wed.h
239 #include <linux/regmap.h>
240 #include <linux/pci.h>
241 #include <linux/skbuff.h>
242 +#include <linux/netdevice.h>
244 #define MTK_WED_TX_QUEUES 2
245 #define MTK_WED_RX_QUEUES 2
246 @@ -180,6 +181,8 @@ struct mtk_wed_ops {
248 u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
249 void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
250 + int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
251 + enum tc_setup_type type, void *type_data);
254 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
255 @@ -238,6 +241,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_devic
256 (_dev)->ops->msg_update(_dev, _id, _msg, _len)
257 #define mtk_wed_device_stop(_dev) (_dev)->ops->stop(_dev)
258 #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
259 +#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
260 + (_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
262 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
264 @@ -256,6 +261,7 @@ static inline bool mtk_wed_device_active
265 #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
266 #define mtk_wed_device_stop(_dev) do {} while (0)
267 #define mtk_wed_device_dma_reset(_dev) do {} while (0)
268 +#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP