struct dsa_skb_cb {
struct sk_buff *clone;
+ bool deferred_xmit;
};
struct __dsa_skb_cb {
struct dsa_skb_cb cb;
u8 priv[48 - sizeof(struct dsa_skb_cb)];
};
struct net_device *bridge_dev;
struct devlink_port devlink_port;
struct phylink *pl;
+
+ struct work_struct xmit_work;
+ struct sk_buff_head xmit_queue;
+
/*
* Original copy of the master netdev ethtool_ops
*/
bool (*port_txtstamp)(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type);
+
+ /*
+ * Deferred frame Tx
+ */
+ netdev_tx_t (*port_deferred_xmit)(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
};
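A switch driver wiring up the new op is invoked from the deferred xmit worker (dsa_port_xmit_work() below), i.e. in process context, and takes ownership of the skb. A minimal sketch, assuming a hypothetical "foo" driver whose hardware needs sleepable setup (e.g. over SPI) before the frame can leave; every foo_* name is illustrative and not part of this patch:

	static netdev_tx_t foo_port_deferred_xmit(struct dsa_switch *ds, int port,
						  struct sk_buff *skb)
	{
		struct foo_private *priv = ds->priv;

		/* Sleepable hardware setup that would be illegal in
		 * ndo_start_xmit (softirq) context.
		 */
		foo_setup_mgmt_route(priv, port, skb);

		/* Hand the frame to the master interface; consumes skb */
		return dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
	}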
struct dsa_switch_driver {
#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
+netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *dsa_defer_xmit(struct sk_buff *skb, struct net_device *dev);
int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data);
int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data);
int dsa_port_get_phy_sset_count(struct dsa_port *dp);
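For reference, the deferred_xmit flag added to struct dsa_skb_cb above travels in skb->cb and is reached through the DSA_SKB_CB() accessor that dsa.h already provides, which is essentially a cast:

	#define DSA_SKB_CB(skb)	((struct dsa_skb_cb *)((skb)->cb))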
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
+ cancel_work_sync(&dp->xmit_work);
+ skb_queue_purge(&dp->xmit_queue);
+
phylink_stop(dp->pl);
dsa_port_disable(dp);
kfree_skb(clone);
}
+netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
+{
+ /* SKBs for netpoll still need to be mangled with the protocol-specific
+ * tag to be successfully transmitted
+ */
+ if (unlikely(netpoll_tx_running(dev)))
+ return dsa_slave_netpoll_send_skb(dev, skb);
+
+ /* Queue the SKB for transmission on the parent interface, but
+ * do not modify its EtherType
+ */
+ skb->dev = dsa_slave_to_master(dev);
+ dev_queue_xmit(skb);
+
+ return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
+
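Exporting the helper means a switch driver's .port_deferred_xmit implementation can complete transmission through the exact same path that in-line tagged frames take in dsa_slave_xmit() below; the sketch after the ops structure above shows this.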
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
+ DSA_SKB_CB(skb)->deferred_xmit = false;
+
/* Transmit function may have to reallocate the original SKB */
nskb = p->xmit(skb, dev);
if (!nskb) {
- kfree_skb(skb);
+ /* Frames handed to dsa_defer_xmit() are still owned by the
+ * deferred xmit worker; only free frames the tagger dropped.
+ */
+ if (!DSA_SKB_CB(skb)->deferred_xmit)
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
- /* SKB for netpoll still need to be mangled with the protocol-specific
- * tag to be successfully transmitted
- */
- if (unlikely(netpoll_tx_running(dev)))
- return dsa_slave_netpoll_send_skb(dev, nskb);
+ return dsa_enqueue_skb(nskb, dev);
+}
- /* Queue the SKB for transmission on the parent interface, but
- * do not modify its EtherType
- */
- nskb->dev = dsa_slave_to_master(dev);
- dev_queue_xmit(nskb);
+struct sk_buff *dsa_defer_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
- return NETDEV_TX_OK;
+ DSA_SKB_CB(skb)->deferred_xmit = true;
+
+ skb_queue_tail(&dp->xmit_queue, skb);
+ schedule_work(&dp->xmit_work);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dsa_defer_xmit);
+
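On the tagger side, dsa_defer_xmit() is meant to be called from the tagging protocol's xmit hook for frames that cannot be tagged in-line; returning NULL to dsa_slave_xmit() signals that the skb was consumed (queued, not freed). A sketch, where is_link_local_frame() stands in for whatever classification the tagger actually performs:

	static struct sk_buff *foo_tag_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
	{
		/* Frames needing driver intervention go to the port's
		 * xmit_queue; dsa_slave_xmit() must not free them.
		 */
		if (is_link_local_frame(skb))
			return dsa_defer_xmit(skb, netdev);

		/* ... regular in-line tagging path, returning the
		 * (possibly reallocated) skb for immediate transmission ...
		 */
		return skb;
	}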
+static void dsa_port_xmit_work(struct work_struct *work)
+{
+ struct dsa_port *dp = container_of(work, struct dsa_port, xmit_work);
+ struct dsa_switch *ds = dp->ds;
+ struct sk_buff *skb;
+
+ if (unlikely(!ds->ops->port_deferred_xmit))
+ return;
+
+ /* The driver now owns the skb: it must either transmit it
+ * (e.g. via dsa_enqueue_skb()) or free it.
+ */
+ while ((skb = skb_dequeue(&dp->xmit_queue)) != NULL)
+ ds->ops->port_deferred_xmit(ds, dp->index, skb);
}
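Using one work item and one queue per port preserves per-port ordering while moving transmission into process context, where the driver is free to sleep. Frames still sitting on the queue when the port goes down are flushed by the cancel_work_sync()/skb_queue_purge() pairs in the slave close path above and the suspend path below.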
/* ethtool operations *******************************************************/
if (!netif_running(slave_dev))
return 0;
+ cancel_work_sync(&dp->xmit_work);
+ skb_queue_purge(&dp->xmit_queue);
+
netif_device_detach(slave_dev);
rtnl_lock();
}
p->dp = port;
INIT_LIST_HEAD(&p->mall_tc_list);
+ INIT_WORK(&port->xmit_work, dsa_port_xmit_work);
+ skb_queue_head_init(&port->xmit_queue);
p->xmit = cpu_dp->tag_ops->xmit;
port->slave = slave_dev;