struct dlci_local *dlp = netdev_priv(dev);
if (skb)
- dlp->slave->netdev_ops->ndo_start_xmit(skb, dlp->slave);
+ netdev_start_xmit(skb, dlp->slave);
return NETDEV_TX_OK;
}
/* Only send if data is available. */
if (ncm->skb_tx_data) {
ncm->timer_force_tx = true;
- ncm->netdev->netdev_ops->ndo_start_xmit(NULL, ncm->netdev);
+ netdev_start_xmit(NULL, ncm->netdev);
ncm->timer_force_tx = false;
}
}
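Note: cdc_ncm deliberately passes a NULL skb here; the driver treats that as a
signal to push out already-aggregated NCM data rather than a new packet. The
netdev_start_xmit() helper introduced below therefore has to tolerate
skb == NULL, hence the guard around the queue_mapping read in
__netdev_start_xmit().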
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; cannot be NULL.
*
+ * void (*ndo_xmit_flush)(struct net_device *dev, u16 queue);
+ * A driver implements this function when it wishes to support
+ * deferred TX queue flushing. The idea is that the expensive
+ * operation to trigger TX queue processing can be done after
+ * N calls to ndo_start_xmit rather than being done every single
+ * time. In this regime ndo_start_xmit will be called one or more
+ * times, and then a final ndo_xmit_flush call will be made to
+ * have the driver tell the device about the new pending TX queue
+ * entries. The kernel keeps track of which queues need flushing
+ * by monitoring skb->queue_mapping of the packets it submits to
+ * ndo_start_xmit. This is the queue value that will be passed
+ * to ndo_xmit_flush.
+ *
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* void *accel_priv, select_queue_fallback_t fallback);
 *	Called to decide which queue to use when device supports multiple transmit queues.
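To make the contract above concrete, here is a minimal driver-side sketch
under the new scheme; every mydrv_* name, the ring layout, and the queue
count are invented for illustration and are not part of this patch.
Descriptor posting happens in ndo_start_xmit, and the one expensive doorbell
write is deferred to ndo_xmit_flush:

/* Invented types -- stand-ins for a real driver's TX ring state. */
struct mydrv_ring {
	void __iomem	*doorbell;	/* device TX doorbell register */
	u32		tail;		/* next-to-use descriptor index */
};

struct mydrv_priv {
	struct mydrv_ring tx_ring[8];	/* arbitrary queue count */
};

static void mydrv_post_descriptor(struct mydrv_ring *ring, struct sk_buff *skb)
{
	/* Real descriptor setup elided; only advance the ring tail. */
	ring->tail++;
}

static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);
	struct mydrv_ring *ring = &priv->tx_ring[skb_get_queue_mapping(skb)];

	/* Post the descriptor only; no doorbell/MMIO write here. */
	mydrv_post_descriptor(ring, skb);
	return NETDEV_TX_OK;
}

static void mydrv_xmit_flush(struct net_device *dev, u16 queue)
{
	struct mydrv_priv *priv = netdev_priv(dev);
	struct mydrv_ring *ring = &priv->tx_ring[queue];

	/* One MMIO write covers every skb posted since the last flush. */
	writel(ring->tail, ring->doorbell);
}

static const struct net_device_ops mydrv_netdev_ops = {
	.ndo_start_xmit	= mydrv_start_xmit,
	.ndo_xmit_flush	= mydrv_xmit_flush,
};

Note that with the helpers added below, the flush currently follows every
successful ndo_start_xmit; the split is what makes batching N transmits per
flush possible for future callers.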
int (*ndo_stop)(struct net_device *dev);
netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
struct net_device *dev);
+ void (*ndo_xmit_flush)(struct net_device *dev, u16 queue);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
void *accel_priv,
#define dev_proc_init() 0
#endif
+static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
+ struct sk_buff *skb, struct net_device *dev)
+{
+ netdev_tx_t ret;
+ u16 q;
+
+ /* Snapshot the queue before ->ndo_start_xmit(), which may consume
+  * and free the skb.  Callers such as cdc_ncm above pass a NULL skb
+  * to force a flush, so guard the dereference.
+  */
+ q = skb ? skb->queue_mapping : 0;
+ ret = ops->ndo_start_xmit(skb, dev);
+ if (dev_xmit_complete(ret) && ops->ndo_xmit_flush)
+ ops->ndo_xmit_flush(dev, q);
+
+ return ret;
+}
+
+static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ return __netdev_start_xmit(ops, skb, dev);
+}
+
int netdev_class_create_file_ns(struct class_attribute *class_attr,
const void *ns);
void netdev_class_remove_file_ns(struct class_attribute *class_attr,
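Note the two-level split above: netdev_start_xmit() just resolves
dev->netdev_ops for the common case, while __netdev_start_xmit() accepts the
ops table explicitly so that layered callers which stash a device's original
ops, such as the mpoa change below, still get the deferred-flush behavior.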
}
non_ip:
- return mpc->old_ops->ndo_start_xmit(skb, dev);
+ return __netdev_start_xmit(mpc->old_ops, skb, dev);
}
static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
- const struct net_device_ops *ops = dev->netdev_ops;
int rc = NETDEV_TX_OK;
unsigned int skb_len;
skb_len = skb->len;
trace_net_dev_start_xmit(skb, dev);
- rc = ops->ndo_start_xmit(skb, dev);
+ rc = netdev_start_xmit(skb, dev);
trace_net_dev_xmit(skb, rc, dev, skb_len);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
skb_len = nskb->len;
trace_net_dev_start_xmit(nskb, dev);
- rc = ops->ndo_start_xmit(nskb, dev);
+ rc = netdev_start_xmit(nskb, dev);
trace_net_dev_xmit(nskb, rc, dev, skb_len);
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
- const struct net_device_ops *ops = dev->netdev_ops;
int status = NETDEV_TX_OK;
netdev_features_t features;
skb->vlan_tci = 0;
}
- status = ops->ndo_start_xmit(skb, dev);
+ status = netdev_start_xmit(skb, dev);
if (status == NETDEV_TX_OK)
txq_trans_update(txq);
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
struct net_device *odev = pkt_dev->odev;
- netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *)
- = odev->netdev_ops->ndo_start_xmit;
struct netdev_queue *txq;
u16 queue_map;
int ret;
goto unlock;
}
atomic_inc(&(pkt_dev->skb->users));
- ret = (*xmit)(pkt_dev->skb, odev);
+ ret = netdev_start_xmit(pkt_dev->skb, odev);
switch (ret) {
case NETDEV_TX_OK:
static int packet_direct_xmit(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
- const struct net_device_ops *ops = dev->netdev_ops;
netdev_features_t features;
struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_drv_stopped(txq)) {
- ret = ops->ndo_start_xmit(skb, dev);
+ ret = netdev_start_xmit(skb, dev);
if (ret == NETDEV_TX_OK)
txq_trans_update(txq);
}
do {
struct net_device *slave = qdisc_dev(q);
struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
- const struct net_device_ops *slave_ops = slave->netdev_ops;
if (slave_txq->qdisc_sleeping != q)
continue;
unsigned int length = qdisc_pkt_len(skb);
if (!netif_xmit_frozen_or_stopped(slave_txq) &&
- slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
+ netdev_start_xmit(skb, slave) == NETDEV_TX_OK) {
txq_trans_update(slave_txq);
__netif_tx_unlock(slave_txq);
master->slaves = NEXT_SLAVE(q);