 		if (entry.skb) {
 			spin_unlock_bh(&q->lock);
-			dev->drv->tx_complete_skb(dev, q, &entry, flush);
+			dev->drv->tx_complete_skb(dev, qid, &entry);
 			spin_lock_bh(&q->lock);
 		}

 free:
 	e.skb = skb;
 	e.txwi = t;
-	dev->drv->tx_complete_skb(dev, q, &e, true);
+	dev->drv->tx_complete_skb(dev, qid, &e);
 	mt76_put_txwi(dev, t);
 	return ret;
 }
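Both DMA call sites drop the queue pointer and the flush flag and hand the completion hook the queue id instead; the pointer stays one array lookup away, since the TX queues live in a fixed per-device array (q == &dev->q_tx[qid]). A minimal self-contained model of that trade, in plain user-space C with made-up names (the txq_id ordering here is illustrative, not mt76's):

	#include <stdio.h>

	enum txq_id { TXQ_BE, TXQ_BK, TXQ_VI, TXQ_VO, TXQ_MCU, NUM_TXQ };

	struct entry { int skb; };
	struct queue { int queued; };
	struct dev {
		struct queue q_tx[NUM_TXQ];
		/* new-style hook: an id, not a pointer, and no flush flag */
		void (*tx_complete_skb)(struct dev *dev, enum txq_id qid,
					struct entry *e);
	};

	static void complete(struct dev *dev, enum txq_id qid, struct entry *e)
	{
		/* the queue pointer is recoverable whenever it is needed */
		struct queue *q = &dev->q_tx[qid];

		printf("completed skb %d on queue %d (queued=%d)\n",
		       e->skb, qid, q->queued);
	}

	int main(void)
	{
		struct dev dev = { .tx_complete_skb = complete };
		struct entry e = { .skb = 1 };

		dev.tx_complete_skb(&dev, TXQ_BE, &e);
		return 0;
	}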
 			      struct mt76_wcid *wcid,
 			      struct ieee80211_sta *sta, u32 *tx_info);
-	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
-				struct mt76_queue_entry *e, bool flush);
+	void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
+				struct mt76_queue_entry *e);
 	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
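Because the member is assigned by name in each driver's ops table, every implementation has to be converted in the same patch or the build breaks on the prototype mismatch. A sketch of an ops table under the new signature (the foo_* names are placeholders, not a real driver):

	static const struct mt76_driver_ops foo_driver_ops = {
		/* prototypes must match the struct exactly; the old
		 * (dev, q, e, flush) form no longer compiles here */
		.tx_prepare_skb = foo_tx_prepare_skb,
		.tx_complete_skb = foo_tx_complete_skb,
		.tx_status_data = foo_tx_status_data,
	};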
 	rcu_read_unlock();
 }

-void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
-			    struct mt76_queue_entry *e, bool flush)
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			    struct mt76_queue_entry *e)
 {
 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
 	struct sk_buff *skb = e->skb;

 		return;
 	}

-	if (q - dev->mt76.q_tx < 4)
+	if (qid < 4)
 		dev->tx_hang_check = 0;

 	mt76_tx_complete_skb(mdev, skb);
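The `qid < 4` test works because the first four entries of enum mt76_txq_id alias mac80211's access categories (IEEE80211_AC_VO through IEEE80211_AC_BK, i.e. values 0 through 3), so the hang watchdog is reset only for completions on the data queues, not for PSD/MCU/beacon traffic. For reference, the enum as it stood in mt76.h around this change (quoted from memory; verify against the tree):

	enum mt76_txq_id {
		MT_TXQ_VO = IEEE80211_AC_VO,
		MT_TXQ_VI = IEEE80211_AC_VI,
		MT_TXQ_BE = IEEE80211_AC_BE,
		MT_TXQ_BK = IEEE80211_AC_BK,
		MT_TXQ_PSD,
		MT_TXQ_MCU,
		MT_TXQ_BEACON,
		MT_TXQ_CAB,
		__MT_TXQ_MAX
	};

The removed `q - dev->mt76.q_tx < 4` computed the same index by pointer arithmetic on the q_tx array; passing qid directly states the intent and drops the dependency on the queue layout.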
 			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
 			  u32 *tx_info);
-void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
-			    struct mt76_queue_entry *e, bool flush);
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			    struct mt76_queue_entry *e);
 void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 			 struct sk_buff *skb);
 	}
 }

-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
-			     struct mt76_queue_entry *e, bool flush)
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			     struct mt76_queue_entry *e)
 {
 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
 	struct mt76x02_txwi *txwi;
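As in the mt7603 hook above, the driver-private device is recovered from the embedded mt76_dev with container_of(); the pattern is untouched by this patch but is worth seeing in isolation. A self-contained demo with a simplified macro (the kernel's version adds a type check) and *_model types made up for illustration; the member layout here is arbitrary, unlike the real mt76x02_dev where mt76 comes first:

	#include <stddef.h>
	#include <stdio.h>

	/* simplified container_of: subtract the member's offset to get
	 * back to the start of the enclosing struct */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct mt76_dev_model { int id; };

	struct mt76x02_dev_model {
		int tx_hang_check;
		struct mt76_dev_model mt76;	/* embedded base device */
	};

	int main(void)
	{
		struct mt76x02_dev_model dev = { .tx_hang_check = 7 };
		struct mt76_dev_model *mdev = &dev.mt76;
		struct mt76x02_dev_model *back =
			container_of(mdev, struct mt76x02_dev_model, mt76);

		printf("tx_hang_check=%d\n", back->tx_hang_check);
		return 0;
	}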
 			    struct sk_buff *skb, struct mt76_wcid *wcid,
 			    struct ieee80211_sta *sta, int len);
 void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
-			     struct mt76_queue_entry *e, bool flush);
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			     struct mt76_queue_entry *e);
 void mt76x02_update_channel(struct mt76_dev *mdev);
 void mt76x02_mac_work(struct work_struct *work);
 			    struct sk_buff *skb, enum mt76_txq_id qid,
 			    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
 			    u32 *tx_info);
-void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
-			      struct mt76_queue_entry *e, bool flush);
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			      struct mt76_queue_entry *e);

 #endif /* __MT76x02_USB_H */
 	mt76x02_remove_hdr_pad(skb, 2);
 }

-void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
-			      struct mt76_queue_entry *e, bool flush)
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+			      struct mt76_queue_entry *e)
 {
 	mt76x02u_remove_dma_hdr(e->skb);
 	mt76_tx_complete_skb(mdev, e->skb);
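The USB completion hook never touched the queue, so its qid parameter simply goes unused. With the closing brace (assumed, since the hunk is truncated) restored, the whole function after the change is just:

	void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
				      struct mt76_queue_entry *e)
	{
		mt76x02u_remove_dma_hdr(e->skb);
		mt76_tx_complete_skb(mdev, e->skb);
	}

Taking an id rather than a pointer costs such callers nothing, while the DMA-side callers above gain a cheaper, layout-independent handle.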
 			q->queued--;

 			spin_unlock_bh(&q->lock);
-			dev->drv->tx_complete_skb(dev, q, &entry, false);
+			dev->drv->tx_complete_skb(dev, i, &entry);
 			spin_lock_bh(&q->lock);
 		}
 		mt76_txq_schedule(dev, q);
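In the USB tx tasklet the loop index doubles as the queue id, which is only valid because the loop walks dev->q_tx[] in id order. A sketch of the assumed shape of that loop, not the verbatim tasklet (the real one does more per-entry bookkeeping, and queue_entry_complete() here is a hypothetical stand-in for its completion test):

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		struct mt76_queue *q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		while (q->queued && queue_entry_complete(q)) {
			struct mt76_queue_entry entry = q->entry[q->head];

			q->head = (q->head + 1) % q->ndesc;
			q->queued--;

			/* drop the lock across the callback, exactly as
			 * in the hunk above; i is the mt76_txq_id */
			spin_unlock_bh(&q->lock);
			dev->drv->tx_complete_skb(dev, i, &entry);
			spin_lock_bh(&q->lock);
		}
		mt76_txq_schedule(dev, q);
		spin_unlock_bh(&q->lock);
	}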