From 68886f301850c6c35bdff002a719fed1ded584c9 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Tue, 6 Jul 2021 14:12:11 +0200
Subject: [PATCH] mac80211: merge the virtual time based airtime scheduler

Improves airtime fairness, especially for devices with larger firmware buffers
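
The core trick in the merged scheduler (patch 382) is to replace the
per-station airtime division on the fast path with a multiply-and-shift
against a pre-computed fixed-point reciprocal (divisor 2^19, see
IEEE80211_RECIPROCAL_DIVISOR_32). A minimal kernel-style sketch of that
arithmetic, for illustration only (the variable names here are made up
and do not appear in the patch):

	u32 weight = 256;			/* default station weight */
	u32 reciprocal = (1U << 19) / weight;	/* computed once, off the fast path */
	u32 airtime = 8000 << 8;		/* 8 ms of airtime; weights scale so unit weight is 256 */
	u32 vt_inc = (u32)((airtime + (weight >> 1)) * reciprocal) >> 19;
	/* vt_inc == 8000 here, matching the exact airtime / weight */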

Signed-off-by: Felix Fietkau <nbd@nbd.name>
(cherry picked from commit a5888ad6b33840d913438ce664c0e7da7e7f53e6)
---
 ...introduce-aql_enable-node-in-debugfs.patch |  111 ++
 ...ange-struct-txq_info-for-fewer-holes.patch |   39 +
 ...to-a-virtual-time-based-airtime-sche.patch | 1277 +++++++++++++++++
 .../500-mac80211_configure_antenna_gain.patch |    6 +-
 4 files changed, 1430 insertions(+), 3 deletions(-)
 create mode 100644 package/kernel/mac80211/patches/subsys/380-mac80211-introduce-aql_enable-node-in-debugfs.patch
 create mode 100644 package/kernel/mac80211/patches/subsys/381-mac80211-rearrange-struct-txq_info-for-fewer-holes.patch
 create mode 100644 package/kernel/mac80211/patches/subsys/382-mac80211-Switch-to-a-virtual-time-based-airtime-sche.patch

diff --git a/package/kernel/mac80211/patches/subsys/380-mac80211-introduce-aql_enable-node-in-debugfs.patch b/package/kernel/mac80211/patches/subsys/380-mac80211-introduce-aql_enable-node-in-debugfs.patch
new file mode 100644
index 0000000000..073782188a
--- /dev/null
+++ b/package/kernel/mac80211/patches/subsys/380-mac80211-introduce-aql_enable-node-in-debugfs.patch
@@ -0,0 +1,111 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sat, 9 Jan 2021 18:57:51 +0100
+Subject: [PATCH] mac80211: introduce aql_enable node in debugfs
+
+Introduce aql_enable node in debugfs in order to enable/disable aql.
+This is useful for debugging purposes.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/e7a934d5d84e4796c4f97ea5de4e66c824296b07.1610214851.git.lorenzo@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+---
+
+--- a/net/mac80211/debugfs.c
++++ b/net/mac80211/debugfs.c
+@@ -281,6 +281,56 @@ static const struct file_operations aql_
+ 	.llseek = default_llseek,
+ };
+ 
++static ssize_t aql_enable_read(struct file *file, char __user *user_buf,
++			       size_t count, loff_t *ppos)
++{
++	char buf[3];
++	int len;
++
++	len = scnprintf(buf, sizeof(buf), "%d\n",
++			!static_key_false(&aql_disable.key));
++
++	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
++}
++
++static ssize_t aql_enable_write(struct file *file, const char __user *user_buf,
++				size_t count, loff_t *ppos)
++{
++	bool aql_disabled = static_key_false(&aql_disable.key);
++	char buf[3];
++	size_t len;
++
++	if (count > sizeof(buf))
++		return -EINVAL;
++
++	if (copy_from_user(buf, user_buf, count))
++		return -EFAULT;
++
++	buf[sizeof(buf) - 1] = '\0';
++	len = strlen(buf);
++	if (len > 0 && buf[len - 1] == '\n')
++		buf[len - 1] = 0;
++
++	if (buf[0] == '0' && buf[1] == '\0') {
++		if (!aql_disabled)
++			static_branch_inc(&aql_disable);
++	} else if (buf[0] == '1' && buf[1] == '\0') {
++		if (aql_disabled)
++			static_branch_dec(&aql_disable);
++	} else {
++		return -EINVAL;
++	}
++
++	return count;
++}
++
++static const struct file_operations aql_enable_ops = {
++	.write = aql_enable_write,
++	.read = aql_enable_read,
++	.open = simple_open,
++	.llseek = default_llseek,
++};
++
+ static ssize_t force_tx_status_read(struct file *file,
+ 				    char __user *user_buf,
+ 				    size_t count,
+@@ -569,6 +619,7 @@ void debugfs_hw_add(struct ieee80211_loc
+ 	DEBUGFS_ADD(power);
+ 	DEBUGFS_ADD(hw_conf);
+ 	DEBUGFS_ADD_MODE(force_tx_status, 0600);
++	DEBUGFS_ADD_MODE(aql_enable, 0600);
+ 
+ 	if (local->ops->wake_tx_queue)
+ 		DEBUGFS_ADD_MODE(aqm, 0600);
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1140,6 +1140,8 @@ enum mac80211_scan_state {
+ 	SCAN_ABORT,
+ };
+ 
++DECLARE_STATIC_KEY_FALSE(aql_disable);
++
+ struct ieee80211_local {
+ 	/* embed the driver visible part.
+ 	 * don't cast (use the static inlines below), but we keep
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -3909,6 +3909,8 @@ void __ieee80211_schedule_txq(struct iee
+ }
+ EXPORT_SYMBOL(__ieee80211_schedule_txq);
+ 
++DEFINE_STATIC_KEY_FALSE(aql_disable);
++
+ bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
+ 				 struct ieee80211_txq *txq)
+ {
+@@ -3918,6 +3920,9 @@ bool ieee80211_txq_airtime_check(struct
+ 	if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
+ 		return true;
+ 
++	if (static_branch_unlikely(&aql_disable))
++		return true;
++
+ 	if (!txq->sta)
+ 		return true;
+ 
diff --git a/package/kernel/mac80211/patches/subsys/381-mac80211-rearrange-struct-txq_info-for-fewer-holes.patch b/package/kernel/mac80211/patches/subsys/381-mac80211-rearrange-struct-txq_info-for-fewer-holes.patch
new file mode 100644
index 0000000000..708ad6f460
--- /dev/null
+++ b/package/kernel/mac80211/patches/subsys/381-mac80211-rearrange-struct-txq_info-for-fewer-holes.patch
@@ -0,0 +1,39 @@
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Fri, 18 Jun 2021 13:41:44 +0300
+Subject: [PATCH] mac80211: rearrange struct txq_info for fewer holes
+
+We can slightly decrease the size of struct txq_info by
+rearranging some fields for fewer holes, so do that.
+
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
+Link: https://lore.kernel.org/r/iwlwifi.20210618133832.1bf019a1fe2e.Ib54622b8d6dc1a9a7dc484e573c073119450538b@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+---
+
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -5,7 +5,7 @@
+  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
+  * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2015  Intel Mobile Communications GmbH
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ 
+ #ifndef IEEE80211_I_H
+@@ -848,9 +848,12 @@ struct txq_info {
+ 	struct fq_tin tin;
+ 	struct codel_vars def_cvars;
+ 	struct codel_stats cstats;
+-	struct sk_buff_head frags;
+-	struct list_head schedule_order;
++
+ 	u16 schedule_round;
++	struct list_head schedule_order;
++
++	struct sk_buff_head frags;
++
+ 	unsigned long flags;
+ 
+ 	/* keep last! */
diff --git a/package/kernel/mac80211/patches/subsys/382-mac80211-Switch-to-a-virtual-time-based-airtime-sche.patch b/package/kernel/mac80211/patches/subsys/382-mac80211-Switch-to-a-virtual-time-based-airtime-sche.patch
new file mode 100644
index 0000000000..2fe12771c0
--- /dev/null
+++ b/package/kernel/mac80211/patches/subsys/382-mac80211-Switch-to-a-virtual-time-based-airtime-sche.patch
@@ -0,0 +1,1277 @@
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Wed, 23 Jun 2021 15:47:55 +0200
+Subject: [PATCH] mac80211: Switch to a virtual time-based airtime scheduler
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This switches the airtime scheduler in mac80211 to use a virtual
+time-based scheduler instead of the round-robin scheduler used before.
+This has a couple of advantages:
+
+- No need to sync up the round-robin scheduler in firmware/hardware with
+  the round-robin airtime scheduler.
+
+- If several stations are eligible for transmission we can schedule all
+  of them; no need to hard-block the scheduling rotation until the head
+  of the queue has used up its quantum.
+
+- The check of whether a station is eligible for transmission becomes
+  simpler (in ieee80211_txq_may_transmit()).
+
+The drawback is that scheduling becomes slightly more expensive, as we
+need to maintain an rbtree of TXQs sorted by virtual time. This means
+that ieee80211_register_airtime() becomes O(logN) in the number of
+currently scheduled TXQs because it can change the order of the
+scheduled stations. We mitigate this overhead by only resorting when a
+station changes position in the tree, and hopefully N rarely grows too
+big (it's only TXQs currently backlogged, not all associated stations),
+so it shouldn't be too big of an issue.
+
+To prevent divisions in the fast path, we maintain both station sums and
+pre-computed reciprocals of the sums. This turns the fast-path operation
+into a multiplication, with divisions only happening as the number of
+active stations change (to re-compute the current sum of all active
+station weights). To prevent this re-computation of the reciprocal from
+happening too frequently, we use a time-based notion of station
+activity, instead of updating the weight every time a station gets
+scheduled or de-scheduled. As queues can oscillate between empty and
+occupied quite frequently, this can significantly cut down on the number
+of re-computations. It also has the added benefit of making the station
+airtime calculation independent of whether the queue happened to have
+drained at the time an airtime value was accounted.
+
+Co-developed-by: Yibo Zhao <yiboz@codeaurora.org>
+Signed-off-by: Yibo Zhao <yiboz@codeaurora.org>
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Link: https://lore.kernel.org/r/20210623134755.235545-1-toke@redhat.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+---
+
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -6557,9 +6557,6 @@ static inline void ieee80211_txq_schedul
+ {
+ }
+ 
+-void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+-			      struct ieee80211_txq *txq, bool force);
+-
+ /**
+  * ieee80211_schedule_txq - schedule a TXQ for transmission
+  *
+@@ -6572,11 +6569,7 @@ void __ieee80211_schedule_txq(struct iee
+  * The driver may call this function if it has buffered packets for
+  * this TXQ internally.
+  */
+-static inline void
+-ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+-{
+-	__ieee80211_schedule_txq(hw, txq, true);
+-}
++void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+ 
+ /**
+  * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
+@@ -6588,12 +6581,8 @@ ieee80211_schedule_txq(struct ieee80211_
+  * The driver may set force=true if it has buffered packets for this TXQ
+  * internally.
+  */
+-static inline void
+-ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+-		     bool force)
+-{
+-	__ieee80211_schedule_txq(hw, txq, force);
+-}
++void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
++			  bool force);
+ 
+ /**
+  * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1461,6 +1461,38 @@ static void sta_apply_mesh_params(struct
+ #endif
+ }
+ 
++static void sta_apply_airtime_params(struct ieee80211_local *local,
++				     struct sta_info *sta,
++				     struct station_parameters *params)
++{
++	u8 ac;
++
++	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
++		struct airtime_sched_info *air_sched = &local->airtime[ac];
++		struct airtime_info *air_info = &sta->airtime[ac];
++		struct txq_info *txqi;
++		u8 tid;
++
++		spin_lock_bh(&air_sched->lock);
++		for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) {
++			if (air_info->weight == params->airtime_weight ||
++			    !sta->sta.txq[tid] ||
++			    ac != ieee80211_ac_from_tid(tid))
++				continue;
++
++			airtime_weight_set(air_info, params->airtime_weight);
++
++			txqi = to_txq_info(sta->sta.txq[tid]);
++			if (RB_EMPTY_NODE(&txqi->schedule_order))
++				continue;
++
++			ieee80211_update_airtime_weight(local, air_sched,
++							0, true);
++		}
++		spin_unlock_bh(&air_sched->lock);
++	}
++}
++
+ static int sta_apply_parameters(struct ieee80211_local *local,
+ 				struct sta_info *sta,
+ 				struct station_parameters *params)
+@@ -1648,7 +1680,8 @@ static int sta_apply_parameters(struct i
+ 		sta_apply_mesh_params(local, sta, params);
+ 
+ 	if (params->airtime_weight)
+-		sta->airtime_weight = params->airtime_weight;
++		sta_apply_airtime_params(local, sta, params);
++
+ 
+ 	/* set the STA state after all sta info from usermode has been set */
+ 	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) ||
+--- a/net/mac80211/debugfs.c
++++ b/net/mac80211/debugfs.c
+@@ -216,14 +216,14 @@ static ssize_t aql_txq_limit_read(struct
+ 			"VI	%u		%u\n"
+ 			"BE	%u		%u\n"
+ 			"BK	%u		%u\n",
+-			local->aql_txq_limit_low[IEEE80211_AC_VO],
+-			local->aql_txq_limit_high[IEEE80211_AC_VO],
+-			local->aql_txq_limit_low[IEEE80211_AC_VI],
+-			local->aql_txq_limit_high[IEEE80211_AC_VI],
+-			local->aql_txq_limit_low[IEEE80211_AC_BE],
+-			local->aql_txq_limit_high[IEEE80211_AC_BE],
+-			local->aql_txq_limit_low[IEEE80211_AC_BK],
+-			local->aql_txq_limit_high[IEEE80211_AC_BK]);
++			local->airtime[IEEE80211_AC_VO].aql_txq_limit_low,
++			local->airtime[IEEE80211_AC_VO].aql_txq_limit_high,
++			local->airtime[IEEE80211_AC_VI].aql_txq_limit_low,
++			local->airtime[IEEE80211_AC_VI].aql_txq_limit_high,
++			local->airtime[IEEE80211_AC_BE].aql_txq_limit_low,
++			local->airtime[IEEE80211_AC_BE].aql_txq_limit_high,
++			local->airtime[IEEE80211_AC_BK].aql_txq_limit_low,
++			local->airtime[IEEE80211_AC_BK].aql_txq_limit_high);
+ 	return simple_read_from_buffer(user_buf, count, ppos,
+ 				       buf, len);
+ }
+@@ -255,11 +255,11 @@ static ssize_t aql_txq_limit_write(struc
+ 	if (ac >= IEEE80211_NUM_ACS)
+ 		return -EINVAL;
+ 
+-	q_limit_low_old = local->aql_txq_limit_low[ac];
+-	q_limit_high_old = local->aql_txq_limit_high[ac];
++	q_limit_low_old = local->airtime[ac].aql_txq_limit_low;
++	q_limit_high_old = local->airtime[ac].aql_txq_limit_high;
+ 
+-	local->aql_txq_limit_low[ac] = q_limit_low;
+-	local->aql_txq_limit_high[ac] = q_limit_high;
++	local->airtime[ac].aql_txq_limit_low = q_limit_low;
++	local->airtime[ac].aql_txq_limit_high = q_limit_high;
+ 
+ 	mutex_lock(&local->sta_mtx);
+ 	list_for_each_entry(sta, &local->sta_list, list) {
+@@ -382,6 +382,46 @@ static const struct file_operations forc
+ 	.llseek = default_llseek,
+ };
+ 
++static ssize_t airtime_read(struct file *file,
++			    char __user *user_buf,
++			    size_t count,
++			    loff_t *ppos)
++{
++	struct ieee80211_local *local = file->private_data;
++	char buf[200];
++	u64 v_t[IEEE80211_NUM_ACS];
++	u64 wt[IEEE80211_NUM_ACS];
++	int len = 0, ac;
++
++	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
++		spin_lock_bh(&local->airtime[ac].lock);
++		v_t[ac] = local->airtime[ac].v_t;
++		wt[ac] = local->airtime[ac].weight_sum;
++		spin_unlock_bh(&local->airtime[ac].lock);
++	}
++	len = scnprintf(buf, sizeof(buf),
++			"\tVO         VI         BE         BK\n"
++			"Virt-t\t%-10llu %-10llu %-10llu %-10llu\n"
++			"Weight\t%-10llu %-10llu %-10llu %-10llu\n",
++			v_t[0],
++			v_t[1],
++			v_t[2],
++			v_t[3],
++			wt[0],
++			wt[1],
++			wt[2],
++			wt[3]);
++
++	return simple_read_from_buffer(user_buf, count, ppos,
++				       buf, len);
++}
++
++static const struct file_operations airtime_ops = {
++	.read = airtime_read,
++	.open = simple_open,
++	.llseek = default_llseek,
++};
++
+ #ifdef CONFIG_PM
+ static ssize_t reset_write(struct file *file, const char __user *user_buf,
+ 			   size_t count, loff_t *ppos)
+@@ -624,7 +664,11 @@ void debugfs_hw_add(struct ieee80211_loc
+ 	if (local->ops->wake_tx_queue)
+ 		DEBUGFS_ADD_MODE(aqm, 0600);
+ 
+-	DEBUGFS_ADD_MODE(airtime_flags, 0600);
++	if (wiphy_ext_feature_isset(local->hw.wiphy,
++				    NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
++		DEBUGFS_ADD_MODE(airtime, 0600);
++		DEBUGFS_ADD_MODE(airtime_flags, 0600);
++	}
+ 
+ 	DEBUGFS_ADD(aql_txq_limit);
+ 	debugfs_create_u32("aql_threshold", 0600,
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -513,6 +513,34 @@ static ssize_t ieee80211_if_fmt_aqm(
+ }
+ IEEE80211_IF_FILE_R(aqm);
+ 
++static ssize_t ieee80211_if_fmt_airtime(
++	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
++{
++	struct ieee80211_local *local = sdata->local;
++	struct ieee80211_txq *txq = sdata->vif.txq;
++	struct airtime_info *air_info;
++	int len;
++
++	if (!txq)
++		return 0;
++
++	spin_lock_bh(&local->airtime[txq->ac].lock);
++	air_info = to_airtime_info(txq);
++	len = scnprintf(buf,
++			buflen,
++			"RX: %llu us\nTX: %llu us\nWeight: %u\n"
++			"Virt-T: %lld us\n",
++			air_info->rx_airtime,
++			air_info->tx_airtime,
++			air_info->weight,
++			air_info->v_t);
++	spin_unlock_bh(&local->airtime[txq->ac].lock);
++
++	return len;
++}
++
++IEEE80211_IF_FILE_R(airtime);
++
+ IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX);
+ 
+ /* IBSS attributes */
+@@ -661,8 +689,10 @@ static void add_common_files(struct ieee
+ 
+ 	if (sdata->local->ops->wake_tx_queue &&
+ 	    sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+-	    sdata->vif.type != NL80211_IFTYPE_NAN)
++	    sdata->vif.type != NL80211_IFTYPE_NAN) {
+ 		DEBUGFS_ADD(aqm);
++		DEBUGFS_ADD(airtime);
++	}
+ }
+ 
+ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
+--- a/net/mac80211/debugfs_sta.c
++++ b/net/mac80211/debugfs_sta.c
+@@ -202,7 +202,7 @@ static ssize_t sta_airtime_read(struct f
+ 	size_t bufsz = 400;
+ 	char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
+ 	u64 rx_airtime = 0, tx_airtime = 0;
+-	s64 deficit[IEEE80211_NUM_ACS];
++	u64 v_t[IEEE80211_NUM_ACS];
+ 	ssize_t rv;
+ 	int ac;
+ 
+@@ -210,18 +210,18 @@ static ssize_t sta_airtime_read(struct f
+ 		return -ENOMEM;
+ 
+ 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+-		spin_lock_bh(&local->active_txq_lock[ac]);
++		spin_lock_bh(&local->airtime[ac].lock);
+ 		rx_airtime += sta->airtime[ac].rx_airtime;
+ 		tx_airtime += sta->airtime[ac].tx_airtime;
+-		deficit[ac] = sta->airtime[ac].deficit;
+-		spin_unlock_bh(&local->active_txq_lock[ac]);
++		v_t[ac] = sta->airtime[ac].v_t;
++		spin_unlock_bh(&local->airtime[ac].lock);
+ 	}
+ 
+ 	p += scnprintf(p, bufsz + buf - p,
+ 		"RX: %llu us\nTX: %llu us\nWeight: %u\n"
+-		"Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
+-		rx_airtime, tx_airtime, sta->airtime_weight,
+-		deficit[0], deficit[1], deficit[2], deficit[3]);
++		"Virt-T: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
++		rx_airtime, tx_airtime, sta->airtime[0].weight,
++		v_t[0], v_t[1], v_t[2], v_t[3]);
+ 
+ 	rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ 	kfree(buf);
+@@ -236,11 +236,11 @@ static ssize_t sta_airtime_write(struct
+ 	int ac;
+ 
+ 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+-		spin_lock_bh(&local->active_txq_lock[ac]);
++		spin_lock_bh(&local->airtime[ac].lock);
+ 		sta->airtime[ac].rx_airtime = 0;
+ 		sta->airtime[ac].tx_airtime = 0;
+-		sta->airtime[ac].deficit = sta->airtime_weight;
+-		spin_unlock_bh(&local->active_txq_lock[ac]);
++		sta->airtime[ac].v_t = 0;
++		spin_unlock_bh(&local->airtime[ac].lock);
+ 	}
+ 
+ 	return count;
+@@ -263,10 +263,10 @@ static ssize_t sta_aql_read(struct file
+ 		return -ENOMEM;
+ 
+ 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+-		spin_lock_bh(&local->active_txq_lock[ac]);
++		spin_lock_bh(&local->airtime[ac].lock);
+ 		q_limit_l[ac] = sta->airtime[ac].aql_limit_low;
+ 		q_limit_h[ac] = sta->airtime[ac].aql_limit_high;
+-		spin_unlock_bh(&local->active_txq_lock[ac]);
++		spin_unlock_bh(&local->airtime[ac].lock);
+ 		q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending);
+ 	}
+ 
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -840,20 +840,16 @@ enum txq_info_flags {
+  * @def_flow: used as a fallback flow when a packet destined to @tin hashes to
+  *	a fq_flow which is already owned by a different tin
+  * @def_cvars: codel vars for @def_flow
+- * @frags: used to keep fragments created after dequeue
+  * @schedule_order: used with ieee80211_local->active_txqs
+- * @schedule_round: counter to prevent infinite loops on TXQ scheduling
++ * @frags: used to keep fragments created after dequeue
+  */
+ struct txq_info {
+ 	struct fq_tin tin;
+ 	struct codel_vars def_cvars;
+ 	struct codel_stats cstats;
+-
+-	u16 schedule_round;
+-	struct list_head schedule_order;
++	struct rb_node schedule_order;
+ 
+ 	struct sk_buff_head frags;
+-
+ 	unsigned long flags;
+ 
+ 	/* keep last! */
+@@ -930,6 +926,8 @@ struct ieee80211_sub_if_data {
+ 	struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
+ 	struct mac80211_qos_map __rcu *qos_map;
+ 
++	struct airtime_info airtime[IEEE80211_NUM_ACS];
++
+ 	struct work_struct csa_finalize_work;
+ 	bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
+ 	struct cfg80211_chan_def csa_chandef;
+@@ -1143,6 +1141,44 @@ enum mac80211_scan_state {
+ 	SCAN_ABORT,
+ };
+ 
++/**
++ * struct airtime_sched_info - state used for airtime scheduling and AQL
++ *
++ * @lock: spinlock that protects all the fields in this struct
++ * @active_txqs: rbtree of currently backlogged queues, sorted by virtual time
++ * @schedule_pos: the current position maintained while a driver walks the tree
++ *                with ieee80211_next_txq()
++ * @active_list: list of struct airtime_info structs that were active within
++ *               the last AIRTIME_ACTIVE_DURATION (100 ms), used to compute
++ *               weight_sum
++ * @last_weight_update: used for rate limiting walking active_list
++ * @last_schedule_activity: tracks the last time a transmission was scheduled;
++ *                      for catching up v_t if no stations are eligible for
++ *                      transmission.
++ * @v_t: global virtual time; queues with v_t < this are eligible for
++ *       transmission
++ * @weight_sum: total sum of all active stations used for dividing airtime
++ * @weight_sum_reciprocal: reciprocal of weight_sum (to avoid divisions in fast
++ *                         path - see comment above
++ *                         IEEE80211_RECIPROCAL_DIVISOR_64)
++ * @aql_txq_limit_low: AQL limit when total outstanding airtime
++ *                     is < IEEE80211_AQL_THRESHOLD
++ * @aql_txq_limit_high: AQL limit when total outstanding airtime
++ *                      is > IEEE80211_AQL_THRESHOLD
++ */
++struct airtime_sched_info {
++	spinlock_t lock;
++	struct rb_root_cached active_txqs;
++	struct rb_node *schedule_pos;
++	struct list_head active_list;
++	u64 last_weight_update;
++	u64 last_schedule_activity;
++	u64 v_t;
++	u64 weight_sum;
++	u64 weight_sum_reciprocal;
++	u32 aql_txq_limit_low;
++	u32 aql_txq_limit_high;
++};
+ DECLARE_STATIC_KEY_FALSE(aql_disable);
+ 
+ struct ieee80211_local {
+@@ -1156,13 +1192,8 @@ struct ieee80211_local {
+ 	struct codel_params cparams;
+ 
+ 	/* protects active_txqs and txqi->schedule_order */
+-	spinlock_t active_txq_lock[IEEE80211_NUM_ACS];
+-	struct list_head active_txqs[IEEE80211_NUM_ACS];
+-	u16 schedule_round[IEEE80211_NUM_ACS];
+-
++	struct airtime_sched_info airtime[IEEE80211_NUM_ACS];
+ 	u16 airtime_flags;
+-	u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
+-	u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
+ 	u32 aql_threshold;
+ 	atomic_t aql_total_pending_airtime;
+ 
+@@ -1581,6 +1612,125 @@ static inline bool txq_has_queue(struct
+ 	return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets);
+ }
+ 
++static inline struct airtime_info *to_airtime_info(struct ieee80211_txq *txq)
++{
++	struct ieee80211_sub_if_data *sdata;
++	struct sta_info *sta;
++
++	if (txq->sta) {
++		sta = container_of(txq->sta, struct sta_info, sta);
++		return &sta->airtime[txq->ac];
++	}
++
++	sdata = vif_to_sdata(txq->vif);
++	return &sdata->airtime[txq->ac];
++}
++
++/* To avoid divisions in the fast path, we keep pre-computed reciprocals for
++ * airtime weight calculations. There are two different weights to keep track
++ * of: The per-station weight and the sum of weights per phy.
++ *
++ * For the per-station weights (kept in airtime_info below), we use 32-bit
++ * reciprocals with a divisor of 2^19. This lets us keep the multiplications and
++ * divisions for the station weights as 32-bit operations at the cost of a bit
++ * of rounding error for high weights; but the choice of divisor keeps rounding
++ * errors <10% for weights <2^15, assuming no more than 8ms of airtime is
++ * reported at a time.
++ *
++ * For the per-phy sum of weights the values can get higher, so we use 64-bit
++ * operations for those with a 32-bit divisor, which should avoid any
++ * significant rounding errors.
++ */
++#define IEEE80211_RECIPROCAL_DIVISOR_64 0x100000000ULL
++#define IEEE80211_RECIPROCAL_SHIFT_64 32
++#define IEEE80211_RECIPROCAL_DIVISOR_32 0x80000U
++#define IEEE80211_RECIPROCAL_SHIFT_32 19
++
++static inline void airtime_weight_set(struct airtime_info *air_info, u16 weight)
++{
++	if (air_info->weight == weight)
++		return;
++
++	air_info->weight = weight;
++	if (weight) {
++		air_info->weight_reciprocal =
++			IEEE80211_RECIPROCAL_DIVISOR_32 / weight;
++	} else {
++		air_info->weight_reciprocal = 0;
++	}
++}
++
++static inline void airtime_weight_sum_set(struct airtime_sched_info *air_sched,
++					  int weight_sum)
++{
++	if (air_sched->weight_sum == weight_sum)
++		return;
++
++	air_sched->weight_sum = weight_sum;
++	if (air_sched->weight_sum) {
++		air_sched->weight_sum_reciprocal = IEEE80211_RECIPROCAL_DIVISOR_64;
++		do_div(air_sched->weight_sum_reciprocal, air_sched->weight_sum);
++	} else {
++		air_sched->weight_sum_reciprocal = 0;
++	}
++}
++
++/* A problem when trying to enforce airtime fairness is that we want to divide
++ * the airtime between the currently *active* stations. However, basing this on
++ * the instantaneous queue state of stations doesn't work, as queues tend to
++ * oscillate very quickly between empty and occupied, leading to the scheduler
++ * thinking only a single station is active when deciding whether to allow
++ * transmission (and thus not throttling correctly).
++ *
++ * To fix this we use a timer-based notion of activity: a station is considered
++ * active if it has been scheduled within the last 100 ms; we keep a separate
++ * list of all the stations considered active in this manner, and lazily update
++ * the total weight of active stations from this list (filtering the stations in
++ * the list by their 'last active' time).
++ *
++ * We add one additional safeguard to guard against stations that manage to get
++ * scheduled every 100 ms but don't transmit a lot of data, and thus don't use
++ * up any airtime. Such stations would be able to get priority for an extended
++ * period of time if they do start transmitting at full capacity again, and so
++ * we add an explicit maximum for how far behind a station is allowed to fall in
++ * the virtual airtime domain. This limit is set to a relatively high value of
++ * 20 ms because the main mechanism for catching up idle stations is the active
++ * state as described above; i.e., the hard limit should only be hit in
++ * pathological cases.
++ */
++#define AIRTIME_ACTIVE_DURATION (100 * NSEC_PER_MSEC)
++#define AIRTIME_MAX_BEHIND 20000 /* 20 ms */
++
++static inline bool airtime_is_active(struct airtime_info *air_info, u64 now)
++{
++	return air_info->last_scheduled >= now - AIRTIME_ACTIVE_DURATION;
++}
++
++static inline void airtime_set_active(struct airtime_sched_info *air_sched,
++				      struct airtime_info *air_info, u64 now)
++{
++	air_info->last_scheduled = now;
++	air_sched->last_schedule_activity = now;
++	list_move_tail(&air_info->list, &air_sched->active_list);
++}
++
++static inline bool airtime_catchup_v_t(struct airtime_sched_info *air_sched,
++				       u64 v_t, u64 now)
++{
++	air_sched->v_t = v_t;
++	return true;
++}
++
++static inline void init_airtime_info(struct airtime_info *air_info,
++				     struct airtime_sched_info *air_sched)
++{
++	atomic_set(&air_info->aql_tx_pending, 0);
++	air_info->aql_limit_low = air_sched->aql_txq_limit_low;
++	air_info->aql_limit_high = air_sched->aql_txq_limit_high;
++	airtime_weight_set(air_info, IEEE80211_DEFAULT_AIRTIME_WEIGHT);
++	INIT_LIST_HEAD(&air_info->list);
++}
++
+ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
+ {
+ 	return ether_addr_equal(raddr, addr) ||
+@@ -1821,6 +1971,14 @@ int ieee80211_tx_control_port(struct wip
+ 			      u64 *cookie);
+ int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev,
+ 			      const u8 *buf, size_t len);
++void ieee80211_resort_txq(struct ieee80211_hw *hw,
++			  struct ieee80211_txq *txq);
++void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
++			      struct ieee80211_txq *txq,
++			      bool purge);
++void ieee80211_update_airtime_weight(struct ieee80211_local *local,
++				     struct airtime_sched_info *air_sched,
++				     u64 now, bool force);
+ 
+ /* HT */
+ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -2088,6 +2088,9 @@ int ieee80211_if_add(struct ieee80211_lo
+ 		}
+ 	}
+ 
++	for (i = 0; i < IEEE80211_NUM_ACS; i++)
++		init_airtime_info(&sdata->airtime[i], &local->airtime[i]);
++
+ 	ieee80211_set_default_queues(sdata);
+ 
+ 	sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -693,10 +693,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_
+ 	spin_lock_init(&local->queue_stop_reason_lock);
+ 
+ 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+-		INIT_LIST_HEAD(&local->active_txqs[i]);
+-		spin_lock_init(&local->active_txq_lock[i]);
+-		local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
+-		local->aql_txq_limit_high[i] =
++		struct airtime_sched_info *air_sched = &local->airtime[i];
++
++		air_sched->active_txqs = RB_ROOT_CACHED;
++		INIT_LIST_HEAD(&air_sched->active_list);
++		spin_lock_init(&air_sched->lock);
++		air_sched->aql_txq_limit_low = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
++		air_sched->aql_txq_limit_high =
+ 			IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H;
+ 	}
+ 
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -1573,12 +1573,8 @@ static void sta_ps_start(struct sta_info
+ 
+ 	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
+ 		struct ieee80211_txq *txq = sta->sta.txq[tid];
+-		struct txq_info *txqi = to_txq_info(txq);
+ 
+-		spin_lock(&local->active_txq_lock[txq->ac]);
+-		if (!list_empty(&txqi->schedule_order))
+-			list_del_init(&txqi->schedule_order);
+-		spin_unlock(&local->active_txq_lock[txq->ac]);
++		ieee80211_unschedule_txq(&local->hw, txq, false);
+ 
+ 		if (txq_has_queue(txq))
+ 			set_bit(tid, &sta->txq_buffered_tids);
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -426,15 +426,11 @@ struct sta_info *sta_info_alloc(struct i
+ 	if (sta_prepare_rate_control(local, sta, gfp))
+ 		goto free_txq;
+ 
+-	sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;
+ 
+ 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ 		skb_queue_head_init(&sta->ps_tx_buf[i]);
+ 		skb_queue_head_init(&sta->tx_filtered[i]);
+-		sta->airtime[i].deficit = sta->airtime_weight;
+-		atomic_set(&sta->airtime[i].aql_tx_pending, 0);
+-		sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i];
+-		sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i];
++		init_airtime_info(&sta->airtime[i], &local->airtime[i]);
+ 	}
+ 
+ 	for (i = 0; i < IEEE80211_NUM_TIDS; i++)
+@@ -1893,24 +1889,59 @@ void ieee80211_sta_set_buffered(struct i
+ }
+ EXPORT_SYMBOL(ieee80211_sta_set_buffered);
+ 
+-void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
+-				    u32 tx_airtime, u32 rx_airtime)
++void ieee80211_register_airtime(struct ieee80211_txq *txq,
++				u32 tx_airtime, u32 rx_airtime)
+ {
+-	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+-	struct ieee80211_local *local = sta->sdata->local;
+-	u8 ac = ieee80211_ac_from_tid(tid);
++	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
++	struct ieee80211_local *local = sdata->local;
++	u64 weight_sum, weight_sum_reciprocal;
++	struct airtime_sched_info *air_sched;
++	struct airtime_info *air_info;
+ 	u32 airtime = 0;
+ 
+-	if (sta->local->airtime_flags & AIRTIME_USE_TX)
++	air_sched = &local->airtime[txq->ac];
++	air_info = to_airtime_info(txq);
++
++	if (local->airtime_flags & AIRTIME_USE_TX)
+ 		airtime += tx_airtime;
+-	if (sta->local->airtime_flags & AIRTIME_USE_RX)
++	if (local->airtime_flags & AIRTIME_USE_RX)
+ 		airtime += rx_airtime;
+ 
+-	spin_lock_bh(&local->active_txq_lock[ac]);
+-	sta->airtime[ac].tx_airtime += tx_airtime;
+-	sta->airtime[ac].rx_airtime += rx_airtime;
+-	sta->airtime[ac].deficit -= airtime;
+-	spin_unlock_bh(&local->active_txq_lock[ac]);
++	/* Weights scale so the unit weight is 256 */
++	airtime <<= 8;
++
++	spin_lock_bh(&air_sched->lock);
++
++	air_info->tx_airtime += tx_airtime;
++	air_info->rx_airtime += rx_airtime;
++
++	if (air_sched->weight_sum) {
++		weight_sum = air_sched->weight_sum;
++		weight_sum_reciprocal = air_sched->weight_sum_reciprocal;
++	} else {
++		weight_sum = air_info->weight;
++		weight_sum_reciprocal = air_info->weight_reciprocal;
++	}
++
++	/* Round the calculation of global vt */
++	air_sched->v_t += (u64)((airtime + (weight_sum >> 1)) *
++				weight_sum_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_64;
++	air_info->v_t += (u32)((airtime + (air_info->weight >> 1)) *
++			       air_info->weight_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_32;
++	ieee80211_resort_txq(&local->hw, txq);
++
++	spin_unlock_bh(&air_sched->lock);
++}
++
++void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
++				    u32 tx_airtime, u32 rx_airtime)
++{
++	struct ieee80211_txq *txq = pubsta->txq[tid];
++
++	if (!txq)
++		return;
++
++	ieee80211_register_airtime(txq, tx_airtime, rx_airtime);
+ }
+ EXPORT_SYMBOL(ieee80211_sta_register_airtime);
+ 
+@@ -2354,7 +2385,7 @@ void sta_set_sinfo(struct sta_info *sta,
+ 	}
+ 
+ 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
+-		sinfo->airtime_weight = sta->airtime_weight;
++		sinfo->airtime_weight = sta->airtime[0].weight;
+ 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
+ 	}
+ 
+--- a/net/mac80211/sta_info.h
++++ b/net/mac80211/sta_info.h
+@@ -135,18 +135,25 @@ enum ieee80211_agg_stop_reason {
+ #define AIRTIME_USE_TX		BIT(0)
+ #define AIRTIME_USE_RX		BIT(1)
+ 
++
+ struct airtime_info {
+ 	u64 rx_airtime;
+ 	u64 tx_airtime;
+-	s64 deficit;
++	u64 v_t;
++	u64 last_scheduled;
++	struct list_head list;
+ 	atomic_t aql_tx_pending; /* Estimated airtime for frames pending */
+ 	u32 aql_limit_low;
+ 	u32 aql_limit_high;
++	u32 weight_reciprocal;
++	u16 weight;
+ };
+ 
+ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
+ 					  struct sta_info *sta, u8 ac,
+ 					  u16 tx_airtime, bool tx_completed);
++void ieee80211_register_airtime(struct ieee80211_txq *txq,
++				u32 tx_airtime, u32 rx_airtime);
+ 
+ struct sta_info;
+ 
+@@ -515,7 +522,6 @@ struct ieee80211_fragment_cache {
+  * @tid_seq: per-TID sequence numbers for sending to this STA
+  * @airtime: per-AC struct airtime_info describing airtime statistics for this
+  *	station
+- * @airtime_weight: station weight for airtime fairness calculation purposes
+  * @ampdu_mlme: A-MPDU state machine state
+  * @mesh: mesh STA information
+  * @debugfs_dir: debug filesystem directory dentry
+@@ -646,7 +652,6 @@ struct sta_info {
+ 	u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
+ 
+ 	struct airtime_info airtime[IEEE80211_NUM_ACS];
+-	u16 airtime_weight;
+ 
+ 	/*
+ 	 * Aggregation information, locked with lock.
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -972,6 +972,25 @@ static void __ieee80211_tx_status(struct
+ 		if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
+ 			ieee80211_frame_acked(sta, skb);
+ 
++	} else if (wiphy_ext_feature_isset(local->hw.wiphy,
++					   NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
++		struct ieee80211_sub_if_data *sdata;
++		struct ieee80211_txq *txq;
++		u32 airtime;
++
++		/* Account airtime to multicast queue */
++		sdata = ieee80211_sdata_from_skb(local, skb);
++
++		if (sdata && (txq = sdata->vif.txq)) {
++			airtime = info->status.tx_time ?:
++				ieee80211_calc_expected_tx_airtime(hw,
++								   &sdata->vif,
++								   NULL,
++								   skb->len,
++								   false);
++
++			ieee80211_register_airtime(txq, airtime, 0);
++		}
+ 	}
+ 
+ 	/* SNMP counters
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -18,6 +18,7 @@
+ #include <linux/bitmap.h>
+ #include <linux/rcupdate.h>
+ #include <linux/export.h>
++#include <linux/timekeeping.h>
+ #include <net/net_namespace.h>
+ #include <net/ieee80211_radiotap.h>
+ #include <net/cfg80211.h>
+@@ -1489,7 +1490,7 @@ void ieee80211_txq_init(struct ieee80211
+ 	codel_vars_init(&txqi->def_cvars);
+ 	codel_stats_init(&txqi->cstats);
+ 	__skb_queue_head_init(&txqi->frags);
+-	INIT_LIST_HEAD(&txqi->schedule_order);
++	RB_CLEAR_NODE(&txqi->schedule_order);
+ 
+ 	txqi->txq.vif = &sdata->vif;
+ 
+@@ -1533,9 +1534,7 @@ void ieee80211_txq_purge(struct ieee8021
+ 	ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
+ 	spin_unlock_bh(&fq->lock);
+ 
+-	spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
+-	list_del_init(&txqi->schedule_order);
+-	spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
++	ieee80211_unschedule_txq(&local->hw, &txqi->txq, true);
+ }
+ 
+ void ieee80211_txq_set_params(struct ieee80211_local *local)
+@@ -3819,102 +3818,259 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
+ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
+ {
+ 	struct ieee80211_local *local = hw_to_local(hw);
++	struct airtime_sched_info *air_sched;
++	u64 now = ktime_get_boottime_ns();
+ 	struct ieee80211_txq *ret = NULL;
+-	struct txq_info *txqi = NULL, *head = NULL;
+-	bool found_eligible_txq = false;
++	struct airtime_info *air_info;
++	struct txq_info *txqi = NULL;
++	struct rb_node *node;
++	bool first = false;
+ 
+-	spin_lock_bh(&local->active_txq_lock[ac]);
++	air_sched = &local->airtime[ac];
++	spin_lock_bh(&air_sched->lock);
+ 
+- begin:
+-	txqi = list_first_entry_or_null(&local->active_txqs[ac],
+-					struct txq_info,
+-					schedule_order);
+-	if (!txqi)
++	node = air_sched->schedule_pos;
++
++begin:
++	if (!node) {
++		node = rb_first_cached(&air_sched->active_txqs);
++		first = true;
++	} else {
++		node = rb_next(node);
++	}
++
++	if (!node)
+ 		goto out;
+ 
+-	if (txqi == head) {
+-		if (!found_eligible_txq)
+-			goto out;
+-		else
+-			found_eligible_txq = false;
++	txqi = container_of(node, struct txq_info, schedule_order);
++	air_info = to_airtime_info(&txqi->txq);
++
++	if (air_info->v_t > air_sched->v_t &&
++	    (!first || !airtime_catchup_v_t(air_sched, air_info->v_t, now)))
++		goto out;
++
++	if (!ieee80211_txq_airtime_check(hw, &txqi->txq)) {
++		first = false;
++		goto begin;
+ 	}
+ 
+-	if (!head)
+-		head = txqi;
++	air_sched->schedule_pos = node;
++	air_sched->last_schedule_activity = now;
++	ret = &txqi->txq;
++out:
++	spin_unlock_bh(&air_sched->lock);
++	return ret;
++}
++EXPORT_SYMBOL(ieee80211_next_txq);
+ 
+-	if (txqi->txq.sta) {
+-		struct sta_info *sta = container_of(txqi->txq.sta,
+-						    struct sta_info, sta);
+-		bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq);
+-		s64 deficit = sta->airtime[txqi->txq.ac].deficit;
++static void __ieee80211_insert_txq(struct rb_root_cached *root,
++				   struct txq_info *txqi)
++{
++	struct rb_node **new = &root->rb_root.rb_node;
++	struct airtime_info *old_air, *new_air;
++	struct rb_node *parent = NULL;
++	struct txq_info *__txqi;
++	bool leftmost = true;
++
++	while (*new) {
++		parent = *new;
++		__txqi = rb_entry(parent, struct txq_info, schedule_order);
++		old_air = to_airtime_info(&__txqi->txq);
++		new_air = to_airtime_info(&txqi->txq);
+ 
+-		if (aql_check)
+-			found_eligible_txq = true;
++		if (new_air->v_t <= old_air->v_t) {
++			new = &parent->rb_left;
++		} else {
++			new = &parent->rb_right;
++			leftmost = false;
++		}
++	}
+ 
+-		if (deficit < 0)
+-			sta->airtime[txqi->txq.ac].deficit +=
+-				sta->airtime_weight;
+-
+-		if (deficit < 0 || !aql_check) {
+-			list_move_tail(&txqi->schedule_order,
+-				       &local->active_txqs[txqi->txq.ac]);
+-			goto begin;
++	rb_link_node(&txqi->schedule_order, parent, new);
++	rb_insert_color_cached(&txqi->schedule_order, root, leftmost);
++}
++
++void ieee80211_resort_txq(struct ieee80211_hw *hw,
++			  struct ieee80211_txq *txq)
++{
++	struct airtime_info *air_info = to_airtime_info(txq);
++	struct ieee80211_local *local = hw_to_local(hw);
++	struct txq_info *txqi = to_txq_info(txq);
++	struct airtime_sched_info *air_sched;
++
++	air_sched = &local->airtime[txq->ac];
++
++	lockdep_assert_held(&air_sched->lock);
++
++	if (!RB_EMPTY_NODE(&txqi->schedule_order)) {
++		struct airtime_info *a_prev = NULL, *a_next = NULL;
++		struct txq_info *t_prev, *t_next;
++		struct rb_node *n_prev, *n_next;
++
++		/* Erasing a node can cause an expensive rebalancing operation,
++		 * so we check the previous and next nodes first and only remove
++		 * and re-insert if the current node is not already in the
++		 * correct position.
++		 */
++		if ((n_prev = rb_prev(&txqi->schedule_order)) != NULL) {
++			t_prev = container_of(n_prev, struct txq_info,
++					      schedule_order);
++			a_prev = to_airtime_info(&t_prev->txq);
++		}
++
++		if ((n_next = rb_next(&txqi->schedule_order)) != NULL) {
++			t_next = container_of(n_next, struct txq_info,
++					      schedule_order);
++			a_next = to_airtime_info(&t_next->txq);
+ 		}
++
++		if ((!a_prev || a_prev->v_t <= air_info->v_t) &&
++		    (!a_next || a_next->v_t > air_info->v_t))
++			return;
++
++		if (air_sched->schedule_pos == &txqi->schedule_order)
++			air_sched->schedule_pos = n_prev;
++
++		rb_erase_cached(&txqi->schedule_order,
++				&air_sched->active_txqs);
++		RB_CLEAR_NODE(&txqi->schedule_order);
++		__ieee80211_insert_txq(&air_sched->active_txqs, txqi);
+ 	}
++}
++
++void ieee80211_update_airtime_weight(struct ieee80211_local *local,
++				     struct airtime_sched_info *air_sched,
++				     u64 now, bool force)
++{
++	struct airtime_info *air_info, *tmp;
++	u64 weight_sum = 0;
++
++	if (unlikely(!now))
++		now = ktime_get_boottime_ns();
++
++	lockdep_assert_held(&air_sched->lock);
++
++	if (!force && (air_sched->last_weight_update <
++		       now - AIRTIME_ACTIVE_DURATION))
++		return;
++
++	list_for_each_entry_safe(air_info, tmp,
++				 &air_sched->active_list, list) {
++		if (airtime_is_active(air_info, now))
++			weight_sum += air_info->weight;
++		else
++			list_del_init(&air_info->list);
++	}
++	airtime_weight_sum_set(air_sched, weight_sum);
++	air_sched->last_weight_update = now;
++}
+ 
++void ieee80211_schedule_txq(struct ieee80211_hw *hw,
++			    struct ieee80211_txq *txq)
++	__acquires(txq_lock) __releases(txq_lock)
++{
++	struct ieee80211_local *local = hw_to_local(hw);
++	struct txq_info *txqi = to_txq_info(txq);
++	struct airtime_sched_info *air_sched;
++	u64 now = ktime_get_boottime_ns();
++	struct airtime_info *air_info;
++	u8 ac = txq->ac;
++	bool was_active;
+ 
+-	if (txqi->schedule_round == local->schedule_round[ac])
++	air_sched = &local->airtime[ac];
++	air_info = to_airtime_info(txq);
++
++	spin_lock_bh(&air_sched->lock);
++	was_active = airtime_is_active(air_info, now);
++	airtime_set_active(air_sched, air_info, now);
++
++	if (!RB_EMPTY_NODE(&txqi->schedule_order))
+ 		goto out;
+ 
+-	list_del_init(&txqi->schedule_order);
+-	txqi->schedule_round = local->schedule_round[ac];
+-	ret = &txqi->txq;
++	/* If the station has been inactive for a while, catch up its v_t so it
++	 * doesn't get indefinite priority; see comment above the definition of
++	 * AIRTIME_MAX_BEHIND.
++	 */
++	if ((!was_active && air_info->v_t < air_sched->v_t) ||
++	    air_info->v_t < air_sched->v_t - AIRTIME_MAX_BEHIND)
++		air_info->v_t = air_sched->v_t;
++
++	ieee80211_update_airtime_weight(local, air_sched, now, !was_active);
++	__ieee80211_insert_txq(&air_sched->active_txqs, txqi);
+ 
+ out:
+-	spin_unlock_bh(&local->active_txq_lock[ac]);
+-	return ret;
++	spin_unlock_bh(&air_sched->lock);
+ }
+-EXPORT_SYMBOL(ieee80211_next_txq);
++EXPORT_SYMBOL(ieee80211_schedule_txq);
+ 
+-void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+-			      struct ieee80211_txq *txq,
+-			      bool force)
++static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw,
++				       struct ieee80211_txq *txq,
++				       bool purge)
+ {
+ 	struct ieee80211_local *local = hw_to_local(hw);
+ 	struct txq_info *txqi = to_txq_info(txq);
++	struct airtime_sched_info *air_sched;
++	struct airtime_info *air_info;
+ 
+-	spin_lock_bh(&local->active_txq_lock[txq->ac]);
++	air_sched = &local->airtime[txq->ac];
++	air_info = to_airtime_info(&txqi->txq);
+ 
+-	if (list_empty(&txqi->schedule_order) &&
+-	    (force || !skb_queue_empty(&txqi->frags) ||
+-	     txqi->tin.backlog_packets)) {
+-		/* If airtime accounting is active, always enqueue STAs at the
+-		 * head of the list to ensure that they only get moved to the
+-		 * back by the airtime DRR scheduler once they have a negative
+-		 * deficit. A station that already has a negative deficit will
+-		 * get immediately moved to the back of the list on the next
+-		 * call to ieee80211_next_txq().
+-		 */
+-		if (txqi->txq.sta && local->airtime_flags &&
+-		    wiphy_ext_feature_isset(local->hw.wiphy,
+-					    NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
+-			list_add(&txqi->schedule_order,
+-				 &local->active_txqs[txq->ac]);
+-		else
+-			list_add_tail(&txqi->schedule_order,
+-				      &local->active_txqs[txq->ac]);
++	lockdep_assert_held(&air_sched->lock);
++
++	if (purge) {
++		list_del_init(&air_info->list);
++		ieee80211_update_airtime_weight(local, air_sched, 0, true);
+ 	}
+ 
+-	spin_unlock_bh(&local->active_txq_lock[txq->ac]);
++	if (RB_EMPTY_NODE(&txqi->schedule_order))
++		return;
++
++	if (air_sched->schedule_pos == &txqi->schedule_order)
++		air_sched->schedule_pos = rb_prev(&txqi->schedule_order);
++
++	if (!purge)
++		airtime_set_active(air_sched, air_info,
++				   ktime_get_boottime_ns());
++
++	rb_erase_cached(&txqi->schedule_order,
++			&air_sched->active_txqs);
++	RB_CLEAR_NODE(&txqi->schedule_order);
++}
++
++void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
++			      struct ieee80211_txq *txq,
++			      bool purge)
++	__acquires(txq_lock) __releases(txq_lock)
++{
++	struct ieee80211_local *local = hw_to_local(hw);
++
++	spin_lock_bh(&local->airtime[txq->ac].lock);
++	__ieee80211_unschedule_txq(hw, txq, purge);
++	spin_unlock_bh(&local->airtime[txq->ac].lock);
++}
++
++void ieee80211_return_txq(struct ieee80211_hw *hw,
++			  struct ieee80211_txq *txq, bool force)
++{
++	struct ieee80211_local *local = hw_to_local(hw);
++	struct txq_info *txqi = to_txq_info(txq);
++
++	spin_lock_bh(&local->airtime[txq->ac].lock);
++
++	if (!RB_EMPTY_NODE(&txqi->schedule_order) && !force &&
++	    !txq_has_queue(txq))
++		__ieee80211_unschedule_txq(hw, txq, false);
++
++	spin_unlock_bh(&local->airtime[txq->ac].lock);
+ }
+-EXPORT_SYMBOL(__ieee80211_schedule_txq);
++EXPORT_SYMBOL(ieee80211_return_txq);
+ 
+ DEFINE_STATIC_KEY_FALSE(aql_disable);
+ 
+ bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
+ 				 struct ieee80211_txq *txq)
+ {
+-	struct sta_info *sta;
++	struct airtime_info *air_info = to_airtime_info(txq);
+ 	struct ieee80211_local *local = hw_to_local(hw);
+ 
+ 	if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
+@@ -3929,15 +4085,12 @@ bool ieee80211_txq_airtime_check(struct
+ 	if (unlikely(txq->tid == IEEE80211_NUM_TIDS))
+ 		return true;
+ 
+-	sta = container_of(txq->sta, struct sta_info, sta);
+-	if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
+-	    sta->airtime[txq->ac].aql_limit_low)
++	if (atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_low)
+ 		return true;
+ 
+ 	if (atomic_read(&local->aql_total_pending_airtime) <
+ 	    local->aql_threshold &&
+-	    atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
+-	    sta->airtime[txq->ac].aql_limit_high)
++	    atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_high)
+ 		return true;
+ 
+ 	return false;
+@@ -3947,60 +4100,59 @@ EXPORT_SYMBOL(ieee80211_txq_airtime_chec
+ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
+ 				struct ieee80211_txq *txq)
+ {
++	struct txq_info *first_txqi = NULL, *txqi = to_txq_info(txq);
+ 	struct ieee80211_local *local = hw_to_local(hw);
+-	struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
+-	struct sta_info *sta;
+-	u8 ac = txq->ac;
++	struct airtime_sched_info *air_sched;
++	struct airtime_info *air_info;
++	struct rb_node *node = NULL;
++	bool ret = false;
++	u64 now;
+ 
+-	spin_lock_bh(&local->active_txq_lock[ac]);
+ 
+-	if (!txqi->txq.sta)
+-		goto out;
++	if (!ieee80211_txq_airtime_check(hw, txq))
++		return false;
++
++	air_sched = &local->airtime[txq->ac];
++	spin_lock_bh(&air_sched->lock);
+ 
+-	if (list_empty(&txqi->schedule_order))
++	if (RB_EMPTY_NODE(&txqi->schedule_order))
+ 		goto out;
+ 
+-	list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
+-				 schedule_order) {
+-		if (iter == txqi)
+-			break;
++	now = ktime_get_boottime_ns();
+ 
+-		if (!iter->txq.sta) {
+-			list_move_tail(&iter->schedule_order,
+-				       &local->active_txqs[ac]);
+-			continue;
+-		}
+-		sta = container_of(iter->txq.sta, struct sta_info, sta);
+-		if (sta->airtime[ac].deficit < 0)
+-			sta->airtime[ac].deficit += sta->airtime_weight;
+-		list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
++	/* Like in ieee80211_next_txq(), make sure the first station in the
++	 * scheduling order is eligible for transmission to avoid starvation.
++	 */
++	node = rb_first_cached(&air_sched->active_txqs);
++	if (node) {
++		first_txqi = container_of(node, struct txq_info,
++					  schedule_order);
++		air_info = to_airtime_info(&first_txqi->txq);
++
++		if (air_sched->v_t < air_info->v_t)
++			airtime_catchup_v_t(air_sched, air_info->v_t, now);
+ 	}
+ 
+-	sta = container_of(txqi->txq.sta, struct sta_info, sta);
+-	if (sta->airtime[ac].deficit >= 0)
+-		goto out;
+-
+-	sta->airtime[ac].deficit += sta->airtime_weight;
+-	list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+-	spin_unlock_bh(&local->active_txq_lock[ac]);
++	air_info = to_airtime_info(&txqi->txq);
++	if (air_info->v_t <= air_sched->v_t) {
++		air_sched->last_schedule_activity = now;
++		ret = true;
++	}
+ 
+-	return false;
+ out:
+-	if (!list_empty(&txqi->schedule_order))
+-		list_del_init(&txqi->schedule_order);
+-	spin_unlock_bh(&local->active_txq_lock[ac]);
+-
+-	return true;
++	spin_unlock_bh(&air_sched->lock);
++	return ret;
+ }
+ EXPORT_SYMBOL(ieee80211_txq_may_transmit);
+ 
+ void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
+ {
+ 	struct ieee80211_local *local = hw_to_local(hw);
++	struct airtime_sched_info *air_sched = &local->airtime[ac];
+ 
+-	spin_lock_bh(&local->active_txq_lock[ac]);
+-	local->schedule_round[ac]++;
+-	spin_unlock_bh(&local->active_txq_lock[ac]);
++	spin_lock_bh(&air_sched->lock);
++	air_sched->schedule_pos = NULL;
++	spin_unlock_bh(&air_sched->lock);
+ }
+ EXPORT_SYMBOL(ieee80211_txq_schedule_start);
+ 
diff --git a/package/kernel/mac80211/patches/subsys/500-mac80211_configure_antenna_gain.patch b/package/kernel/mac80211/patches/subsys/500-mac80211_configure_antenna_gain.patch
index 406c3a2ee3..cff870fcd6 100644
--- a/package/kernel/mac80211/patches/subsys/500-mac80211_configure_antenna_gain.patch
+++ b/package/kernel/mac80211/patches/subsys/500-mac80211_configure_antenna_gain.patch
@@ -57,7 +57,7 @@
  	__NL80211_ATTR_AFTER_LAST,
 --- a/net/mac80211/cfg.c
 +++ b/net/mac80211/cfg.c
-@@ -2728,6 +2728,19 @@ static int ieee80211_get_tx_power(struct
+@@ -2761,6 +2761,19 @@ static int ieee80211_get_tx_power(struct
  	return 0;
  }
  
@@ -77,7 +77,7 @@
  static int ieee80211_set_wds_peer(struct wiphy *wiphy, struct net_device *dev,
  				  const u8 *addr)
  {
-@@ -4158,6 +4171,7 @@ const struct cfg80211_ops mac80211_confi
+@@ -4191,6 +4204,7 @@ const struct cfg80211_ops mac80211_confi
  	.set_wiphy_params = ieee80211_set_wiphy_params,
  	.set_tx_power = ieee80211_set_tx_power,
  	.get_tx_power = ieee80211_get_tx_power,
@@ -87,7 +87,7 @@
  	CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
 --- a/net/mac80211/ieee80211_i.h
 +++ b/net/mac80211/ieee80211_i.h
-@@ -1390,6 +1390,7 @@ struct ieee80211_local {
+@@ -1426,6 +1426,7 @@ struct ieee80211_local {
  	int dynamic_ps_forced_timeout;
  
  	int user_power_level; /* in dBm, for all interfaces */
-- 
2.30.2