1 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
2 Date: Wed, 23 Jun 2021 15:47:55 +0200
3 Subject: [PATCH] mac80211: Switch to a virtual time-based airtime scheduler
4 MIME-Version: 1.0
5 Content-Type: text/plain; charset=UTF-8
6 Content-Transfer-Encoding: 8bit
7
8 This switches the airtime scheduler in mac80211 to use a virtual
9 time-based scheduler instead of the round-robin scheduler used before.
10 This has a couple of advantages:
11
12 - No need to sync up the round-robin scheduler in firmware/hardware with
13 the round-robin airtime scheduler.
14
15 - If several stations are eligible for transmission we can schedule all
16 of them; no need to hard-block the scheduling rotation until the head
17 of the queue has used up its quantum.
18
19 - The check of whether a station is eligible for transmission becomes
20 simpler (in ieee80211_txq_may_transmit()).
21
22 The drawback is that scheduling becomes slightly more expensive, as we
23 need to maintain an rbtree of TXQs sorted by virtual time. This means
24 that ieee80211_register_airtime() becomes O(log N) in the number of
25 currently scheduled TXQs because it can change the order of the
26 scheduled stations. We mitigate this overhead by only re-sorting when a
27 station changes position in the tree, and hopefully N rarely grows too
28 big (it's only TXQs currently backlogged, not all associated stations),
29 so it shouldn't be too big of an issue.
30
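A minimal stand-alone sketch may make the virtual-time idea more concrete.
The snippet below is plain user-space C written purely for illustration
(the vt_sta struct and all names in it are made up; none of it is mac80211
code): each station's virtual time advances in inverse proportion to its
weight when it is charged airtime, the global virtual time advances in
inverse proportion to the sum of the active weights, and the scheduler
always serves the station with the smallest virtual time (the leftmost
rbtree node in the real implementation).

    #include <stdio.h>

    struct vt_sta {
        const char *name;
        unsigned int weight;        /* airtime share; 256 is the unit weight */
        unsigned long long v_t;     /* per-station virtual time */
    };

    int main(void)
    {
        struct vt_sta stas[] = {
            { "sta0", 256, 0 },     /* normal weight */
            { "sta1", 512, 0 },     /* double weight -> about twice the airtime */
        };
        unsigned long long global_vt = 0, total_weight = 256 + 512;
        int i;

        for (i = 0; i < 12; i++) {
            /* serve the station with the smallest virtual time */
            struct vt_sta *s = stas[0].v_t <= stas[1].v_t ? &stas[0] : &stas[1];
            unsigned int airtime = 1000;    /* pretend each TX used 1000 us */

            /* if even that station is ahead of the global clock, catch the
             * global clock up (the role of airtime_catchup_v_t() below) */
            if (s->v_t > global_vt)
                global_vt = s->v_t;

            /* charge the airtime to both clocks */
            s->v_t += airtime * 256ULL / s->weight;
            global_vt += airtime * 256ULL / total_weight;
            printf("slot %2d -> %s\n", i, s->name);
        }
        return 0;
    }

Running it prints roughly two slots for sta1 for every slot of sta0, i.e.
airtime is shared in proportion to the configured weights.
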
31 To prevent divisions in the fast path, we maintain both the station weight sums
32 and pre-computed reciprocals of those sums. This turns the fast-path operation
33 into a multiplication, with divisions only happening as the number of
34 active stations changes (to re-compute the current sum of all active
35 station weights). To prevent this re-computation of the reciprocal from
36 happening too frequently, we use a time-based notion of station
37 activity, instead of updating the weight every time a station gets
38 scheduled or de-scheduled. As queues can oscillate between empty and
39 occupied quite frequently, this can significantly cut down on the number
40 of re-computations. It also has the added benefit of making the station
41 airtime calculation independent of whether the queue happened to have
42 drained at the time an airtime value was accounted.
43
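To make the reciprocal trick concrete, here is another small stand-alone
illustration, again plain user-space C written only for this description
(the RECIPROCAL_SHIFT macro is local to the example): the divisor of 2^19
matches IEEE80211_RECIPROCAL_DIVISOR_32 introduced below, and adding half
the weight before the multiplication rounds to nearest, as
ieee80211_register_airtime() does.

    #include <stdint.h>
    #include <stdio.h>

    #define RECIPROCAL_SHIFT 19

    int main(void)
    {
        uint32_t weight = 300;            /* station weight */
        uint32_t airtime = 8000u << 8;    /* 8 ms, scaled so unit weight is 256 */

        /* done once, whenever the weight changes */
        uint32_t reciprocal = (1u << RECIPROCAL_SHIFT) / weight;

        /* fast path: one multiplication and a shift instead of a division */
        uint32_t approx = (uint32_t)(((uint64_t)(airtime + (weight >> 1)) *
                                      reciprocal) >> RECIPROCAL_SHIFT);
        uint32_t exact = airtime / weight;

        printf("exact %u, via reciprocal %u\n", exact, approx);
        return 0;
    }

The rounding error stays small for realistic weights (6826 vs 6824 here),
which is why the pre-computed reciprocals are accurate enough for the
fairness calculation while keeping divisions off the per-packet path.
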
44 Co-developed-by: Yibo Zhao <yiboz@codeaurora.org>
45 Signed-off-by: Yibo Zhao <yiboz@codeaurora.org>
46 Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
47 Link: https://lore.kernel.org/r/20210623134755.235545-1-toke@redhat.com
48 Signed-off-by: Johannes Berg <johannes.berg@intel.com>
49 ---
50
51 --- a/include/net/mac80211.h
52 +++ b/include/net/mac80211.h
53 @@ -6578,9 +6578,6 @@ static inline void ieee80211_txq_schedul
54 {
55 }
56
57 -void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
58 - struct ieee80211_txq *txq, bool force);
59 -
60 /**
61 * ieee80211_schedule_txq - schedule a TXQ for transmission
62 *
63 @@ -6593,11 +6590,7 @@ void __ieee80211_schedule_txq(struct iee
64 * The driver may call this function if it has buffered packets for
65 * this TXQ internally.
66 */
67 -static inline void
68 -ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
69 -{
70 - __ieee80211_schedule_txq(hw, txq, true);
71 -}
72 +void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
73
74 /**
75 * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
76 @@ -6609,12 +6602,8 @@ ieee80211_schedule_txq(struct ieee80211_
77 * The driver may set force=true if it has buffered packets for this TXQ
78 * internally.
79 */
80 -static inline void
81 -ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
82 - bool force)
83 -{
84 - __ieee80211_schedule_txq(hw, txq, force);
85 -}
86 +void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
87 + bool force);
88
89 /**
90 * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
91 --- a/net/mac80211/cfg.c
92 +++ b/net/mac80211/cfg.c
93 @@ -1441,6 +1441,38 @@ static void sta_apply_mesh_params(struct
94 #endif
95 }
96
97 +static void sta_apply_airtime_params(struct ieee80211_local *local,
98 + struct sta_info *sta,
99 + struct station_parameters *params)
100 +{
101 + u8 ac;
102 +
103 + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
104 + struct airtime_sched_info *air_sched = &local->airtime[ac];
105 + struct airtime_info *air_info = &sta->airtime[ac];
106 + struct txq_info *txqi;
107 + u8 tid;
108 +
109 + spin_lock_bh(&air_sched->lock);
110 + for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) {
111 + if (air_info->weight == params->airtime_weight ||
112 + !sta->sta.txq[tid] ||
113 + ac != ieee80211_ac_from_tid(tid))
114 + continue;
115 +
116 + airtime_weight_set(air_info, params->airtime_weight);
117 +
118 + txqi = to_txq_info(sta->sta.txq[tid]);
119 + if (RB_EMPTY_NODE(&txqi->schedule_order))
120 + continue;
121 +
122 + ieee80211_update_airtime_weight(local, air_sched,
123 + 0, true);
124 + }
125 + spin_unlock_bh(&air_sched->lock);
126 + }
127 +}
128 +
129 static int sta_apply_parameters(struct ieee80211_local *local,
130 struct sta_info *sta,
131 struct station_parameters *params)
132 @@ -1628,7 +1660,8 @@ static int sta_apply_parameters(struct i
133 sta_apply_mesh_params(local, sta, params);
134
135 if (params->airtime_weight)
136 - sta->airtime_weight = params->airtime_weight;
137 + sta_apply_airtime_params(local, sta, params);
138 +
139
140 /* set the STA state after all sta info from usermode has been set */
141 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) ||
142 --- a/net/mac80211/debugfs.c
143 +++ b/net/mac80211/debugfs.c
144 @@ -216,14 +216,14 @@ static ssize_t aql_txq_limit_read(struct
145 "VI %u %u\n"
146 "BE %u %u\n"
147 "BK %u %u\n",
148 - local->aql_txq_limit_low[IEEE80211_AC_VO],
149 - local->aql_txq_limit_high[IEEE80211_AC_VO],
150 - local->aql_txq_limit_low[IEEE80211_AC_VI],
151 - local->aql_txq_limit_high[IEEE80211_AC_VI],
152 - local->aql_txq_limit_low[IEEE80211_AC_BE],
153 - local->aql_txq_limit_high[IEEE80211_AC_BE],
154 - local->aql_txq_limit_low[IEEE80211_AC_BK],
155 - local->aql_txq_limit_high[IEEE80211_AC_BK]);
156 + local->airtime[IEEE80211_AC_VO].aql_txq_limit_low,
157 + local->airtime[IEEE80211_AC_VO].aql_txq_limit_high,
158 + local->airtime[IEEE80211_AC_VI].aql_txq_limit_low,
159 + local->airtime[IEEE80211_AC_VI].aql_txq_limit_high,
160 + local->airtime[IEEE80211_AC_BE].aql_txq_limit_low,
161 + local->airtime[IEEE80211_AC_BE].aql_txq_limit_high,
162 + local->airtime[IEEE80211_AC_BK].aql_txq_limit_low,
163 + local->airtime[IEEE80211_AC_BK].aql_txq_limit_high);
164 return simple_read_from_buffer(user_buf, count, ppos,
165 buf, len);
166 }
167 @@ -255,11 +255,11 @@ static ssize_t aql_txq_limit_write(struc
168 if (ac >= IEEE80211_NUM_ACS)
169 return -EINVAL;
170
171 - q_limit_low_old = local->aql_txq_limit_low[ac];
172 - q_limit_high_old = local->aql_txq_limit_high[ac];
173 + q_limit_low_old = local->airtime[ac].aql_txq_limit_low;
174 + q_limit_high_old = local->airtime[ac].aql_txq_limit_high;
175
176 - local->aql_txq_limit_low[ac] = q_limit_low;
177 - local->aql_txq_limit_high[ac] = q_limit_high;
178 + local->airtime[ac].aql_txq_limit_low = q_limit_low;
179 + local->airtime[ac].aql_txq_limit_high = q_limit_high;
180
181 mutex_lock(&local->sta_mtx);
182 list_for_each_entry(sta, &local->sta_list, list) {
183 @@ -382,6 +382,46 @@ static const struct file_operations forc
184 .llseek = default_llseek,
185 };
186
187 +static ssize_t airtime_read(struct file *file,
188 + char __user *user_buf,
189 + size_t count,
190 + loff_t *ppos)
191 +{
192 + struct ieee80211_local *local = file->private_data;
193 + char buf[200];
194 + u64 v_t[IEEE80211_NUM_ACS];
195 + u64 wt[IEEE80211_NUM_ACS];
196 + int len = 0, ac;
197 +
198 + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
199 + spin_lock_bh(&local->airtime[ac].lock);
200 + v_t[ac] = local->airtime[ac].v_t;
201 + wt[ac] = local->airtime[ac].weight_sum;
202 + spin_unlock_bh(&local->airtime[ac].lock);
203 + }
204 + len = scnprintf(buf, sizeof(buf),
205 + "\tVO VI BE BK\n"
206 + "Virt-t\t%-10llu %-10llu %-10llu %-10llu\n"
207 + "Weight\t%-10llu %-10llu %-10llu %-10llu\n",
208 + v_t[0],
209 + v_t[1],
210 + v_t[2],
211 + v_t[3],
212 + wt[0],
213 + wt[1],
214 + wt[2],
215 + wt[3]);
216 +
217 + return simple_read_from_buffer(user_buf, count, ppos,
218 + buf, len);
219 +}
220 +
221 +static const struct file_operations airtime_ops = {
222 + .read = airtime_read,
223 + .open = simple_open,
224 + .llseek = default_llseek,
225 +};
226 +
227 #ifdef CONFIG_PM
228 static ssize_t reset_write(struct file *file, const char __user *user_buf,
229 size_t count, loff_t *ppos)
230 @@ -624,7 +664,11 @@ void debugfs_hw_add(struct ieee80211_loc
231 if (local->ops->wake_tx_queue)
232 DEBUGFS_ADD_MODE(aqm, 0600);
233
234 - DEBUGFS_ADD_MODE(airtime_flags, 0600);
235 + if (wiphy_ext_feature_isset(local->hw.wiphy,
236 + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
237 + DEBUGFS_ADD_MODE(airtime, 0600);
238 + DEBUGFS_ADD_MODE(airtime_flags, 0600);
239 + }
240
241 DEBUGFS_ADD(aql_txq_limit);
242 debugfs_create_u32("aql_threshold", 0600,
243 --- a/net/mac80211/debugfs_netdev.c
244 +++ b/net/mac80211/debugfs_netdev.c
245 @@ -513,6 +513,34 @@ static ssize_t ieee80211_if_fmt_aqm(
246 }
247 IEEE80211_IF_FILE_R(aqm);
248
249 +static ssize_t ieee80211_if_fmt_airtime(
250 + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
251 +{
252 + struct ieee80211_local *local = sdata->local;
253 + struct ieee80211_txq *txq = sdata->vif.txq;
254 + struct airtime_info *air_info;
255 + int len;
256 +
257 + if (!txq)
258 + return 0;
259 +
260 + spin_lock_bh(&local->airtime[txq->ac].lock);
261 + air_info = to_airtime_info(txq);
262 + len = scnprintf(buf,
263 + buflen,
264 + "RX: %llu us\nTX: %llu us\nWeight: %u\n"
265 + "Virt-T: %lld us\n",
266 + air_info->rx_airtime,
267 + air_info->tx_airtime,
268 + air_info->weight,
269 + air_info->v_t);
270 + spin_unlock_bh(&local->airtime[txq->ac].lock);
271 +
272 + return len;
273 +}
274 +
275 +IEEE80211_IF_FILE_R(airtime);
276 +
277 IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX);
278
279 /* IBSS attributes */
280 @@ -658,8 +686,10 @@ static void add_common_files(struct ieee
281
282 if (sdata->local->ops->wake_tx_queue &&
283 sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
284 - sdata->vif.type != NL80211_IFTYPE_NAN)
285 + sdata->vif.type != NL80211_IFTYPE_NAN) {
286 DEBUGFS_ADD(aqm);
287 + DEBUGFS_ADD(airtime);
288 + }
289 }
290
291 static void add_sta_files(struct ieee80211_sub_if_data *sdata)
292 --- a/net/mac80211/debugfs_sta.c
293 +++ b/net/mac80211/debugfs_sta.c
294 @@ -202,7 +202,7 @@ static ssize_t sta_airtime_read(struct f
295 size_t bufsz = 400;
296 char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
297 u64 rx_airtime = 0, tx_airtime = 0;
298 - s64 deficit[IEEE80211_NUM_ACS];
299 + u64 v_t[IEEE80211_NUM_ACS];
300 ssize_t rv;
301 int ac;
302
303 @@ -210,18 +210,18 @@ static ssize_t sta_airtime_read(struct f
304 return -ENOMEM;
305
306 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
307 - spin_lock_bh(&local->active_txq_lock[ac]);
308 + spin_lock_bh(&local->airtime[ac].lock);
309 rx_airtime += sta->airtime[ac].rx_airtime;
310 tx_airtime += sta->airtime[ac].tx_airtime;
311 - deficit[ac] = sta->airtime[ac].deficit;
312 - spin_unlock_bh(&local->active_txq_lock[ac]);
313 + v_t[ac] = sta->airtime[ac].v_t;
314 + spin_unlock_bh(&local->airtime[ac].lock);
315 }
316
317 p += scnprintf(p, bufsz + buf - p,
318 "RX: %llu us\nTX: %llu us\nWeight: %u\n"
319 - "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
320 - rx_airtime, tx_airtime, sta->airtime_weight,
321 - deficit[0], deficit[1], deficit[2], deficit[3]);
322 + "Virt-T: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
323 + rx_airtime, tx_airtime, sta->airtime[0].weight,
324 + v_t[0], v_t[1], v_t[2], v_t[3]);
325
326 rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
327 kfree(buf);
328 @@ -236,11 +236,11 @@ static ssize_t sta_airtime_write(struct
329 int ac;
330
331 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
332 - spin_lock_bh(&local->active_txq_lock[ac]);
333 + spin_lock_bh(&local->airtime[ac].lock);
334 sta->airtime[ac].rx_airtime = 0;
335 sta->airtime[ac].tx_airtime = 0;
336 - sta->airtime[ac].deficit = sta->airtime_weight;
337 - spin_unlock_bh(&local->active_txq_lock[ac]);
338 + sta->airtime[ac].v_t = 0;
339 + spin_unlock_bh(&local->airtime[ac].lock);
340 }
341
342 return count;
343 @@ -263,10 +263,10 @@ static ssize_t sta_aql_read(struct file
344 return -ENOMEM;
345
346 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
347 - spin_lock_bh(&local->active_txq_lock[ac]);
348 + spin_lock_bh(&local->airtime[ac].lock);
349 q_limit_l[ac] = sta->airtime[ac].aql_limit_low;
350 q_limit_h[ac] = sta->airtime[ac].aql_limit_high;
351 - spin_unlock_bh(&local->active_txq_lock[ac]);
352 + spin_unlock_bh(&local->airtime[ac].lock);
353 q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending);
354 }
355
356 --- a/net/mac80211/ieee80211_i.h
357 +++ b/net/mac80211/ieee80211_i.h
358 @@ -846,20 +846,16 @@ enum txq_info_flags {
359 * @def_flow: used as a fallback flow when a packet destined to @tin hashes to
360 * a fq_flow which is already owned by a different tin
361 * @def_cvars: codel vars for @def_flow
362 - * @frags: used to keep fragments created after dequeue
363 * @schedule_order: used with ieee80211_local->active_txqs
364 - * @schedule_round: counter to prevent infinite loops on TXQ scheduling
365 + * @frags: used to keep fragments created after dequeue
366 */
367 struct txq_info {
368 struct fq_tin tin;
369 struct codel_vars def_cvars;
370 struct codel_stats cstats;
371 -
372 - u16 schedule_round;
373 - struct list_head schedule_order;
374 + struct rb_node schedule_order;
375
376 struct sk_buff_head frags;
377 -
378 unsigned long flags;
379
380 /* keep last! */
381 @@ -938,6 +934,8 @@ struct ieee80211_sub_if_data {
382 struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
383 struct mac80211_qos_map __rcu *qos_map;
384
385 + struct airtime_info airtime[IEEE80211_NUM_ACS];
386 +
387 struct work_struct csa_finalize_work;
388 bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
389 struct cfg80211_chan_def csa_chandef;
390 @@ -1150,6 +1148,44 @@ enum mac80211_scan_state {
391 SCAN_ABORT,
392 };
393
394 +/**
395 + * struct airtime_sched_info - state used for airtime scheduling and AQL
396 + *
397 + * @lock: spinlock that protects all the fields in this struct
398 + * @active_txqs: rbtree of currently backlogged queues, sorted by virtual time
399 + * @schedule_pos: the current position maintained while a driver walks the tree
400 + * with ieee80211_next_txq()
401 + * @active_list: list of struct airtime_info structs that were active within
402 + * the last AIRTIME_ACTIVE_DURATION (100 ms), used to compute
403 + * weight_sum
404 + * @last_weight_update: used for rate limiting walking active_list
405 + * @last_schedule_activity: tracks the last time a transmission was scheduled; used
406 + * for catching up v_t if no stations are eligible for
407 + * transmission.
408 + * @v_t: global virtual time; queues with v_t < this are eligible for
409 + * transmission
410 + * @weight_sum: sum of the weights of all active stations, used for dividing airtime
411 + * @weight_sum_reciprocal: reciprocal of weight_sum (to avoid divisions in fast
412 + * path - see comment above
413 + * IEEE80211_RECIPROCAL_DIVISOR_64)
414 + * @aql_txq_limit_low: AQL limit when total outstanding airtime
415 + * is < IEEE80211_AQL_THRESHOLD
416 + * @aql_txq_limit_high: AQL limit when total outstanding airtime
417 + * is > IEEE80211_AQL_THRESHOLD
418 + */
419 +struct airtime_sched_info {
420 + spinlock_t lock;
421 + struct rb_root_cached active_txqs;
422 + struct rb_node *schedule_pos;
423 + struct list_head active_list;
424 + u64 last_weight_update;
425 + u64 last_schedule_activity;
426 + u64 v_t;
427 + u64 weight_sum;
428 + u64 weight_sum_reciprocal;
429 + u32 aql_txq_limit_low;
430 + u32 aql_txq_limit_high;
431 +};
432 DECLARE_STATIC_KEY_FALSE(aql_disable);
433
434 struct ieee80211_local {
435 @@ -1163,13 +1199,8 @@ struct ieee80211_local {
436 struct codel_params cparams;
437
438 /* protects active_txqs and txqi->schedule_order */
439 - spinlock_t active_txq_lock[IEEE80211_NUM_ACS];
440 - struct list_head active_txqs[IEEE80211_NUM_ACS];
441 - u16 schedule_round[IEEE80211_NUM_ACS];
442 -
443 + struct airtime_sched_info airtime[IEEE80211_NUM_ACS];
444 u16 airtime_flags;
445 - u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
446 - u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
447 u32 aql_threshold;
448 atomic_t aql_total_pending_airtime;
449
450 @@ -1587,6 +1618,125 @@ static inline bool txq_has_queue(struct
451 return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets);
452 }
453
454 +static inline struct airtime_info *to_airtime_info(struct ieee80211_txq *txq)
455 +{
456 + struct ieee80211_sub_if_data *sdata;
457 + struct sta_info *sta;
458 +
459 + if (txq->sta) {
460 + sta = container_of(txq->sta, struct sta_info, sta);
461 + return &sta->airtime[txq->ac];
462 + }
463 +
464 + sdata = vif_to_sdata(txq->vif);
465 + return &sdata->airtime[txq->ac];
466 +}
467 +
468 +/* To avoid divisions in the fast path, we keep pre-computed reciprocals for
469 + * airtime weight calculations. There are two different weights to keep track
470 + * of: The per-station weight and the sum of weights per phy.
471 + *
472 + * For the per-station weights (kept in airtime_info below), we use 32-bit
473 + * reciprocals with a divisor of 2^19. This lets us keep the multiplications and
474 + * divisions for the station weights as 32-bit operations at the cost of a bit
475 + * of rounding error for high weights; but the choice of divisor keeps rounding
476 + * errors <10% for weights <2^15, assuming no more than 8ms of airtime is
477 + * reported at a time.
478 + *
479 + * For the per-phy sum of weights the values can get higher, so we use 64-bit
480 + * operations for those with a 32-bit divisor, which should avoid any
481 + * significant rounding errors.
482 + */
483 +#define IEEE80211_RECIPROCAL_DIVISOR_64 0x100000000ULL
484 +#define IEEE80211_RECIPROCAL_SHIFT_64 32
485 +#define IEEE80211_RECIPROCAL_DIVISOR_32 0x80000U
486 +#define IEEE80211_RECIPROCAL_SHIFT_32 19
487 +
488 +static inline void airtime_weight_set(struct airtime_info *air_info, u16 weight)
489 +{
490 + if (air_info->weight == weight)
491 + return;
492 +
493 + air_info->weight = weight;
494 + if (weight) {
495 + air_info->weight_reciprocal =
496 + IEEE80211_RECIPROCAL_DIVISOR_32 / weight;
497 + } else {
498 + air_info->weight_reciprocal = 0;
499 + }
500 +}
501 +
502 +static inline void airtime_weight_sum_set(struct airtime_sched_info *air_sched,
503 + int weight_sum)
504 +{
505 + if (air_sched->weight_sum == weight_sum)
506 + return;
507 +
508 + air_sched->weight_sum = weight_sum;
509 + if (air_sched->weight_sum) {
510 + air_sched->weight_sum_reciprocal = IEEE80211_RECIPROCAL_DIVISOR_64;
511 + do_div(air_sched->weight_sum_reciprocal, air_sched->weight_sum);
512 + } else {
513 + air_sched->weight_sum_reciprocal = 0;
514 + }
515 +}
516 +
517 +/* A problem when trying to enforce airtime fairness is that we want to divide
518 + * the airtime between the currently *active* stations. However, basing this on
519 + * the instantaneous queue state of stations doesn't work, as queues tend to
520 + * oscillate very quickly between empty and occupied, leading to the scheduler
521 + * thinking only a single station is active when deciding whether to allow
522 + * transmission (and thus not throttling correctly).
523 + *
524 + * To fix this we use a timer-based notion of activity: a station is considered
525 + * active if it has been scheduled within the last 100 ms; we keep a separate
526 + * list of all the stations considered active in this manner, and lazily update
527 + * the total weight of active stations from this list (filtering the stations in
528 + * the list by their 'last active' time).
529 + *
530 + * We add one additional safeguard to guard against stations that manage to get
531 + * scheduled every 100 ms but don't transmit a lot of data, and thus don't use
532 + * up any airtime. Such stations would be able to get priority for an extended
533 + * period of time if they do start transmitting at full capacity again, and so
534 + * we add an explicit maximum for how far behind a station is allowed to fall in
535 + * the virtual airtime domain. This limit is set to a relatively high value of
536 + * 20 ms because the main mechanism for catching up idle stations is the active
537 + * state as described above; i.e., the hard limit should only be hit in
538 + * pathological cases.
539 + */
540 +#define AIRTIME_ACTIVE_DURATION (100 * NSEC_PER_MSEC)
541 +#define AIRTIME_MAX_BEHIND 20000 /* 20 ms */
542 +
543 +static inline bool airtime_is_active(struct airtime_info *air_info, u64 now)
544 +{
545 + return air_info->last_scheduled >= now - AIRTIME_ACTIVE_DURATION;
546 +}
547 +
548 +static inline void airtime_set_active(struct airtime_sched_info *air_sched,
549 + struct airtime_info *air_info, u64 now)
550 +{
551 + air_info->last_scheduled = now;
552 + air_sched->last_schedule_activity = now;
553 + list_move_tail(&air_info->list, &air_sched->active_list);
554 +}
555 +
556 +static inline bool airtime_catchup_v_t(struct airtime_sched_info *air_sched,
557 + u64 v_t, u64 now)
558 +{
559 + air_sched->v_t = v_t;
560 + return true;
561 +}
562 +
563 +static inline void init_airtime_info(struct airtime_info *air_info,
564 + struct airtime_sched_info *air_sched)
565 +{
566 + atomic_set(&air_info->aql_tx_pending, 0);
567 + air_info->aql_limit_low = air_sched->aql_txq_limit_low;
568 + air_info->aql_limit_high = air_sched->aql_txq_limit_high;
569 + airtime_weight_set(air_info, IEEE80211_DEFAULT_AIRTIME_WEIGHT);
570 + INIT_LIST_HEAD(&air_info->list);
571 +}
572 +
573 static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
574 {
575 return ether_addr_equal(raddr, addr) ||
576 @@ -1827,6 +1977,14 @@ int ieee80211_tx_control_port(struct wip
577 u64 *cookie);
578 int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev,
579 const u8 *buf, size_t len);
580 +void ieee80211_resort_txq(struct ieee80211_hw *hw,
581 + struct ieee80211_txq *txq);
582 +void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
583 + struct ieee80211_txq *txq,
584 + bool purge);
585 +void ieee80211_update_airtime_weight(struct ieee80211_local *local,
586 + struct airtime_sched_info *air_sched,
587 + u64 now, bool force);
588
589 /* HT */
590 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
591 --- a/net/mac80211/iface.c
592 +++ b/net/mac80211/iface.c
593 @@ -2013,6 +2013,9 @@ int ieee80211_if_add(struct ieee80211_lo
594 }
595 }
596
597 + for (i = 0; i < IEEE80211_NUM_ACS; i++)
598 + init_airtime_info(&sdata->airtime[i], &local->airtime[i]);
599 +
600 ieee80211_set_default_queues(sdata);
601
602 sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
603 --- a/net/mac80211/main.c
604 +++ b/net/mac80211/main.c
605 @@ -691,10 +691,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_
606 spin_lock_init(&local->queue_stop_reason_lock);
607
608 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
609 - INIT_LIST_HEAD(&local->active_txqs[i]);
610 - spin_lock_init(&local->active_txq_lock[i]);
611 - local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
612 - local->aql_txq_limit_high[i] =
613 + struct airtime_sched_info *air_sched = &local->airtime[i];
614 +
615 + air_sched->active_txqs = RB_ROOT_CACHED;
616 + INIT_LIST_HEAD(&air_sched->active_list);
617 + spin_lock_init(&air_sched->lock);
618 + air_sched->aql_txq_limit_low = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
619 + air_sched->aql_txq_limit_high =
620 IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H;
621 }
622
623 --- a/net/mac80211/rx.c
624 +++ b/net/mac80211/rx.c
625 @@ -1562,12 +1562,8 @@ static void sta_ps_start(struct sta_info
626
627 for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
628 struct ieee80211_txq *txq = sta->sta.txq[tid];
629 - struct txq_info *txqi = to_txq_info(txq);
630
631 - spin_lock(&local->active_txq_lock[txq->ac]);
632 - if (!list_empty(&txqi->schedule_order))
633 - list_del_init(&txqi->schedule_order);
634 - spin_unlock(&local->active_txq_lock[txq->ac]);
635 + ieee80211_unschedule_txq(&local->hw, txq, false);
636
637 if (txq_has_queue(txq))
638 set_bit(tid, &sta->txq_buffered_tids);
639 --- a/net/mac80211/sta_info.c
640 +++ b/net/mac80211/sta_info.c
641 @@ -424,15 +424,11 @@ struct sta_info *sta_info_alloc(struct i
642 if (sta_prepare_rate_control(local, sta, gfp))
643 goto free_txq;
644
645 - sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;
646
647 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
648 skb_queue_head_init(&sta->ps_tx_buf[i]);
649 skb_queue_head_init(&sta->tx_filtered[i]);
650 - sta->airtime[i].deficit = sta->airtime_weight;
651 - atomic_set(&sta->airtime[i].aql_tx_pending, 0);
652 - sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i];
653 - sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i];
654 + init_airtime_info(&sta->airtime[i], &local->airtime[i]);
655 }
656
657 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
658 @@ -1894,24 +1890,59 @@ void ieee80211_sta_set_buffered(struct i
659 }
660 EXPORT_SYMBOL(ieee80211_sta_set_buffered);
661
662 -void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
663 - u32 tx_airtime, u32 rx_airtime)
664 +void ieee80211_register_airtime(struct ieee80211_txq *txq,
665 + u32 tx_airtime, u32 rx_airtime)
666 {
667 - struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
668 - struct ieee80211_local *local = sta->sdata->local;
669 - u8 ac = ieee80211_ac_from_tid(tid);
670 + struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
671 + struct ieee80211_local *local = sdata->local;
672 + u64 weight_sum, weight_sum_reciprocal;
673 + struct airtime_sched_info *air_sched;
674 + struct airtime_info *air_info;
675 u32 airtime = 0;
676
677 - if (sta->local->airtime_flags & AIRTIME_USE_TX)
678 + air_sched = &local->airtime[txq->ac];
679 + air_info = to_airtime_info(txq);
680 +
681 + if (local->airtime_flags & AIRTIME_USE_TX)
682 airtime += tx_airtime;
683 - if (sta->local->airtime_flags & AIRTIME_USE_RX)
684 + if (local->airtime_flags & AIRTIME_USE_RX)
685 airtime += rx_airtime;
686
687 - spin_lock_bh(&local->active_txq_lock[ac]);
688 - sta->airtime[ac].tx_airtime += tx_airtime;
689 - sta->airtime[ac].rx_airtime += rx_airtime;
690 - sta->airtime[ac].deficit -= airtime;
691 - spin_unlock_bh(&local->active_txq_lock[ac]);
692 + /* Weights scale so the unit weight is 256 */
693 + airtime <<= 8;
694 +
695 + spin_lock_bh(&air_sched->lock);
696 +
697 + air_info->tx_airtime += tx_airtime;
698 + air_info->rx_airtime += rx_airtime;
699 +
700 + if (air_sched->weight_sum) {
701 + weight_sum = air_sched->weight_sum;
702 + weight_sum_reciprocal = air_sched->weight_sum_reciprocal;
703 + } else {
704 + weight_sum = air_info->weight;
705 + weight_sum_reciprocal = air_info->weight_reciprocal;
706 + }
707 +
708 + /* Round the calculation of global vt */
709 + air_sched->v_t += (u64)((airtime + (weight_sum >> 1)) *
710 + weight_sum_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_64;
711 + air_info->v_t += (u32)((airtime + (air_info->weight >> 1)) *
712 + air_info->weight_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_32;
713 + ieee80211_resort_txq(&local->hw, txq);
714 +
715 + spin_unlock_bh(&air_sched->lock);
716 +}
717 +
718 +void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
719 + u32 tx_airtime, u32 rx_airtime)
720 +{
721 + struct ieee80211_txq *txq = pubsta->txq[tid];
722 +
723 + if (!txq)
724 + return;
725 +
726 + ieee80211_register_airtime(txq, tx_airtime, rx_airtime);
727 }
728 EXPORT_SYMBOL(ieee80211_sta_register_airtime);
729
730 @@ -2360,7 +2391,7 @@ void sta_set_sinfo(struct sta_info *sta,
731 }
732
733 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
734 - sinfo->airtime_weight = sta->airtime_weight;
735 + sinfo->airtime_weight = sta->airtime[0].weight;
736 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
737 }
738
739 --- a/net/mac80211/sta_info.h
740 +++ b/net/mac80211/sta_info.h
741 @@ -135,18 +135,25 @@ enum ieee80211_agg_stop_reason {
742 #define AIRTIME_USE_TX BIT(0)
743 #define AIRTIME_USE_RX BIT(1)
744
745 +
746 struct airtime_info {
747 u64 rx_airtime;
748 u64 tx_airtime;
749 - s64 deficit;
750 + u64 v_t;
751 + u64 last_scheduled;
752 + struct list_head list;
753 atomic_t aql_tx_pending; /* Estimated airtime for frames pending */
754 u32 aql_limit_low;
755 u32 aql_limit_high;
756 + u32 weight_reciprocal;
757 + u16 weight;
758 };
759
760 void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
761 struct sta_info *sta, u8 ac,
762 u16 tx_airtime, bool tx_completed);
763 +void ieee80211_register_airtime(struct ieee80211_txq *txq,
764 + u32 tx_airtime, u32 rx_airtime);
765
766 struct sta_info;
767
768 @@ -487,7 +494,6 @@ struct ieee80211_sta_rx_stats {
769 * @tid_seq: per-TID sequence numbers for sending to this STA
770 * @airtime: per-AC struct airtime_info describing airtime statistics for this
771 * station
772 - * @airtime_weight: station weight for airtime fairness calculation purposes
773 * @ampdu_mlme: A-MPDU state machine state
774 * @mesh: mesh STA information
775 * @debugfs_dir: debug filesystem directory dentry
776 @@ -617,7 +623,6 @@ struct sta_info {
777 u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
778
779 struct airtime_info airtime[IEEE80211_NUM_ACS];
780 - u16 airtime_weight;
781
782 /*
783 * Aggregation information, locked with lock.
784 --- a/net/mac80211/status.c
785 +++ b/net/mac80211/status.c
786 @@ -970,6 +970,25 @@ static void __ieee80211_tx_status(struct
787 if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
788 ieee80211_frame_acked(sta, skb);
789
790 + } else if (wiphy_ext_feature_isset(local->hw.wiphy,
791 + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
792 + struct ieee80211_sub_if_data *sdata;
793 + struct ieee80211_txq *txq;
794 + u32 airtime;
795 +
796 + /* Account airtime to multicast queue */
797 + sdata = ieee80211_sdata_from_skb(local, skb);
798 +
799 + if (sdata && (txq = sdata->vif.txq)) {
800 + airtime = info->status.tx_time ?:
801 + ieee80211_calc_expected_tx_airtime(hw,
802 + &sdata->vif,
803 + NULL,
804 + skb->len,
805 + false);
806 +
807 + ieee80211_register_airtime(txq, airtime, 0);
808 + }
809 }
810
811 /* SNMP counters
812 --- a/net/mac80211/tx.c
813 +++ b/net/mac80211/tx.c
814 @@ -18,6 +18,7 @@
815 #include <linux/bitmap.h>
816 #include <linux/rcupdate.h>
817 #include <linux/export.h>
818 +#include <linux/timekeeping.h>
819 #include <net/net_namespace.h>
820 #include <net/ieee80211_radiotap.h>
821 #include <net/cfg80211.h>
822 @@ -1476,7 +1477,7 @@ void ieee80211_txq_init(struct ieee80211
823 codel_vars_init(&txqi->def_cvars);
824 codel_stats_init(&txqi->cstats);
825 __skb_queue_head_init(&txqi->frags);
826 - INIT_LIST_HEAD(&txqi->schedule_order);
827 + RB_CLEAR_NODE(&txqi->schedule_order);
828
829 txqi->txq.vif = &sdata->vif;
830
831 @@ -1520,9 +1521,7 @@ void ieee80211_txq_purge(struct ieee8021
832 ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
833 spin_unlock_bh(&fq->lock);
834
835 - spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
836 - list_del_init(&txqi->schedule_order);
837 - spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
838 + ieee80211_unschedule_txq(&local->hw, &txqi->txq, true);
839 }
840
841 void ieee80211_txq_set_params(struct ieee80211_local *local)
842 @@ -3768,102 +3767,259 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
843 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
844 {
845 struct ieee80211_local *local = hw_to_local(hw);
846 + struct airtime_sched_info *air_sched;
847 + u64 now = ktime_get_boottime_ns();
848 struct ieee80211_txq *ret = NULL;
849 - struct txq_info *txqi = NULL, *head = NULL;
850 - bool found_eligible_txq = false;
851 + struct airtime_info *air_info;
852 + struct txq_info *txqi = NULL;
853 + struct rb_node *node;
854 + bool first = false;
855
856 - spin_lock_bh(&local->active_txq_lock[ac]);
857 + air_sched = &local->airtime[ac];
858 + spin_lock_bh(&air_sched->lock);
859
860 - begin:
861 - txqi = list_first_entry_or_null(&local->active_txqs[ac],
862 - struct txq_info,
863 - schedule_order);
864 - if (!txqi)
865 + node = air_sched->schedule_pos;
866 +
867 +begin:
868 + if (!node) {
869 + node = rb_first_cached(&air_sched->active_txqs);
870 + first = true;
871 + } else {
872 + node = rb_next(node);
873 + }
874 +
875 + if (!node)
876 goto out;
877
878 - if (txqi == head) {
879 - if (!found_eligible_txq)
880 - goto out;
881 - else
882 - found_eligible_txq = false;
883 + txqi = container_of(node, struct txq_info, schedule_order);
884 + air_info = to_airtime_info(&txqi->txq);
885 +
886 + if (air_info->v_t > air_sched->v_t &&
887 + (!first || !airtime_catchup_v_t(air_sched, air_info->v_t, now)))
888 + goto out;
889 +
890 + if (!ieee80211_txq_airtime_check(hw, &txqi->txq)) {
891 + first = false;
892 + goto begin;
893 }
894
895 - if (!head)
896 - head = txqi;
897 + air_sched->schedule_pos = node;
898 + air_sched->last_schedule_activity = now;
899 + ret = &txqi->txq;
900 +out:
901 + spin_unlock_bh(&air_sched->lock);
902 + return ret;
903 +}
904 +EXPORT_SYMBOL(ieee80211_next_txq);
905
906 - if (txqi->txq.sta) {
907 - struct sta_info *sta = container_of(txqi->txq.sta,
908 - struct sta_info, sta);
909 - bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq);
910 - s64 deficit = sta->airtime[txqi->txq.ac].deficit;
911 +static void __ieee80211_insert_txq(struct rb_root_cached *root,
912 + struct txq_info *txqi)
913 +{
914 + struct rb_node **new = &root->rb_root.rb_node;
915 + struct airtime_info *old_air, *new_air;
916 + struct rb_node *parent = NULL;
917 + struct txq_info *__txqi;
918 + bool leftmost = true;
919 +
920 + while (*new) {
921 + parent = *new;
922 + __txqi = rb_entry(parent, struct txq_info, schedule_order);
923 + old_air = to_airtime_info(&__txqi->txq);
924 + new_air = to_airtime_info(&txqi->txq);
925
926 - if (aql_check)
927 - found_eligible_txq = true;
928 + if (new_air->v_t <= old_air->v_t) {
929 + new = &parent->rb_left;
930 + } else {
931 + new = &parent->rb_right;
932 + leftmost = false;
933 + }
934 + }
935
936 - if (deficit < 0)
937 - sta->airtime[txqi->txq.ac].deficit +=
938 - sta->airtime_weight;
939 -
940 - if (deficit < 0 || !aql_check) {
941 - list_move_tail(&txqi->schedule_order,
942 - &local->active_txqs[txqi->txq.ac]);
943 - goto begin;
944 + rb_link_node(&txqi->schedule_order, parent, new);
945 + rb_insert_color_cached(&txqi->schedule_order, root, leftmost);
946 +}
947 +
948 +void ieee80211_resort_txq(struct ieee80211_hw *hw,
949 + struct ieee80211_txq *txq)
950 +{
951 + struct airtime_info *air_info = to_airtime_info(txq);
952 + struct ieee80211_local *local = hw_to_local(hw);
953 + struct txq_info *txqi = to_txq_info(txq);
954 + struct airtime_sched_info *air_sched;
955 +
956 + air_sched = &local->airtime[txq->ac];
957 +
958 + lockdep_assert_held(&air_sched->lock);
959 +
960 + if (!RB_EMPTY_NODE(&txqi->schedule_order)) {
961 + struct airtime_info *a_prev = NULL, *a_next = NULL;
962 + struct txq_info *t_prev, *t_next;
963 + struct rb_node *n_prev, *n_next;
964 +
965 + /* Erasing a node can cause an expensive rebalancing operation,
966 + * so we check the previous and next nodes first and only remove
967 + * and re-insert if the current node is not already in the
968 + * correct position.
969 + */
970 + if ((n_prev = rb_prev(&txqi->schedule_order)) != NULL) {
971 + t_prev = container_of(n_prev, struct txq_info,
972 + schedule_order);
973 + a_prev = to_airtime_info(&t_prev->txq);
974 + }
975 +
976 + if ((n_next = rb_next(&txqi->schedule_order)) != NULL) {
977 + t_next = container_of(n_next, struct txq_info,
978 + schedule_order);
979 + a_next = to_airtime_info(&t_next->txq);
980 }
981 +
982 + if ((!a_prev || a_prev->v_t <= air_info->v_t) &&
983 + (!a_next || a_next->v_t > air_info->v_t))
984 + return;
985 +
986 + if (air_sched->schedule_pos == &txqi->schedule_order)
987 + air_sched->schedule_pos = n_prev;
988 +
989 + rb_erase_cached(&txqi->schedule_order,
990 + &air_sched->active_txqs);
991 + RB_CLEAR_NODE(&txqi->schedule_order);
992 + __ieee80211_insert_txq(&air_sched->active_txqs, txqi);
993 }
994 +}
995 +
996 +void ieee80211_update_airtime_weight(struct ieee80211_local *local,
997 + struct airtime_sched_info *air_sched,
998 + u64 now, bool force)
999 +{
1000 + struct airtime_info *air_info, *tmp;
1001 + u64 weight_sum = 0;
1002
1003 + if (unlikely(!now))
1004 + now = ktime_get_boottime_ns();
1005 +
1006 + lockdep_assert_held(&air_sched->lock);
1007 +
1008 + if (!force && (air_sched->last_weight_update <
1009 + now - AIRTIME_ACTIVE_DURATION))
1010 + return;
1011
1012 - if (txqi->schedule_round == local->schedule_round[ac])
1013 + list_for_each_entry_safe(air_info, tmp,
1014 + &air_sched->active_list, list) {
1015 + if (airtime_is_active(air_info, now))
1016 + weight_sum += air_info->weight;
1017 + else
1018 + list_del_init(&air_info->list);
1019 + }
1020 + airtime_weight_sum_set(air_sched, weight_sum);
1021 + air_sched->last_weight_update = now;
1022 +}
1023 +
1024 +void ieee80211_schedule_txq(struct ieee80211_hw *hw,
1025 + struct ieee80211_txq *txq)
1026 + __acquires(txq_lock) __releases(txq_lock)
1027 +{
1028 + struct ieee80211_local *local = hw_to_local(hw);
1029 + struct txq_info *txqi = to_txq_info(txq);
1030 + struct airtime_sched_info *air_sched;
1031 + u64 now = ktime_get_boottime_ns();
1032 + struct airtime_info *air_info;
1033 + u8 ac = txq->ac;
1034 + bool was_active;
1035 +
1036 + air_sched = &local->airtime[ac];
1037 + air_info = to_airtime_info(txq);
1038 +
1039 + spin_lock_bh(&air_sched->lock);
1040 + was_active = airtime_is_active(air_info, now);
1041 + airtime_set_active(air_sched, air_info, now);
1042 +
1043 + if (!RB_EMPTY_NODE(&txqi->schedule_order))
1044 goto out;
1045
1046 - list_del_init(&txqi->schedule_order);
1047 - txqi->schedule_round = local->schedule_round[ac];
1048 - ret = &txqi->txq;
1049 + /* If the station has been inactive for a while, catch up its v_t so it
1050 + * doesn't get indefinite priority; see comment above the definition of
1051 + * AIRTIME_MAX_BEHIND.
1052 + */
1053 + if ((!was_active && air_info->v_t < air_sched->v_t) ||
1054 + air_info->v_t < air_sched->v_t - AIRTIME_MAX_BEHIND)
1055 + air_info->v_t = air_sched->v_t;
1056 +
1057 + ieee80211_update_airtime_weight(local, air_sched, now, !was_active);
1058 + __ieee80211_insert_txq(&air_sched->active_txqs, txqi);
1059
1060 out:
1061 - spin_unlock_bh(&local->active_txq_lock[ac]);
1062 - return ret;
1063 + spin_unlock_bh(&air_sched->lock);
1064 }
1065 -EXPORT_SYMBOL(ieee80211_next_txq);
1066 +EXPORT_SYMBOL(ieee80211_schedule_txq);
1067
1068 -void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
1069 - struct ieee80211_txq *txq,
1070 - bool force)
1071 +static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw,
1072 + struct ieee80211_txq *txq,
1073 + bool purge)
1074 {
1075 struct ieee80211_local *local = hw_to_local(hw);
1076 struct txq_info *txqi = to_txq_info(txq);
1077 + struct airtime_sched_info *air_sched;
1078 + struct airtime_info *air_info;
1079
1080 - spin_lock_bh(&local->active_txq_lock[txq->ac]);
1081 + air_sched = &local->airtime[txq->ac];
1082 + air_info = to_airtime_info(&txqi->txq);
1083
1084 - if (list_empty(&txqi->schedule_order) &&
1085 - (force || !skb_queue_empty(&txqi->frags) ||
1086 - txqi->tin.backlog_packets)) {
1087 - /* If airtime accounting is active, always enqueue STAs at the
1088 - * head of the list to ensure that they only get moved to the
1089 - * back by the airtime DRR scheduler once they have a negative
1090 - * deficit. A station that already has a negative deficit will
1091 - * get immediately moved to the back of the list on the next
1092 - * call to ieee80211_next_txq().
1093 - */
1094 - if (txqi->txq.sta && local->airtime_flags &&
1095 - wiphy_ext_feature_isset(local->hw.wiphy,
1096 - NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
1097 - list_add(&txqi->schedule_order,
1098 - &local->active_txqs[txq->ac]);
1099 - else
1100 - list_add_tail(&txqi->schedule_order,
1101 - &local->active_txqs[txq->ac]);
1102 + lockdep_assert_held(&air_sched->lock);
1103 +
1104 + if (purge) {
1105 + list_del_init(&air_info->list);
1106 + ieee80211_update_airtime_weight(local, air_sched, 0, true);
1107 }
1108
1109 - spin_unlock_bh(&local->active_txq_lock[txq->ac]);
1110 + if (RB_EMPTY_NODE(&txqi->schedule_order))
1111 + return;
1112 +
1113 + if (air_sched->schedule_pos == &txqi->schedule_order)
1114 + air_sched->schedule_pos = rb_prev(&txqi->schedule_order);
1115 +
1116 + if (!purge)
1117 + airtime_set_active(air_sched, air_info,
1118 + ktime_get_boottime_ns());
1119 +
1120 + rb_erase_cached(&txqi->schedule_order,
1121 + &air_sched->active_txqs);
1122 + RB_CLEAR_NODE(&txqi->schedule_order);
1123 +}
1124 +
1125 +void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
1126 + struct ieee80211_txq *txq,
1127 + bool purge)
1128 + __acquires(txq_lock) __releases(txq_lock)
1129 +{
1130 + struct ieee80211_local *local = hw_to_local(hw);
1131 +
1132 + spin_lock_bh(&local->airtime[txq->ac].lock);
1133 + __ieee80211_unschedule_txq(hw, txq, purge);
1134 + spin_unlock_bh(&local->airtime[txq->ac].lock);
1135 +}
1136 +
1137 +void ieee80211_return_txq(struct ieee80211_hw *hw,
1138 + struct ieee80211_txq *txq, bool force)
1139 +{
1140 + struct ieee80211_local *local = hw_to_local(hw);
1141 + struct txq_info *txqi = to_txq_info(txq);
1142 +
1143 + spin_lock_bh(&local->airtime[txq->ac].lock);
1144 +
1145 + if (!RB_EMPTY_NODE(&txqi->schedule_order) && !force &&
1146 + !txq_has_queue(txq))
1147 + __ieee80211_unschedule_txq(hw, txq, false);
1148 +
1149 + spin_unlock_bh(&local->airtime[txq->ac].lock);
1150 }
1151 -EXPORT_SYMBOL(__ieee80211_schedule_txq);
1152 +EXPORT_SYMBOL(ieee80211_return_txq);
1153
1154 DEFINE_STATIC_KEY_FALSE(aql_disable);
1155
1156 bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
1157 struct ieee80211_txq *txq)
1158 {
1159 - struct sta_info *sta;
1160 + struct airtime_info *air_info = to_airtime_info(txq);
1161 struct ieee80211_local *local = hw_to_local(hw);
1162
1163 if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
1164 @@ -3878,15 +4034,12 @@ bool ieee80211_txq_airtime_check(struct
1165 if (unlikely(txq->tid == IEEE80211_NUM_TIDS))
1166 return true;
1167
1168 - sta = container_of(txq->sta, struct sta_info, sta);
1169 - if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
1170 - sta->airtime[txq->ac].aql_limit_low)
1171 + if (atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_low)
1172 return true;
1173
1174 if (atomic_read(&local->aql_total_pending_airtime) <
1175 local->aql_threshold &&
1176 - atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
1177 - sta->airtime[txq->ac].aql_limit_high)
1178 + atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_high)
1179 return true;
1180
1181 return false;
1182 @@ -3896,60 +4049,59 @@ EXPORT_SYMBOL(ieee80211_txq_airtime_chec
1183 bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
1184 struct ieee80211_txq *txq)
1185 {
1186 + struct txq_info *first_txqi = NULL, *txqi = to_txq_info(txq);
1187 struct ieee80211_local *local = hw_to_local(hw);
1188 - struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
1189 - struct sta_info *sta;
1190 - u8 ac = txq->ac;
1191 + struct airtime_sched_info *air_sched;
1192 + struct airtime_info *air_info;
1193 + struct rb_node *node = NULL;
1194 + bool ret = false;
1195 + u64 now;
1196
1197 - spin_lock_bh(&local->active_txq_lock[ac]);
1198
1199 - if (!txqi->txq.sta)
1200 - goto out;
1201 + if (!ieee80211_txq_airtime_check(hw, txq))
1202 + return false;
1203
1204 - if (list_empty(&txqi->schedule_order))
1205 + air_sched = &local->airtime[txq->ac];
1206 + spin_lock_bh(&air_sched->lock);
1207 +
1208 + if (RB_EMPTY_NODE(&txqi->schedule_order))
1209 goto out;
1210
1211 - list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
1212 - schedule_order) {
1213 - if (iter == txqi)
1214 - break;
1215 + now = ktime_get_boottime_ns();
1216
1217 - if (!iter->txq.sta) {
1218 - list_move_tail(&iter->schedule_order,
1219 - &local->active_txqs[ac]);
1220 - continue;
1221 - }
1222 - sta = container_of(iter->txq.sta, struct sta_info, sta);
1223 - if (sta->airtime[ac].deficit < 0)
1224 - sta->airtime[ac].deficit += sta->airtime_weight;
1225 - list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
1226 - }
1227 + /* Like in ieee80211_next_txq(), make sure the first station in the
1228 + * scheduling order is eligible for transmission to avoid starvation.
1229 + */
1230 + node = rb_first_cached(&air_sched->active_txqs);
1231 + if (node) {
1232 + first_txqi = container_of(node, struct txq_info,
1233 + schedule_order);
1234 + air_info = to_airtime_info(&first_txqi->txq);
1235
1236 - sta = container_of(txqi->txq.sta, struct sta_info, sta);
1237 - if (sta->airtime[ac].deficit >= 0)
1238 - goto out;
1239 + if (air_sched->v_t < air_info->v_t)
1240 + airtime_catchup_v_t(air_sched, air_info->v_t, now);
1241 + }
1242
1243 - sta->airtime[ac].deficit += sta->airtime_weight;
1244 - list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
1245 - spin_unlock_bh(&local->active_txq_lock[ac]);
1246 + air_info = to_airtime_info(&txqi->txq);
1247 + if (air_info->v_t <= air_sched->v_t) {
1248 + air_sched->last_schedule_activity = now;
1249 + ret = true;
1250 + }
1251
1252 - return false;
1253 out:
1254 - if (!list_empty(&txqi->schedule_order))
1255 - list_del_init(&txqi->schedule_order);
1256 - spin_unlock_bh(&local->active_txq_lock[ac]);
1257 -
1258 - return true;
1259 + spin_unlock_bh(&air_sched->lock);
1260 + return ret;
1261 }
1262 EXPORT_SYMBOL(ieee80211_txq_may_transmit);
1263
1264 void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
1265 {
1266 struct ieee80211_local *local = hw_to_local(hw);
1267 + struct airtime_sched_info *air_sched = &local->airtime[ac];
1268
1269 - spin_lock_bh(&local->active_txq_lock[ac]);
1270 - local->schedule_round[ac]++;
1271 - spin_unlock_bh(&local->active_txq_lock[ac]);
1272 + spin_lock_bh(&air_sched->lock);
1273 + air_sched->schedule_pos = NULL;
1274 + spin_unlock_bh(&air_sched->lock);
1275 }
1276 EXPORT_SYMBOL(ieee80211_txq_schedule_start);
1277