1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Sun, 19 Jun 2022 23:13:05 +0200
3 Subject: [PATCH] mac80211: switch airtime fairness back to deficit round-robin
4 scheduling
5
6 This reverts commits 6a789ba679d652587532cec2a0e0274fda172f3b and
7 2433647bc8d983a543e7d31b41ca2de1c7e2c198.
8
9 The virtual time scheduler code has a number of issues:
10 - queues slowed down by hardware/firmware powersave handling were not properly
11 handled.
12 - on ath10k in push-pull mode, tx queues that the driver tries to pull from
13 were starved, causing excessive latency
14 - delays between tx enqueue and reported airtime use were causing excessively
15 bursty tx behavior
16
17 The bursty behavior may also be present with the round-robin scheduler, but there
18 it is much easier to fix without introducing additional regressions.
19
20 Signed-off-by: Felix Fietkau <nbd@nbd.name>
21 ---
22
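[Editor's note: for readers unfamiliar with the scheme being restored, the snippet below is a
simplified, standalone sketch of deficit round-robin airtime scheduling. The names (drr_station,
drr_next, drr_charge) are invented for illustration; the actual implementation is the list-based
code restored in net/mac80211/tx.c further down in this patch.]

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	struct drr_station {
		int64_t deficit;	/* signed airtime budget, microseconds */
		uint16_t weight;	/* quantum added once per scheduling round */
		bool backlogged;	/* station currently has queued traffic */
	};

	/*
	 * Return the index of the next station allowed to transmit, or -1 if
	 * no station is backlogged.  Stations whose deficit has gone negative
	 * are refilled by one quantum and skipped, so long-term airtime
	 * converges to shares proportional to the configured weights.
	 * Terminates as long as every backlogged station has a non-zero weight.
	 */
	static int drr_next(struct drr_station *sta, size_t n, size_t *pos)
	{
		size_t idle = 0;

		while (idle < n) {
			struct drr_station *s = &sta[*pos];

			if (s->backlogged && s->deficit >= 0)
				return (int)*pos;	/* keep serving until over budget */

			if (s->backlogged) {
				s->deficit += s->weight;	/* refill one quantum */
				idle = 0;
			} else {
				idle++;
			}
			*pos = (*pos + 1) % n;
		}
		return -1;
	}

	/* Charge the airtime actually used by a completed transmission. */
	static void drr_charge(struct drr_station *s, uint32_t airtime_us)
	{
		s->deficit -= airtime_us;
	}

After each transmission the caller charges the airtime actually used via drr_charge(); the
restored mac80211 code does the equivalent in ieee80211_sta_register_airtime() by subtracting
the reported airtime from sta->airtime[ac].deficit.
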
23 --- a/include/net/mac80211.h
24 +++ b/include/net/mac80211.h
25 @@ -6666,6 +6666,9 @@ static inline void ieee80211_txq_schedul
26 {
27 }
28
29 +void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
30 + struct ieee80211_txq *txq, bool force);
31 +
32 /**
33 * ieee80211_schedule_txq - schedule a TXQ for transmission
34 *
35 @@ -6678,7 +6681,11 @@ static inline void ieee80211_txq_schedul
36 * The driver may call this function if it has buffered packets for
37 * this TXQ internally.
38 */
39 -void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
40 +static inline void
41 +ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
42 +{
43 + __ieee80211_schedule_txq(hw, txq, true);
44 +}
45
46 /**
47 * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
48 @@ -6690,8 +6697,12 @@ void ieee80211_schedule_txq(struct ieee8
49 * The driver may set force=true if it has buffered packets for this TXQ
50 * internally.
51 */
52 -void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
53 - bool force);
54 +static inline void
55 +ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
56 + bool force)
57 +{
58 + __ieee80211_schedule_txq(hw, txq, force);
59 +}
60
61 /**
62 * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
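
[Editor's note: the header changes above keep the driver-facing contract unchanged. A driver that
implements wake_tx_queue still brackets each scheduling pass with ieee80211_txq_schedule_start()
and ieee80211_txq_schedule_end(), pulls queues with ieee80211_next_txq(), and hands them back with
ieee80211_return_txq(). Below is a rough sketch of such a loop; it is illustrative only, and
drv_push_frame_to_hw() stands in for the driver's own hardware submission path, it is not a real
mac80211 or driver symbol.]

	#include <net/mac80211.h>

	/* Hypothetical driver hook; stands in for the driver's tx path. */
	void drv_push_frame_to_hw(struct ieee80211_hw *hw,
				  struct ieee80211_txq *txq,
				  struct sk_buff *skb);

	static void example_schedule_ac(struct ieee80211_hw *hw, u8 ac)
	{
		struct ieee80211_txq *txq;

		rcu_read_lock();
		ieee80211_txq_schedule_start(hw, ac);

		while ((txq = ieee80211_next_txq(hw, ac))) {
			struct sk_buff *skb;

			/* Dequeue while the TXQ stays within its AQL budget. */
			while (ieee80211_txq_airtime_check(hw, txq) &&
			       (skb = ieee80211_tx_dequeue(hw, txq)))
				drv_push_frame_to_hw(hw, txq, skb);

			/*
			 * Hand the TXQ back; with force=false mac80211 only
			 * keeps it on the schedule if it still has queued
			 * frames.
			 */
			ieee80211_return_txq(hw, txq, false);
		}

		ieee80211_txq_schedule_end(hw, ac);
		rcu_read_unlock();
	}

The force parameter matters for drivers that buffer frames internally (e.g. in firmware queues):
passing true keeps the TXQ scheduled even though mac80211 sees it as empty, which is exactly the
distinction the inline wrappers above preserve.
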
63 --- a/net/mac80211/cfg.c
64 +++ b/net/mac80211/cfg.c
65 @@ -1554,38 +1554,6 @@ static void sta_apply_mesh_params(struct
66 #endif
67 }
68
69 -static void sta_apply_airtime_params(struct ieee80211_local *local,
70 - struct sta_info *sta,
71 - struct station_parameters *params)
72 -{
73 - u8 ac;
74 -
75 - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
76 - struct airtime_sched_info *air_sched = &local->airtime[ac];
77 - struct airtime_info *air_info = &sta->airtime[ac];
78 - struct txq_info *txqi;
79 - u8 tid;
80 -
81 - spin_lock_bh(&air_sched->lock);
82 - for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) {
83 - if (air_info->weight == params->airtime_weight ||
84 - !sta->sta.txq[tid] ||
85 - ac != ieee80211_ac_from_tid(tid))
86 - continue;
87 -
88 - airtime_weight_set(air_info, params->airtime_weight);
89 -
90 - txqi = to_txq_info(sta->sta.txq[tid]);
91 - if (RB_EMPTY_NODE(&txqi->schedule_order))
92 - continue;
93 -
94 - ieee80211_update_airtime_weight(local, air_sched,
95 - 0, true);
96 - }
97 - spin_unlock_bh(&air_sched->lock);
98 - }
99 -}
100 -
101 static int sta_apply_parameters(struct ieee80211_local *local,
102 struct sta_info *sta,
103 struct station_parameters *params)
104 @@ -1773,8 +1741,7 @@ static int sta_apply_parameters(struct i
105 sta_apply_mesh_params(local, sta, params);
106
107 if (params->airtime_weight)
108 - sta_apply_airtime_params(local, sta, params);
109 -
110 + sta->airtime_weight = params->airtime_weight;
111
112 /* set the STA state after all sta info from usermode has been set */
113 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) ||
114 --- a/net/mac80211/debugfs.c
115 +++ b/net/mac80211/debugfs.c
116 @@ -216,14 +216,14 @@ static ssize_t aql_txq_limit_read(struct
117 "VI %u %u\n"
118 "BE %u %u\n"
119 "BK %u %u\n",
120 - local->airtime[IEEE80211_AC_VO].aql_txq_limit_low,
121 - local->airtime[IEEE80211_AC_VO].aql_txq_limit_high,
122 - local->airtime[IEEE80211_AC_VI].aql_txq_limit_low,
123 - local->airtime[IEEE80211_AC_VI].aql_txq_limit_high,
124 - local->airtime[IEEE80211_AC_BE].aql_txq_limit_low,
125 - local->airtime[IEEE80211_AC_BE].aql_txq_limit_high,
126 - local->airtime[IEEE80211_AC_BK].aql_txq_limit_low,
127 - local->airtime[IEEE80211_AC_BK].aql_txq_limit_high);
128 + local->aql_txq_limit_low[IEEE80211_AC_VO],
129 + local->aql_txq_limit_high[IEEE80211_AC_VO],
130 + local->aql_txq_limit_low[IEEE80211_AC_VI],
131 + local->aql_txq_limit_high[IEEE80211_AC_VI],
132 + local->aql_txq_limit_low[IEEE80211_AC_BE],
133 + local->aql_txq_limit_high[IEEE80211_AC_BE],
134 + local->aql_txq_limit_low[IEEE80211_AC_BK],
135 + local->aql_txq_limit_high[IEEE80211_AC_BK]);
136 return simple_read_from_buffer(user_buf, count, ppos,
137 buf, len);
138 }
139 @@ -255,11 +255,11 @@ static ssize_t aql_txq_limit_write(struc
140 if (ac >= IEEE80211_NUM_ACS)
141 return -EINVAL;
142
143 - q_limit_low_old = local->airtime[ac].aql_txq_limit_low;
144 - q_limit_high_old = local->airtime[ac].aql_txq_limit_high;
145 + q_limit_low_old = local->aql_txq_limit_low[ac];
146 + q_limit_high_old = local->aql_txq_limit_high[ac];
147
148 - local->airtime[ac].aql_txq_limit_low = q_limit_low;
149 - local->airtime[ac].aql_txq_limit_high = q_limit_high;
150 + local->aql_txq_limit_low[ac] = q_limit_low;
151 + local->aql_txq_limit_high[ac] = q_limit_high;
152
153 mutex_lock(&local->sta_mtx);
154 list_for_each_entry(sta, &local->sta_list, list) {
155 @@ -382,46 +382,6 @@ static const struct file_operations forc
156 .llseek = default_llseek,
157 };
158
159 -static ssize_t airtime_read(struct file *file,
160 - char __user *user_buf,
161 - size_t count,
162 - loff_t *ppos)
163 -{
164 - struct ieee80211_local *local = file->private_data;
165 - char buf[200];
166 - u64 v_t[IEEE80211_NUM_ACS];
167 - u64 wt[IEEE80211_NUM_ACS];
168 - int len = 0, ac;
169 -
170 - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
171 - spin_lock_bh(&local->airtime[ac].lock);
172 - v_t[ac] = local->airtime[ac].v_t;
173 - wt[ac] = local->airtime[ac].weight_sum;
174 - spin_unlock_bh(&local->airtime[ac].lock);
175 - }
176 - len = scnprintf(buf, sizeof(buf),
177 - "\tVO VI BE BK\n"
178 - "Virt-t\t%-10llu %-10llu %-10llu %-10llu\n"
179 - "Weight\t%-10llu %-10llu %-10llu %-10llu\n",
180 - v_t[0],
181 - v_t[1],
182 - v_t[2],
183 - v_t[3],
184 - wt[0],
185 - wt[1],
186 - wt[2],
187 - wt[3]);
188 -
189 - return simple_read_from_buffer(user_buf, count, ppos,
190 - buf, len);
191 -}
192 -
193 -static const struct file_operations airtime_ops = {
194 - .read = airtime_read,
195 - .open = simple_open,
196 - .llseek = default_llseek,
197 -};
198 -
199 #ifdef CONFIG_PM
200 static ssize_t reset_write(struct file *file, const char __user *user_buf,
201 size_t count, loff_t *ppos)
202 @@ -672,11 +632,7 @@ void debugfs_hw_add(struct ieee80211_loc
203 if (local->ops->wake_tx_queue)
204 DEBUGFS_ADD_MODE(aqm, 0600);
205
206 - if (wiphy_ext_feature_isset(local->hw.wiphy,
207 - NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
208 - DEBUGFS_ADD_MODE(airtime, 0600);
209 - DEBUGFS_ADD_MODE(airtime_flags, 0600);
210 - }
211 + DEBUGFS_ADD_MODE(airtime_flags, 0600);
212
213 DEBUGFS_ADD(aql_txq_limit);
214 debugfs_create_u32("aql_threshold", 0600,
215 --- a/net/mac80211/debugfs_netdev.c
216 +++ b/net/mac80211/debugfs_netdev.c
217 @@ -512,34 +512,6 @@ static ssize_t ieee80211_if_fmt_aqm(
218 }
219 IEEE80211_IF_FILE_R(aqm);
220
221 -static ssize_t ieee80211_if_fmt_airtime(
222 - const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
223 -{
224 - struct ieee80211_local *local = sdata->local;
225 - struct ieee80211_txq *txq = sdata->vif.txq;
226 - struct airtime_info *air_info;
227 - int len;
228 -
229 - if (!txq)
230 - return 0;
231 -
232 - spin_lock_bh(&local->airtime[txq->ac].lock);
233 - air_info = to_airtime_info(txq);
234 - len = scnprintf(buf,
235 - buflen,
236 - "RX: %llu us\nTX: %llu us\nWeight: %u\n"
237 - "Virt-T: %lld us\n",
238 - air_info->rx_airtime,
239 - air_info->tx_airtime,
240 - air_info->weight,
241 - air_info->v_t);
242 - spin_unlock_bh(&local->airtime[txq->ac].lock);
243 -
244 - return len;
245 -}
246 -
247 -IEEE80211_IF_FILE_R(airtime);
248 -
249 IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX);
250
251 /* IBSS attributes */
252 @@ -685,10 +657,8 @@ static void add_common_files(struct ieee
253
254 if (sdata->local->ops->wake_tx_queue &&
255 sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
256 - sdata->vif.type != NL80211_IFTYPE_NAN) {
257 + sdata->vif.type != NL80211_IFTYPE_NAN)
258 DEBUGFS_ADD(aqm);
259 - DEBUGFS_ADD(airtime);
260 - }
261 }
262
263 static void add_sta_files(struct ieee80211_sub_if_data *sdata)
264 --- a/net/mac80211/debugfs_sta.c
265 +++ b/net/mac80211/debugfs_sta.c
266 @@ -202,7 +202,7 @@ static ssize_t sta_airtime_read(struct f
267 size_t bufsz = 400;
268 char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
269 u64 rx_airtime = 0, tx_airtime = 0;
270 - u64 v_t[IEEE80211_NUM_ACS];
271 + s64 deficit[IEEE80211_NUM_ACS];
272 ssize_t rv;
273 int ac;
274
275 @@ -210,18 +210,18 @@ static ssize_t sta_airtime_read(struct f
276 return -ENOMEM;
277
278 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
279 - spin_lock_bh(&local->airtime[ac].lock);
280 + spin_lock_bh(&local->active_txq_lock[ac]);
281 rx_airtime += sta->airtime[ac].rx_airtime;
282 tx_airtime += sta->airtime[ac].tx_airtime;
283 - v_t[ac] = sta->airtime[ac].v_t;
284 - spin_unlock_bh(&local->airtime[ac].lock);
285 + deficit[ac] = sta->airtime[ac].deficit;
286 + spin_unlock_bh(&local->active_txq_lock[ac]);
287 }
288
289 p += scnprintf(p, bufsz + buf - p,
290 "RX: %llu us\nTX: %llu us\nWeight: %u\n"
291 - "Virt-T: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
292 - rx_airtime, tx_airtime, sta->airtime[0].weight,
293 - v_t[0], v_t[1], v_t[2], v_t[3]);
294 + "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
295 + rx_airtime, tx_airtime, sta->airtime_weight,
296 + deficit[0], deficit[1], deficit[2], deficit[3]);
297
298 rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
299 kfree(buf);
300 @@ -236,11 +236,11 @@ static ssize_t sta_airtime_write(struct
301 int ac;
302
303 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
304 - spin_lock_bh(&local->airtime[ac].lock);
305 + spin_lock_bh(&local->active_txq_lock[ac]);
306 sta->airtime[ac].rx_airtime = 0;
307 sta->airtime[ac].tx_airtime = 0;
308 - sta->airtime[ac].v_t = 0;
309 - spin_unlock_bh(&local->airtime[ac].lock);
310 + sta->airtime[ac].deficit = sta->airtime_weight;
311 + spin_unlock_bh(&local->active_txq_lock[ac]);
312 }
313
314 return count;
315 @@ -263,10 +263,10 @@ static ssize_t sta_aql_read(struct file
316 return -ENOMEM;
317
318 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
319 - spin_lock_bh(&local->airtime[ac].lock);
320 + spin_lock_bh(&local->active_txq_lock[ac]);
321 q_limit_l[ac] = sta->airtime[ac].aql_limit_low;
322 q_limit_h[ac] = sta->airtime[ac].aql_limit_high;
323 - spin_unlock_bh(&local->airtime[ac].lock);
324 + spin_unlock_bh(&local->active_txq_lock[ac]);
325 q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending);
326 }
327
328 --- a/net/mac80211/ieee80211_i.h
329 +++ b/net/mac80211/ieee80211_i.h
330 @@ -863,16 +863,20 @@ enum txq_info_flags {
331 * @def_flow: used as a fallback flow when a packet destined to @tin hashes to
332 * a fq_flow which is already owned by a different tin
333 * @def_cvars: codel vars for @def_flow
334 - * @schedule_order: used with ieee80211_local->active_txqs
335 * @frags: used to keep fragments created after dequeue
336 + * @schedule_order: used with ieee80211_local->active_txqs
337 + * @schedule_round: counter to prevent infinite loops on TXQ scheduling
338 */
339 struct txq_info {
340 struct fq_tin tin;
341 struct codel_vars def_cvars;
342 struct codel_stats cstats;
343 - struct rb_node schedule_order;
344 +
345 + u16 schedule_round;
346 + struct list_head schedule_order;
347
348 struct sk_buff_head frags;
349 +
350 unsigned long flags;
351
352 /* keep last! */
353 @@ -949,8 +953,6 @@ struct ieee80211_sub_if_data {
354 struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
355 struct mac80211_qos_map __rcu *qos_map;
356
357 - struct airtime_info airtime[IEEE80211_NUM_ACS];
358 -
359 struct work_struct csa_finalize_work;
360 bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
361 struct cfg80211_chan_def csa_chandef;
362 @@ -1185,44 +1187,6 @@ enum mac80211_scan_state {
363 SCAN_ABORT,
364 };
365
366 -/**
367 - * struct airtime_sched_info - state used for airtime scheduling and AQL
368 - *
369 - * @lock: spinlock that protects all the fields in this struct
370 - * @active_txqs: rbtree of currently backlogged queues, sorted by virtual time
371 - * @schedule_pos: the current position maintained while a driver walks the tree
372 - * with ieee80211_next_txq()
373 - * @active_list: list of struct airtime_info structs that were active within
374 - * the last AIRTIME_ACTIVE_DURATION (100 ms), used to compute
375 - * weight_sum
376 - * @last_weight_update: used for rate limiting walking active_list
377 - * @last_schedule_time: tracks the last time a transmission was scheduled; used
378 - * for catching up v_t if no stations are eligible for
379 - * transmission.
380 - * @v_t: global virtual time; queues with v_t < this are eligible for
381 - * transmission
382 - * @weight_sum: total sum of all active stations used for dividing airtime
383 - * @weight_sum_reciprocal: reciprocal of weight_sum (to avoid divisions in fast
384 - * path - see comment above
385 - * IEEE80211_RECIPROCAL_DIVISOR_64)
386 - * @aql_txq_limit_low: AQL limit when total outstanding airtime
387 - * is < IEEE80211_AQL_THRESHOLD
388 - * @aql_txq_limit_high: AQL limit when total outstanding airtime
389 - * is > IEEE80211_AQL_THRESHOLD
390 - */
391 -struct airtime_sched_info {
392 - spinlock_t lock;
393 - struct rb_root_cached active_txqs;
394 - struct rb_node *schedule_pos;
395 - struct list_head active_list;
396 - u64 last_weight_update;
397 - u64 last_schedule_activity;
398 - u64 v_t;
399 - u64 weight_sum;
400 - u64 weight_sum_reciprocal;
401 - u32 aql_txq_limit_low;
402 - u32 aql_txq_limit_high;
403 -};
404 DECLARE_STATIC_KEY_FALSE(aql_disable);
405
406 struct ieee80211_local {
407 @@ -1236,8 +1200,13 @@ struct ieee80211_local {
408 struct codel_params cparams;
409
410 /* protects active_txqs and txqi->schedule_order */
411 - struct airtime_sched_info airtime[IEEE80211_NUM_ACS];
412 + spinlock_t active_txq_lock[IEEE80211_NUM_ACS];
413 + struct list_head active_txqs[IEEE80211_NUM_ACS];
414 + u16 schedule_round[IEEE80211_NUM_ACS];
415 +
416 u16 airtime_flags;
417 + u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
418 + u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
419 u32 aql_threshold;
420 atomic_t aql_total_pending_airtime;
421
422 @@ -1654,125 +1623,6 @@ static inline bool txq_has_queue(struct
423 return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets);
424 }
425
426 -static inline struct airtime_info *to_airtime_info(struct ieee80211_txq *txq)
427 -{
428 - struct ieee80211_sub_if_data *sdata;
429 - struct sta_info *sta;
430 -
431 - if (txq->sta) {
432 - sta = container_of(txq->sta, struct sta_info, sta);
433 - return &sta->airtime[txq->ac];
434 - }
435 -
436 - sdata = vif_to_sdata(txq->vif);
437 - return &sdata->airtime[txq->ac];
438 -}
439 -
440 -/* To avoid divisions in the fast path, we keep pre-computed reciprocals for
441 - * airtime weight calculations. There are two different weights to keep track
442 - * of: The per-station weight and the sum of weights per phy.
443 - *
444 - * For the per-station weights (kept in airtime_info below), we use 32-bit
445 - * reciprocals with a devisor of 2^19. This lets us keep the multiplications and
446 - * divisions for the station weights as 32-bit operations at the cost of a bit
447 - * of rounding error for high weights; but the choice of divisor keeps rounding
448 - * errors <10% for weights <2^15, assuming no more than 8ms of airtime is
449 - * reported at a time.
450 - *
451 - * For the per-phy sum of weights the values can get higher, so we use 64-bit
452 - * operations for those with a 32-bit divisor, which should avoid any
453 - * significant rounding errors.
454 - */
455 -#define IEEE80211_RECIPROCAL_DIVISOR_64 0x100000000ULL
456 -#define IEEE80211_RECIPROCAL_SHIFT_64 32
457 -#define IEEE80211_RECIPROCAL_DIVISOR_32 0x80000U
458 -#define IEEE80211_RECIPROCAL_SHIFT_32 19
459 -
460 -static inline void airtime_weight_set(struct airtime_info *air_info, u16 weight)
461 -{
462 - if (air_info->weight == weight)
463 - return;
464 -
465 - air_info->weight = weight;
466 - if (weight) {
467 - air_info->weight_reciprocal =
468 - IEEE80211_RECIPROCAL_DIVISOR_32 / weight;
469 - } else {
470 - air_info->weight_reciprocal = 0;
471 - }
472 -}
473 -
474 -static inline void airtime_weight_sum_set(struct airtime_sched_info *air_sched,
475 - int weight_sum)
476 -{
477 - if (air_sched->weight_sum == weight_sum)
478 - return;
479 -
480 - air_sched->weight_sum = weight_sum;
481 - if (air_sched->weight_sum) {
482 - air_sched->weight_sum_reciprocal = IEEE80211_RECIPROCAL_DIVISOR_64;
483 - do_div(air_sched->weight_sum_reciprocal, air_sched->weight_sum);
484 - } else {
485 - air_sched->weight_sum_reciprocal = 0;
486 - }
487 -}
488 -
489 -/* A problem when trying to enforce airtime fairness is that we want to divide
490 - * the airtime between the currently *active* stations. However, basing this on
491 - * the instantaneous queue state of stations doesn't work, as queues tend to
492 - * oscillate very quickly between empty and occupied, leading to the scheduler
493 - * thinking only a single station is active when deciding whether to allow
494 - * transmission (and thus not throttling correctly).
495 - *
496 - * To fix this we use a timer-based notion of activity: a station is considered
497 - * active if it has been scheduled within the last 100 ms; we keep a separate
498 - * list of all the stations considered active in this manner, and lazily update
499 - * the total weight of active stations from this list (filtering the stations in
500 - * the list by their 'last active' time).
501 - *
502 - * We add one additional safeguard to guard against stations that manage to get
503 - * scheduled every 100 ms but don't transmit a lot of data, and thus don't use
504 - * up any airtime. Such stations would be able to get priority for an extended
505 - * period of time if they do start transmitting at full capacity again, and so
506 - * we add an explicit maximum for how far behind a station is allowed to fall in
507 - * the virtual airtime domain. This limit is set to a relatively high value of
508 - * 20 ms because the main mechanism for catching up idle stations is the active
509 - * state as described above; i.e., the hard limit should only be hit in
510 - * pathological cases.
511 - */
512 -#define AIRTIME_ACTIVE_DURATION (100 * NSEC_PER_MSEC)
513 -#define AIRTIME_MAX_BEHIND 20000 /* 20 ms */
514 -
515 -static inline bool airtime_is_active(struct airtime_info *air_info, u64 now)
516 -{
517 - return air_info->last_scheduled >= now - AIRTIME_ACTIVE_DURATION;
518 -}
519 -
520 -static inline void airtime_set_active(struct airtime_sched_info *air_sched,
521 - struct airtime_info *air_info, u64 now)
522 -{
523 - air_info->last_scheduled = now;
524 - air_sched->last_schedule_activity = now;
525 - list_move_tail(&air_info->list, &air_sched->active_list);
526 -}
527 -
528 -static inline bool airtime_catchup_v_t(struct airtime_sched_info *air_sched,
529 - u64 v_t, u64 now)
530 -{
531 - air_sched->v_t = v_t;
532 - return true;
533 -}
534 -
535 -static inline void init_airtime_info(struct airtime_info *air_info,
536 - struct airtime_sched_info *air_sched)
537 -{
538 - atomic_set(&air_info->aql_tx_pending, 0);
539 - air_info->aql_limit_low = air_sched->aql_txq_limit_low;
540 - air_info->aql_limit_high = air_sched->aql_txq_limit_high;
541 - airtime_weight_set(air_info, IEEE80211_DEFAULT_AIRTIME_WEIGHT);
542 - INIT_LIST_HEAD(&air_info->list);
543 -}
544 -
545 static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
546 {
547 return ether_addr_equal(raddr, addr) ||
548 @@ -2018,14 +1868,6 @@ int ieee80211_tx_control_port(struct wip
549 u64 *cookie);
550 int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev,
551 const u8 *buf, size_t len);
552 -void ieee80211_resort_txq(struct ieee80211_hw *hw,
553 - struct ieee80211_txq *txq);
554 -void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
555 - struct ieee80211_txq *txq,
556 - bool purge);
557 -void ieee80211_update_airtime_weight(struct ieee80211_local *local,
558 - struct airtime_sched_info *air_sched,
559 - u64 now, bool force);
560
561 /* HT */
562 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
563 --- a/net/mac80211/iface.c
564 +++ b/net/mac80211/iface.c
565 @@ -2192,9 +2192,6 @@ int ieee80211_if_add(struct ieee80211_lo
566 }
567 }
568
569 - for (i = 0; i < IEEE80211_NUM_ACS; i++)
570 - init_airtime_info(&sdata->airtime[i], &local->airtime[i]);
571 -
572 ieee80211_set_default_queues(sdata);
573
574 sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
575 --- a/net/mac80211/main.c
576 +++ b/net/mac80211/main.c
577 @@ -707,13 +707,10 @@ struct ieee80211_hw *ieee80211_alloc_hw_
578 spin_lock_init(&local->queue_stop_reason_lock);
579
580 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
581 - struct airtime_sched_info *air_sched = &local->airtime[i];
582 -
583 - air_sched->active_txqs = RB_ROOT_CACHED;
584 - INIT_LIST_HEAD(&air_sched->active_list);
585 - spin_lock_init(&air_sched->lock);
586 - air_sched->aql_txq_limit_low = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
587 - air_sched->aql_txq_limit_high =
588 + INIT_LIST_HEAD(&local->active_txqs[i]);
589 + spin_lock_init(&local->active_txq_lock[i]);
590 + local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
591 + local->aql_txq_limit_high[i] =
592 IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H;
593 }
594
595 --- a/net/mac80211/rx.c
596 +++ b/net/mac80211/rx.c
597 @@ -1583,8 +1583,12 @@ static void sta_ps_start(struct sta_info
598
599 for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
600 struct ieee80211_txq *txq = sta->sta.txq[tid];
601 + struct txq_info *txqi = to_txq_info(txq);
602
603 - ieee80211_unschedule_txq(&local->hw, txq, false);
604 + spin_lock(&local->active_txq_lock[txq->ac]);
605 + if (!list_empty(&txqi->schedule_order))
606 + list_del_init(&txqi->schedule_order);
607 + spin_unlock(&local->active_txq_lock[txq->ac]);
608
609 if (txq_has_queue(txq))
610 set_bit(tid, &sta->txq_buffered_tids);
611 --- a/net/mac80211/sta_info.c
612 +++ b/net/mac80211/sta_info.c
613 @@ -426,11 +426,15 @@ struct sta_info *sta_info_alloc(struct i
614 if (sta_prepare_rate_control(local, sta, gfp))
615 goto free_txq;
616
617 + sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;
618
619 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
620 skb_queue_head_init(&sta->ps_tx_buf[i]);
621 skb_queue_head_init(&sta->tx_filtered[i]);
622 - init_airtime_info(&sta->airtime[i], &local->airtime[i]);
623 + sta->airtime[i].deficit = sta->airtime_weight;
624 + atomic_set(&sta->airtime[i].aql_tx_pending, 0);
625 + sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i];
626 + sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i];
627 }
628
629 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
630 @@ -1889,59 +1893,24 @@ void ieee80211_sta_set_buffered(struct i
631 }
632 EXPORT_SYMBOL(ieee80211_sta_set_buffered);
633
634 -void ieee80211_register_airtime(struct ieee80211_txq *txq,
635 - u32 tx_airtime, u32 rx_airtime)
636 +void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
637 + u32 tx_airtime, u32 rx_airtime)
638 {
639 - struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
640 - struct ieee80211_local *local = sdata->local;
641 - u64 weight_sum, weight_sum_reciprocal;
642 - struct airtime_sched_info *air_sched;
643 - struct airtime_info *air_info;
644 + struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
645 + struct ieee80211_local *local = sta->sdata->local;
646 + u8 ac = ieee80211_ac_from_tid(tid);
647 u32 airtime = 0;
648
649 - air_sched = &local->airtime[txq->ac];
650 - air_info = to_airtime_info(txq);
651 -
652 - if (local->airtime_flags & AIRTIME_USE_TX)
653 + if (sta->local->airtime_flags & AIRTIME_USE_TX)
654 airtime += tx_airtime;
655 - if (local->airtime_flags & AIRTIME_USE_RX)
656 + if (sta->local->airtime_flags & AIRTIME_USE_RX)
657 airtime += rx_airtime;
658
659 - /* Weights scale so the unit weight is 256 */
660 - airtime <<= 8;
661 -
662 - spin_lock_bh(&air_sched->lock);
663 -
664 - air_info->tx_airtime += tx_airtime;
665 - air_info->rx_airtime += rx_airtime;
666 -
667 - if (air_sched->weight_sum) {
668 - weight_sum = air_sched->weight_sum;
669 - weight_sum_reciprocal = air_sched->weight_sum_reciprocal;
670 - } else {
671 - weight_sum = air_info->weight;
672 - weight_sum_reciprocal = air_info->weight_reciprocal;
673 - }
674 -
675 - /* Round the calculation of global vt */
676 - air_sched->v_t += (u64)((airtime + (weight_sum >> 1)) *
677 - weight_sum_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_64;
678 - air_info->v_t += (u32)((airtime + (air_info->weight >> 1)) *
679 - air_info->weight_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_32;
680 - ieee80211_resort_txq(&local->hw, txq);
681 -
682 - spin_unlock_bh(&air_sched->lock);
683 -}
684 -
685 -void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
686 - u32 tx_airtime, u32 rx_airtime)
687 -{
688 - struct ieee80211_txq *txq = pubsta->txq[tid];
689 -
690 - if (!txq)
691 - return;
692 -
693 - ieee80211_register_airtime(txq, tx_airtime, rx_airtime);
694 + spin_lock_bh(&local->active_txq_lock[ac]);
695 + sta->airtime[ac].tx_airtime += tx_airtime;
696 + sta->airtime[ac].rx_airtime += rx_airtime;
697 + sta->airtime[ac].deficit -= airtime;
698 + spin_unlock_bh(&local->active_txq_lock[ac]);
699 }
700 EXPORT_SYMBOL(ieee80211_sta_register_airtime);
701
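[Editor's note: for context on how this accounting gets fed, a driver that tracks airtime itself
calls the exported ieee80211_sta_register_airtime() from its tx-status path. A minimal sketch
follows; the wrapper name and where tid/tx_time_us come from are driver-specific assumptions, only
the ieee80211_sta_register_airtime() call is real mac80211 API.]

	/* Hypothetical helper in a driver's tx-completion path. */
	static void example_report_tx_airtime(struct ieee80211_sta *sta, u8 tid,
					      u32 tx_time_us)
	{
		if (!sta)
			return;

		/* Charges tx_time_us against the station's per-AC deficit. */
		ieee80211_sta_register_airtime(sta, tid, tx_time_us, 0);
	}

Rx airtime, when the driver tracks it, goes in the last argument instead of 0.
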
702 @@ -2385,7 +2354,7 @@ void sta_set_sinfo(struct sta_info *sta,
703 }
704
705 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
706 - sinfo->airtime_weight = sta->airtime[0].weight;
707 + sinfo->airtime_weight = sta->airtime_weight;
708 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
709 }
710
711 --- a/net/mac80211/sta_info.h
712 +++ b/net/mac80211/sta_info.h
713 @@ -135,25 +135,18 @@ enum ieee80211_agg_stop_reason {
714 #define AIRTIME_USE_TX BIT(0)
715 #define AIRTIME_USE_RX BIT(1)
716
717 -
718 struct airtime_info {
719 u64 rx_airtime;
720 u64 tx_airtime;
721 - u64 v_t;
722 - u64 last_scheduled;
723 - struct list_head list;
724 + s64 deficit;
725 atomic_t aql_tx_pending; /* Estimated airtime for frames pending */
726 u32 aql_limit_low;
727 u32 aql_limit_high;
728 - u32 weight_reciprocal;
729 - u16 weight;
730 };
731
732 void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
733 struct sta_info *sta, u8 ac,
734 u16 tx_airtime, bool tx_completed);
735 -void ieee80211_register_airtime(struct ieee80211_txq *txq,
736 - u32 tx_airtime, u32 rx_airtime);
737
738 struct sta_info;
739
740 @@ -523,6 +516,7 @@ struct ieee80211_fragment_cache {
741 * @tid_seq: per-TID sequence numbers for sending to this STA
742 * @airtime: per-AC struct airtime_info describing airtime statistics for this
743 * station
744 + * @airtime_weight: station weight for airtime fairness calculation purposes
745 * @ampdu_mlme: A-MPDU state machine state
746 * @mesh: mesh STA information
747 * @debugfs_dir: debug filesystem directory dentry
748 @@ -653,6 +647,7 @@ struct sta_info {
749 u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
750
751 struct airtime_info airtime[IEEE80211_NUM_ACS];
752 + u16 airtime_weight;
753
754 /*
755 * Aggregation information, locked with lock.
756 --- a/net/mac80211/status.c
757 +++ b/net/mac80211/status.c
758 @@ -983,25 +983,6 @@ static void __ieee80211_tx_status(struct
759 if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
760 ieee80211_frame_acked(sta, skb);
761
762 - } else if (wiphy_ext_feature_isset(local->hw.wiphy,
763 - NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
764 - struct ieee80211_sub_if_data *sdata;
765 - struct ieee80211_txq *txq;
766 - u32 airtime;
767 -
768 - /* Account airtime to multicast queue */
769 - sdata = ieee80211_sdata_from_skb(local, skb);
770 -
771 - if (sdata && (txq = sdata->vif.txq)) {
772 - airtime = info->status.tx_time ?:
773 - ieee80211_calc_expected_tx_airtime(hw,
774 - &sdata->vif,
775 - NULL,
776 - skb->len,
777 - false);
778 -
779 - ieee80211_register_airtime(txq, airtime, 0);
780 - }
781 }
782
783 /* SNMP counters
784 --- a/net/mac80211/tx.c
785 +++ b/net/mac80211/tx.c
786 @@ -18,7 +18,6 @@
787 #include <linux/bitmap.h>
788 #include <linux/rcupdate.h>
789 #include <linux/export.h>
790 -#include <linux/timekeeping.h>
791 #include <net/net_namespace.h>
792 #include <net/ieee80211_radiotap.h>
793 #include <net/cfg80211.h>
794 @@ -1480,7 +1479,7 @@ void ieee80211_txq_init(struct ieee80211
795 codel_vars_init(&txqi->def_cvars);
796 codel_stats_init(&txqi->cstats);
797 __skb_queue_head_init(&txqi->frags);
798 - RB_CLEAR_NODE(&txqi->schedule_order);
799 + INIT_LIST_HEAD(&txqi->schedule_order);
800
801 txqi->txq.vif = &sdata->vif;
802
803 @@ -1524,7 +1523,9 @@ void ieee80211_txq_purge(struct ieee8021
804 ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
805 spin_unlock_bh(&fq->lock);
806
807 - ieee80211_unschedule_txq(&local->hw, &txqi->txq, true);
808 + spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
809 + list_del_init(&txqi->schedule_order);
810 + spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
811 }
812
813 void ieee80211_txq_set_params(struct ieee80211_local *local)
814 @@ -3819,259 +3820,102 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
815 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
816 {
817 struct ieee80211_local *local = hw_to_local(hw);
818 - struct airtime_sched_info *air_sched;
819 - u64 now = ktime_get_coarse_boottime_ns();
820 struct ieee80211_txq *ret = NULL;
821 - struct airtime_info *air_info;
822 - struct txq_info *txqi = NULL;
823 - struct rb_node *node;
824 - bool first = false;
825 + struct txq_info *txqi = NULL, *head = NULL;
826 + bool found_eligible_txq = false;
827
828 - air_sched = &local->airtime[ac];
829 - spin_lock_bh(&air_sched->lock);
830 + spin_lock_bh(&local->active_txq_lock[ac]);
831
832 - node = air_sched->schedule_pos;
833 -
834 -begin:
835 - if (!node) {
836 - node = rb_first_cached(&air_sched->active_txqs);
837 - first = true;
838 - } else {
839 - node = rb_next(node);
840 - }
841 -
842 - if (!node)
843 - goto out;
844 -
845 - txqi = container_of(node, struct txq_info, schedule_order);
846 - air_info = to_airtime_info(&txqi->txq);
847 -
848 - if (air_info->v_t > air_sched->v_t &&
849 - (!first || !airtime_catchup_v_t(air_sched, air_info->v_t, now)))
850 + begin:
851 + txqi = list_first_entry_or_null(&local->active_txqs[ac],
852 + struct txq_info,
853 + schedule_order);
854 + if (!txqi)
855 goto out;
856
857 - if (!ieee80211_txq_airtime_check(hw, &txqi->txq)) {
858 - first = false;
859 - goto begin;
860 - }
861 -
862 - air_sched->schedule_pos = node;
863 - air_sched->last_schedule_activity = now;
864 - ret = &txqi->txq;
865 -out:
866 - spin_unlock_bh(&air_sched->lock);
867 - return ret;
868 -}
869 -EXPORT_SYMBOL(ieee80211_next_txq);
870 -
871 -static void __ieee80211_insert_txq(struct rb_root_cached *root,
872 - struct txq_info *txqi)
873 -{
874 - struct rb_node **new = &root->rb_root.rb_node;
875 - struct airtime_info *old_air, *new_air;
876 - struct rb_node *parent = NULL;
877 - struct txq_info *__txqi;
878 - bool leftmost = true;
879 -
880 - while (*new) {
881 - parent = *new;
882 - __txqi = rb_entry(parent, struct txq_info, schedule_order);
883 - old_air = to_airtime_info(&__txqi->txq);
884 - new_air = to_airtime_info(&txqi->txq);
885 -
886 - if (new_air->v_t <= old_air->v_t) {
887 - new = &parent->rb_left;
888 - } else {
889 - new = &parent->rb_right;
890 - leftmost = false;
891 - }
892 + if (txqi == head) {
893 + if (!found_eligible_txq)
894 + goto out;
895 + else
896 + found_eligible_txq = false;
897 }
898
899 - rb_link_node(&txqi->schedule_order, parent, new);
900 - rb_insert_color_cached(&txqi->schedule_order, root, leftmost);
901 -}
902 -
903 -void ieee80211_resort_txq(struct ieee80211_hw *hw,
904 - struct ieee80211_txq *txq)
905 -{
906 - struct airtime_info *air_info = to_airtime_info(txq);
907 - struct ieee80211_local *local = hw_to_local(hw);
908 - struct txq_info *txqi = to_txq_info(txq);
909 - struct airtime_sched_info *air_sched;
910 -
911 - air_sched = &local->airtime[txq->ac];
912 + if (!head)
913 + head = txqi;
914
915 - lockdep_assert_held(&air_sched->lock);
916 -
917 - if (!RB_EMPTY_NODE(&txqi->schedule_order)) {
918 - struct airtime_info *a_prev = NULL, *a_next = NULL;
919 - struct txq_info *t_prev, *t_next;
920 - struct rb_node *n_prev, *n_next;
921 + if (txqi->txq.sta) {
922 + struct sta_info *sta = container_of(txqi->txq.sta,
923 + struct sta_info, sta);
924 + bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq);
925 + s64 deficit = sta->airtime[txqi->txq.ac].deficit;
926
927 - /* Erasing a node can cause an expensive rebalancing operation,
928 - * so we check the previous and next nodes first and only remove
929 - * and re-insert if the current node is not already in the
930 - * correct position.
931 - */
932 - if ((n_prev = rb_prev(&txqi->schedule_order)) != NULL) {
933 - t_prev = container_of(n_prev, struct txq_info,
934 - schedule_order);
935 - a_prev = to_airtime_info(&t_prev->txq);
936 - }
937 + if (aql_check)
938 + found_eligible_txq = true;
939
940 - if ((n_next = rb_next(&txqi->schedule_order)) != NULL) {
941 - t_next = container_of(n_next, struct txq_info,
942 - schedule_order);
943 - a_next = to_airtime_info(&t_next->txq);
944 + if (deficit < 0)
945 + sta->airtime[txqi->txq.ac].deficit +=
946 + sta->airtime_weight;
947 +
948 + if (deficit < 0 || !aql_check) {
949 + list_move_tail(&txqi->schedule_order,
950 + &local->active_txqs[txqi->txq.ac]);
951 + goto begin;
952 }
953 -
954 - if ((!a_prev || a_prev->v_t <= air_info->v_t) &&
955 - (!a_next || a_next->v_t > air_info->v_t))
956 - return;
957 -
958 - if (air_sched->schedule_pos == &txqi->schedule_order)
959 - air_sched->schedule_pos = n_prev;
960 -
961 - rb_erase_cached(&txqi->schedule_order,
962 - &air_sched->active_txqs);
963 - RB_CLEAR_NODE(&txqi->schedule_order);
964 - __ieee80211_insert_txq(&air_sched->active_txqs, txqi);
965 }
966 -}
967 -
968 -void ieee80211_update_airtime_weight(struct ieee80211_local *local,
969 - struct airtime_sched_info *air_sched,
970 - u64 now, bool force)
971 -{
972 - struct airtime_info *air_info, *tmp;
973 - u64 weight_sum = 0;
974 -
975 - if (unlikely(!now))
976 - now = ktime_get_coarse_boottime_ns();
977 -
978 - lockdep_assert_held(&air_sched->lock);
979 -
980 - if (!force && (air_sched->last_weight_update <
981 - now - AIRTIME_ACTIVE_DURATION))
982 - return;
983 -
984 - list_for_each_entry_safe(air_info, tmp,
985 - &air_sched->active_list, list) {
986 - if (airtime_is_active(air_info, now))
987 - weight_sum += air_info->weight;
988 - else
989 - list_del_init(&air_info->list);
990 - }
991 - airtime_weight_sum_set(air_sched, weight_sum);
992 - air_sched->last_weight_update = now;
993 -}
994
995 -void ieee80211_schedule_txq(struct ieee80211_hw *hw,
996 - struct ieee80211_txq *txq)
997 - __acquires(txq_lock) __releases(txq_lock)
998 -{
999 - struct ieee80211_local *local = hw_to_local(hw);
1000 - struct txq_info *txqi = to_txq_info(txq);
1001 - struct airtime_sched_info *air_sched;
1002 - u64 now = ktime_get_coarse_boottime_ns();
1003 - struct airtime_info *air_info;
1004 - u8 ac = txq->ac;
1005 - bool was_active;
1006
1007 - air_sched = &local->airtime[ac];
1008 - air_info = to_airtime_info(txq);
1009 -
1010 - spin_lock_bh(&air_sched->lock);
1011 - was_active = airtime_is_active(air_info, now);
1012 - airtime_set_active(air_sched, air_info, now);
1013 -
1014 - if (!RB_EMPTY_NODE(&txqi->schedule_order))
1015 + if (txqi->schedule_round == local->schedule_round[ac])
1016 goto out;
1017
1018 - /* If the station has been inactive for a while, catch up its v_t so it
1019 - * doesn't get indefinite priority; see comment above the definition of
1020 - * AIRTIME_MAX_BEHIND.
1021 - */
1022 - if ((!was_active && air_info->v_t < air_sched->v_t) ||
1023 - air_info->v_t < air_sched->v_t - AIRTIME_MAX_BEHIND)
1024 - air_info->v_t = air_sched->v_t;
1025 -
1026 - ieee80211_update_airtime_weight(local, air_sched, now, !was_active);
1027 - __ieee80211_insert_txq(&air_sched->active_txqs, txqi);
1028 + list_del_init(&txqi->schedule_order);
1029 + txqi->schedule_round = local->schedule_round[ac];
1030 + ret = &txqi->txq;
1031
1032 out:
1033 - spin_unlock_bh(&air_sched->lock);
1034 -}
1035 -EXPORT_SYMBOL(ieee80211_schedule_txq);
1036 -
1037 -static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw,
1038 - struct ieee80211_txq *txq,
1039 - bool purge)
1040 -{
1041 - struct ieee80211_local *local = hw_to_local(hw);
1042 - struct txq_info *txqi = to_txq_info(txq);
1043 - struct airtime_sched_info *air_sched;
1044 - struct airtime_info *air_info;
1045 -
1046 - air_sched = &local->airtime[txq->ac];
1047 - air_info = to_airtime_info(&txqi->txq);
1048 -
1049 - lockdep_assert_held(&air_sched->lock);
1050 -
1051 - if (purge) {
1052 - list_del_init(&air_info->list);
1053 - ieee80211_update_airtime_weight(local, air_sched, 0, true);
1054 - }
1055 -
1056 - if (RB_EMPTY_NODE(&txqi->schedule_order))
1057 - return;
1058 -
1059 - if (air_sched->schedule_pos == &txqi->schedule_order)
1060 - air_sched->schedule_pos = rb_prev(&txqi->schedule_order);
1061 -
1062 - if (!purge)
1063 - airtime_set_active(air_sched, air_info,
1064 - ktime_get_coarse_boottime_ns());
1065 -
1066 - rb_erase_cached(&txqi->schedule_order,
1067 - &air_sched->active_txqs);
1068 - RB_CLEAR_NODE(&txqi->schedule_order);
1069 + spin_unlock_bh(&local->active_txq_lock[ac]);
1070 + return ret;
1071 }
1072 +EXPORT_SYMBOL(ieee80211_next_txq);
1073
1074 -void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
1075 +void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
1076 struct ieee80211_txq *txq,
1077 - bool purge)
1078 - __acquires(txq_lock) __releases(txq_lock)
1079 -{
1080 - struct ieee80211_local *local = hw_to_local(hw);
1081 -
1082 - spin_lock_bh(&local->airtime[txq->ac].lock);
1083 - __ieee80211_unschedule_txq(hw, txq, purge);
1084 - spin_unlock_bh(&local->airtime[txq->ac].lock);
1085 -}
1086 -
1087 -void ieee80211_return_txq(struct ieee80211_hw *hw,
1088 - struct ieee80211_txq *txq, bool force)
1089 + bool force)
1090 {
1091 struct ieee80211_local *local = hw_to_local(hw);
1092 struct txq_info *txqi = to_txq_info(txq);
1093
1094 - spin_lock_bh(&local->airtime[txq->ac].lock);
1095 + spin_lock_bh(&local->active_txq_lock[txq->ac]);
1096
1097 - if (!RB_EMPTY_NODE(&txqi->schedule_order) && !force &&
1098 - !txq_has_queue(txq))
1099 - __ieee80211_unschedule_txq(hw, txq, false);
1100 + if (list_empty(&txqi->schedule_order) &&
1101 + (force || !skb_queue_empty(&txqi->frags) ||
1102 + txqi->tin.backlog_packets)) {
1103 + /* If airtime accounting is active, always enqueue STAs at the
1104 + * head of the list to ensure that they only get moved to the
1105 + * back by the airtime DRR scheduler once they have a negative
1106 + * deficit. A station that already has a negative deficit will
1107 + * get immediately moved to the back of the list on the next
1108 + * call to ieee80211_next_txq().
1109 + */
1110 + if (txqi->txq.sta && local->airtime_flags &&
1111 + wiphy_ext_feature_isset(local->hw.wiphy,
1112 + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
1113 + list_add(&txqi->schedule_order,
1114 + &local->active_txqs[txq->ac]);
1115 + else
1116 + list_add_tail(&txqi->schedule_order,
1117 + &local->active_txqs[txq->ac]);
1118 + }
1119
1120 - spin_unlock_bh(&local->airtime[txq->ac].lock);
1121 + spin_unlock_bh(&local->active_txq_lock[txq->ac]);
1122 }
1123 -EXPORT_SYMBOL(ieee80211_return_txq);
1124 +EXPORT_SYMBOL(__ieee80211_schedule_txq);
1125
1126 DEFINE_STATIC_KEY_FALSE(aql_disable);
1127
1128 bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
1129 struct ieee80211_txq *txq)
1130 {
1131 - struct airtime_info *air_info = to_airtime_info(txq);
1132 + struct sta_info *sta;
1133 struct ieee80211_local *local = hw_to_local(hw);
1134
1135 if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
1136 @@ -4086,12 +3930,15 @@ bool ieee80211_txq_airtime_check(struct
1137 if (unlikely(txq->tid == IEEE80211_NUM_TIDS))
1138 return true;
1139
1140 - if (atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_low)
1141 + sta = container_of(txq->sta, struct sta_info, sta);
1142 + if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
1143 + sta->airtime[txq->ac].aql_limit_low)
1144 return true;
1145
1146 if (atomic_read(&local->aql_total_pending_airtime) <
1147 local->aql_threshold &&
1148 - atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_high)
1149 + atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
1150 + sta->airtime[txq->ac].aql_limit_high)
1151 return true;
1152
1153 return false;
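
[Editor's note: restated outside the kernel types, the AQL admission rule above reduces to the
following predicate. This is a simplified restatement for readability, not the kernel code, and
the parameter names are invented.]

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Simplified restatement of ieee80211_txq_airtime_check(): a TXQ may
	 * keep dequeuing while its estimated in-flight airtime is below its
	 * low limit, or below its high limit as long as the device-wide
	 * total is still under the AQL threshold.
	 */
	static bool aql_may_tx(uint32_t txq_pending_us, uint32_t limit_low_us,
			       uint32_t limit_high_us, uint32_t total_pending_us,
			       uint32_t threshold_us)
	{
		if (txq_pending_us < limit_low_us)
			return true;

		return total_pending_us < threshold_us &&
		       txq_pending_us < limit_high_us;
	}

The two-level limit lets a single queue burst up to the high limit while the device as a whole is
lightly loaded, but clamps it to the low limit once total queued airtime crosses aql_threshold.
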
1154 @@ -4101,59 +3948,60 @@ EXPORT_SYMBOL(ieee80211_txq_airtime_chec
1155 bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
1156 struct ieee80211_txq *txq)
1157 {
1158 - struct txq_info *first_txqi = NULL, *txqi = to_txq_info(txq);
1159 struct ieee80211_local *local = hw_to_local(hw);
1160 - struct airtime_sched_info *air_sched;
1161 - struct airtime_info *air_info;
1162 - struct rb_node *node = NULL;
1163 - bool ret = false;
1164 - u64 now;
1165 -
1166 + struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
1167 + struct sta_info *sta;
1168 + u8 ac = txq->ac;
1169
1170 - if (!ieee80211_txq_airtime_check(hw, txq))
1171 - return false;
1172 + spin_lock_bh(&local->active_txq_lock[ac]);
1173
1174 - air_sched = &local->airtime[txq->ac];
1175 - spin_lock_bh(&air_sched->lock);
1176 + if (!txqi->txq.sta)
1177 + goto out;
1178
1179 - if (RB_EMPTY_NODE(&txqi->schedule_order))
1180 + if (list_empty(&txqi->schedule_order))
1181 goto out;
1182
1183 - now = ktime_get_coarse_boottime_ns();
1184 + list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
1185 + schedule_order) {
1186 + if (iter == txqi)
1187 + break;
1188
1189 - /* Like in ieee80211_next_txq(), make sure the first station in the
1190 - * scheduling order is eligible for transmission to avoid starvation.
1191 - */
1192 - node = rb_first_cached(&air_sched->active_txqs);
1193 - if (node) {
1194 - first_txqi = container_of(node, struct txq_info,
1195 - schedule_order);
1196 - air_info = to_airtime_info(&first_txqi->txq);
1197 -
1198 - if (air_sched->v_t < air_info->v_t)
1199 - airtime_catchup_v_t(air_sched, air_info->v_t, now);
1200 + if (!iter->txq.sta) {
1201 + list_move_tail(&iter->schedule_order,
1202 + &local->active_txqs[ac]);
1203 + continue;
1204 + }
1205 + sta = container_of(iter->txq.sta, struct sta_info, sta);
1206 + if (sta->airtime[ac].deficit < 0)
1207 + sta->airtime[ac].deficit += sta->airtime_weight;
1208 + list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
1209 }
1210
1211 - air_info = to_airtime_info(&txqi->txq);
1212 - if (air_info->v_t <= air_sched->v_t) {
1213 - air_sched->last_schedule_activity = now;
1214 - ret = true;
1215 - }
1216 + sta = container_of(txqi->txq.sta, struct sta_info, sta);
1217 + if (sta->airtime[ac].deficit >= 0)
1218 + goto out;
1219 +
1220 + sta->airtime[ac].deficit += sta->airtime_weight;
1221 + list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
1222 + spin_unlock_bh(&local->active_txq_lock[ac]);
1223
1224 + return false;
1225 out:
1226 - spin_unlock_bh(&air_sched->lock);
1227 - return ret;
1228 + if (!list_empty(&txqi->schedule_order))
1229 + list_del_init(&txqi->schedule_order);
1230 + spin_unlock_bh(&local->active_txq_lock[ac]);
1231 +
1232 + return true;
1233 }
1234 EXPORT_SYMBOL(ieee80211_txq_may_transmit);
1235
1236 void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
1237 {
1238 struct ieee80211_local *local = hw_to_local(hw);
1239 - struct airtime_sched_info *air_sched = &local->airtime[ac];
1240
1241 - spin_lock_bh(&air_sched->lock);
1242 - air_sched->schedule_pos = NULL;
1243 - spin_unlock_bh(&air_sched->lock);
1244 + spin_lock_bh(&local->active_txq_lock[ac]);
1245 + local->schedule_round[ac]++;
1246 + spin_unlock_bh(&local->active_txq_lock[ac]);
1247 }
1248 EXPORT_SYMBOL(ieee80211_txq_schedule_start);
1249