From: Felix Fietkau <nbd@nbd.name>
Date: Tue, 8 Oct 2019 18:54:46 +0200
Subject: [PATCH] mac80211: minstrel_ht: rename prob_ewma to prob_avg, use it
 for the new average

Reduces per-rate data structure size

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

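A brief, hedged illustration of the "Reduces per-rate data structure size"
claim above (editor's sketch, not part of the patch): the rc80211_minstrel.h
hunk below drops struct minstrel_avg_ctx (s32 prev[2]) plus the separate u16
prob_ewma field and keeps only two u16 fields, prob_avg and prob_avg_1, so
the filter history and the reported average share the same storage. The
standalone program here only mirrors those field layouts; the printed sizes
assume the usual 2-byte u16, 4-byte s32 and natural struct alignment.

/* Editor's sketch, not kernel code: compare per-rate averaging state
 * before and after the change described in this patch.
 */
#include <stdint.h>
#include <stdio.h>

struct avg_state_old {		/* before: struct minstrel_avg_ctx + prob_ewma */
	int32_t prev[2];	/* s32 prev[2]: filter history */
	uint16_t prob_ewma;	/* u16: smoothed success probability */
};

struct avg_state_new {		/* after: two u16 fields, no extra context */
	uint16_t prob_avg;	/* filter output, also the newest history value */
	uint16_t prob_avg_1;	/* older history value */
};

int main(void)
{
	/* Typically prints "old: 12, new: 4" (bytes per rate entry). */
	printf("old: %zu, new: %zu\n",
	       sizeof(struct avg_state_old), sizeof(struct avg_state_new));
	return 0;
}
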
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -70,7 +70,7 @@ rix_to_ndx(struct minstrel_sta_info *mi,
 }
 
 /* return current EMWA throughput */
-int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
+int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_avg)
 {
 	int usecs;
 
@@ -79,13 +79,13 @@ int minstrel_get_tp_avg(struct minstrel_
 		usecs = 1000000;
 
 	/* reset thr. below 10% success */
-	if (mr->stats.prob_ewma < MINSTREL_FRAC(10, 100))
+	if (mr->stats.prob_avg < MINSTREL_FRAC(10, 100))
 		return 0;
 
-	if (prob_ewma > MINSTREL_FRAC(90, 100))
+	if (prob_avg > MINSTREL_FRAC(90, 100))
 		return MINSTREL_TRUNC(100000 * (MINSTREL_FRAC(90, 100) / usecs));
 	else
-		return MINSTREL_TRUNC(100000 * (prob_ewma / usecs));
+		return MINSTREL_TRUNC(100000 * (prob_avg / usecs));
 }
 
 /* find & sort topmost throughput rates */
@@ -98,8 +98,8 @@ minstrel_sort_best_tp_rates(struct minst
 
 	for (j = MAX_THR_RATES; j > 0; --j) {
 		tmp_mrs = &mi->r[tp_list[j - 1]].stats;
-		if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
-		    minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+		if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_avg) <=
+		    minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_avg))
 			break;
 	}
 
@@ -166,15 +166,15 @@ minstrel_calc_rate_stats(struct minstrel
 		mrs->sample_skipped = 0;
 		cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
 		if (mp->new_avg) {
-			mrs->prob_ewma = minstrel_filter_avg_add(&mrs->avg,
-								 cur_prob);
+			minstrel_filter_avg_add(&mrs->prob_avg,
+						&mrs->prob_avg_1, cur_prob);
 		} else if (unlikely(!mrs->att_hist)) {
-			mrs->prob_ewma = cur_prob;
+			mrs->prob_avg = cur_prob;
 		} else {
 			/*update exponential weighted moving avarage */
-			mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma,
-						       cur_prob,
-						       EWMA_LEVEL);
+			mrs->prob_avg = minstrel_ewma(mrs->prob_avg,
+						      cur_prob,
+						      EWMA_LEVEL);
 		}
 		mrs->att_hist += mrs->attempts;
 		mrs->succ_hist += mrs->success;
@@ -208,8 +208,8 @@ minstrel_update_stats(struct minstrel_pr
 
 		/* Sample less often below the 10% chance of success.
 		 * Sample less often above the 95% chance of success. */
-		if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
-		    mrs->prob_ewma < MINSTREL_FRAC(10, 100)) {
+		if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
+		    mrs->prob_avg < MINSTREL_FRAC(10, 100)) {
 			mr->adjusted_retry_count = mrs->retry_count >> 1;
 			if (mr->adjusted_retry_count > 2)
 				mr->adjusted_retry_count = 2;
@@ -229,14 +229,14 @@ minstrel_update_stats(struct minstrel_pr
 		 * choose the maximum throughput rate as max_prob_rate
 		 * (2) if all success probabilities < 95%, the rate with
 		 * highest success probability is chosen as max_prob_rate */
-		if (mrs->prob_ewma >= MINSTREL_FRAC(95, 100)) {
-			tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_ewma);
+		if (mrs->prob_avg >= MINSTREL_FRAC(95, 100)) {
+			tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_avg);
 			tmp_prob_tp = minstrel_get_tp_avg(&mi->r[tmp_prob_rate],
-							  tmp_mrs->prob_ewma);
+							  tmp_mrs->prob_avg);
 			if (tmp_cur_tp >= tmp_prob_tp)
 				tmp_prob_rate = i;
 		} else {
-			if (mrs->prob_ewma >= tmp_mrs->prob_ewma)
+			if (mrs->prob_avg >= tmp_mrs->prob_avg)
 				tmp_prob_rate = i;
 		}
 	}
@@ -426,7 +426,7 @@ minstrel_get_rate(void *priv, struct iee
 	 * has a probability of >95%, we shouldn't be attempting
 	 * to use it, as this only wastes precious airtime */
 	if (!mrr_capable &&
-	   (mi->r[ndx].stats.prob_ewma > MINSTREL_FRAC(95, 100)))
+	   (mi->r[ndx].stats.prob_avg > MINSTREL_FRAC(95, 100)))
 		return;
 
 	mi->prev_sample = true;
@@ -577,7 +577,7 @@ static u32 minstrel_get_expected_through
 	 * computing cur_tp
 	 */
 	tmp_mrs = &mi->r[idx].stats;
-	tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
+	tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_avg) * 10;
 	tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
 
 	return tmp_cur_tp;
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -47,14 +47,10 @@ minstrel_ewma(int old, int new, int weig
 	return old + incr;
 }
 
-struct minstrel_avg_ctx {
-	s32 prev[2];
-};
-
-static inline int minstrel_filter_avg_add(struct minstrel_avg_ctx *ctx, s32 in)
+static inline int minstrel_filter_avg_add(u16 *prev_1, u16 *prev_2, s32 in)
 {
-	s32 out_1 = ctx->prev[0];
-	s32 out_2 = ctx->prev[1];
+	s32 out_1 = *prev_1;
+	s32 out_2 = *prev_2;
 	s32 val;
 
 	if (!in)
@@ -76,8 +72,8 @@ static inline int minstrel_filter_avg_ad
 		val = 1;
 
 out:
-	ctx->prev[1] = out_1;
-	ctx->prev[0] = val;
+	*prev_2 = out_1;
+	*prev_1 = val;
 
 	return val;
 }
@@ -90,10 +86,9 @@ struct minstrel_rate_stats {
 	/* total attempts/success counters */
 	u32 att_hist, succ_hist;
 
-	struct minstrel_avg_ctx avg;
-
-	/* prob_ewma - exponential weighted moving average of prob */
-	u16 prob_ewma;
+	/* prob_avg - moving average of prob */
+	u16 prob_avg;
+	u16 prob_avg_1;
 
 	/* maximum retry counts */
 	u8 retry_count;
@@ -181,7 +176,7 @@ void minstrel_add_sta_debugfs(void *priv
 /* Recalculate success probabilities and counters for a given rate using EWMA */
 void minstrel_calc_rate_stats(struct minstrel_priv *mp,
			      struct minstrel_rate_stats *mrs);
-int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
+int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_avg);
 
 /* debugfs */
 int minstrel_stats_open(struct inode *inode, struct file *file);
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -90,8 +90,8 @@ minstrel_stats_open(struct inode *inode,
 		p += sprintf(p, "%6u ", mr->perfect_tx_time);
 
 		tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
-		tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
-		eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+		tp_avg = minstrel_get_tp_avg(mr, mrs->prob_avg);
+		eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
 
 		p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u"
				" %3u %3u %-3u "
@@ -147,8 +147,8 @@ minstrel_stats_csv_open(struct inode *in
 		p += sprintf(p, "%u,",mr->perfect_tx_time);
 
 		tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
-		tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
-		eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+		tp_avg = minstrel_get_tp_avg(mr, mrs->prob_avg);
+		eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
 
 		p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,%u,"
				"%llu,%llu,%d,%d\n",
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -346,12 +346,12 @@ minstrel_ht_avg_ampdu_len(struct minstre
  */
 int
 minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
-		       int prob_ewma)
+		       int prob_avg)
 {
 	unsigned int nsecs = 0;
 
 	/* do not account throughput if sucess prob is below 10% */
-	if (prob_ewma < MINSTREL_FRAC(10, 100))
+	if (prob_avg < MINSTREL_FRAC(10, 100))
 		return 0;
 
 	if (group != MINSTREL_CCK_GROUP)
@@ -365,11 +365,11 @@ minstrel_ht_get_tp_avg(struct minstrel_h
 	 * account for collision related packet error rate fluctuation
 	 * (prob is scaled - see MINSTREL_FRAC above)
 	 */
-	if (prob_ewma > MINSTREL_FRAC(90, 100))
+	if (prob_avg > MINSTREL_FRAC(90, 100))
 		return MINSTREL_TRUNC(100000 * ((MINSTREL_FRAC(90, 100) * 1000)
								      / nsecs));
 	else
-		return MINSTREL_TRUNC(100000 * ((prob_ewma * 1000) / nsecs));
+		return MINSTREL_TRUNC(100000 * ((prob_avg * 1000) / nsecs));
 }
 
 /*
@@ -389,13 +389,13 @@ minstrel_ht_sort_best_tp_rates(struct mi
 
 	cur_group = index / MCS_GROUP_RATES;
 	cur_idx = index % MCS_GROUP_RATES;
-	cur_prob = mi->groups[cur_group].rates[cur_idx].prob_ewma;
+	cur_prob = mi->groups[cur_group].rates[cur_idx].prob_avg;
 	cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob);
 
 	do {
 		tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
 		tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
-		tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+		tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
 		tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx,
						    tmp_prob);
 		if (cur_tp_avg < tmp_tp_avg ||
@@ -432,7 +432,7 @@ minstrel_ht_set_best_prob_rate(struct mi
 
 	tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
 	tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
-	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
 	tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
 
 	/* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from
@@ -444,11 +444,11 @@ minstrel_ht_set_best_prob_rate(struct mi
 
 	max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
 	max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
-	max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
+	max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg;
 
-	if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) {
+	if (mrs->prob_avg > MINSTREL_FRAC(75, 100)) {
 		cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
-						    mrs->prob_ewma);
+						    mrs->prob_avg);
 		if (cur_tp_avg > tmp_tp_avg)
 			mi->max_prob_rate = index;
 
@@ -458,9 +458,9 @@ minstrel_ht_set_best_prob_rate(struct mi
 		if (cur_tp_avg > max_gpr_tp_avg)
 			mg->max_group_prob_rate = index;
 	} else {
-		if (mrs->prob_ewma > tmp_prob)
+		if (mrs->prob_avg > tmp_prob)
 			mi->max_prob_rate = index;
-		if (mrs->prob_ewma > max_gpr_prob)
+		if (mrs->prob_avg > max_gpr_prob)
 			mg->max_group_prob_rate = index;
 	}
 }
@@ -482,12 +482,12 @@ minstrel_ht_assign_best_tp_rates(struct
 
 	tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
 	tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
-	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
 	tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
 
 	tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
 	tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
-	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
 	tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
 
 	if (tmp_cck_tp_rate && tmp_cck_tp > tmp_mcs_tp) {
@@ -518,7 +518,7 @@ minstrel_ht_prob_rate_reduce_streams(str
 			continue;
 
 		tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
-		tmp_prob = mi->groups[group].rates[tmp_idx].prob_ewma;
+		tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg;
 
 		if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) &&
 		    (minstrel_mcs_groups[group].streams < tmp_max_streams)) {
@@ -623,7 +623,7 @@ minstrel_ht_rate_sample_switch(struct mi
 	 * If that fails, look again for a rate that is at least as fast
 	 */
 	mrs = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
-	faster_rate = mrs->prob_ewma > MINSTREL_FRAC(75, 100);
+	faster_rate = mrs->prob_avg > MINSTREL_FRAC(75, 100);
 	minstrel_ht_find_probe_rates(mi, rates, &n_rates, faster_rate);
 	if (!n_rates && faster_rate)
 		minstrel_ht_find_probe_rates(mi, rates, &n_rates, false);
@@ -738,7 +738,7 @@ minstrel_ht_update_stats(struct minstrel
 			mrs = &mg->rates[i];
 			mrs->retry_updated = false;
 			minstrel_calc_rate_stats(mp, mrs);
-			cur_prob = mrs->prob_ewma;
+			cur_prob = mrs->prob_avg;
 
 			if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
 				continue;
@@ -1012,7 +1012,7 @@ minstrel_calc_retransmit(struct minstrel
 	unsigned int overhead = 0, overhead_rtscts = 0;
 
 	mrs = minstrel_get_ratestats(mi, index);
-	if (mrs->prob_ewma < MINSTREL_FRAC(1, 10)) {
+	if (mrs->prob_avg < MINSTREL_FRAC(1, 10)) {
 		mrs->retry_count = 1;
 		mrs->retry_count_rtscts = 1;
 		return;
@@ -1069,7 +1069,7 @@ minstrel_ht_set_rate(struct minstrel_pri
 	if (!mrs->retry_updated)
 		minstrel_calc_retransmit(mp, mi, index);
 
-	if (mrs->prob_ewma < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
+	if (mrs->prob_avg < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
 		ratetbl->rate[offset].count = 2;
 		ratetbl->rate[offset].count_rts = 2;
 		ratetbl->rate[offset].count_cts = 2;
@@ -1103,11 +1103,11 @@ minstrel_ht_set_rate(struct minstrel_pri
 }
 
 static inline int
-minstrel_ht_get_prob_ewma(struct minstrel_ht_sta *mi, int rate)
+minstrel_ht_get_prob_avg(struct minstrel_ht_sta *mi, int rate)
 {
 	int group = rate / MCS_GROUP_RATES;
 	rate %= MCS_GROUP_RATES;
-	return mi->groups[group].rates[rate].prob_ewma;
+	return mi->groups[group].rates[rate].prob_avg;
 }
 
 static int
@@ -1119,7 +1119,7 @@ minstrel_ht_get_max_amsdu_len(struct min
 	unsigned int duration;
 
 	/* Disable A-MSDU if max_prob_rate is bad */
-	if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100))
+	if (mi->groups[group].rates[rate].prob_avg < MINSTREL_FRAC(50, 100))
 		return 1;
 
 	duration = g->duration[rate];
@@ -1142,7 +1142,7 @@ minstrel_ht_get_max_amsdu_len(struct min
 	 * data packet size
 	 */
 	if (duration > MCS_DURATION(1, 0, 260) ||
-	    (minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) <
+	    (minstrel_ht_get_prob_avg(mi, mi->max_tp_rate[0]) <
 	     MINSTREL_FRAC(75, 100)))
 		return 3200;
 
@@ -1247,7 +1247,7 @@ minstrel_get_sample_rate(struct minstrel
 	 * rate, to avoid wasting airtime.
 	 */
 	sample_dur = minstrel_get_duration(sample_idx);
-	if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
+	if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
 	    minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur)
 		return -1;
 
@@ -1711,7 +1711,7 @@ static u32 minstrel_ht_get_expected_thro
 
 	i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
 	j = mi->max_tp_rate[0] % MCS_GROUP_RATES;
-	prob = mi->groups[i].rates[j].prob_ewma;
+	prob = mi->groups[i].rates[j].prob_avg;
 
 	/* convert tp_avg from pkt per second in kbps */
 	tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -119,6 +119,6 @@ struct minstrel_ht_sta_priv {
 
 void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
 int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
-			   int prob_ewma);
+			   int prob_avg);
 
 #endif
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -98,8 +98,8 @@ minstrel_ht_stats_dump(struct minstrel_h
 		p += sprintf(p, "%6u ", tx_time);
 
 		tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
-		tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
-		eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+		tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_avg);
+		eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
 
 		p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u"
				" %3u %3u %-3u "
@@ -243,8 +243,8 @@ minstrel_ht_stats_csv_dump(struct minstr
 		p += sprintf(p, "%u,", tx_time);
 
 		tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
-		tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
-		eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+		tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_avg);
+		eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
 
 		p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,"
				"%u,%llu,%llu,",