/*
 * Copyright (c) 2014, Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "hw.h"
#include "dynack.h"

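/*
 * COMPUTE_TO is the minimum interval (in jiffies) between two updates of
 * the hardware ACK timeout, LATEACK_DELAY is the hold-off applied after a
 * "late ACK" event before dynamic estimation resumes, and
 * EWMA_LEVEL/EWMA_DIV are the moving-average weights: 96/128 means 75% of
 * the previous estimate is retained for every new sample.
 */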
#define COMPUTE_TO		(5 * HZ)
#define LATEACK_DELAY		(10 * HZ)
#define EWMA_LEVEL		96
#define EWMA_DIV		128

/**
 * ath_dynack_get_max_to - get the maximum ACK timeout according to channel width
 * @ah: ath hw
 *
 */
static u32 ath_dynack_get_max_to(struct ath_hw *ah)
{
	const struct ath9k_channel *chan = ah->curchan;

	if (!chan)
		return 300;

	if (IS_CHAN_HT40(chan))
		return 300;
	if (IS_CHAN_HALF_RATE(chan))
		return 750;
	if (IS_CHAN_QUARTER_RATE(chan))
		return 1500;
	return 600;
}
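
/*
 * Note: the caps above are in microseconds. As a rough rule of thumb, each
 * microsecond of extra ACK timeout buys about 150 m of one-way link
 * distance (300 m of round-trip propagation), so e.g. the 600 us cap for a
 * regular 20 MHz channel covers links in the 90 km range.
 */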

/**
 * ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation
 * @old: previous average
 * @new: new sample
 */
static inline int ath_dynack_ewma(int old, int new)
{
	if (old > 0)
		return (new * (EWMA_DIV - EWMA_LEVEL) +
			old * EWMA_LEVEL) / EWMA_DIV;
	else
		return new;
}
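
/*
 * Example: with EWMA_LEVEL/EWMA_DIV = 96/128, a previous estimate of 40 us
 * and a new sample of 64 us give (64 * 32 + 40 * 96) / 128 = 46 us, i.e.
 * the estimate moves 25% of the way towards each new sample.
 */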

/**
 * ath_dynack_get_sifs - get sifs time based on phy used
 * @ah: ath hw
 * @phy: phy used
 *
 */
static inline u32 ath_dynack_get_sifs(struct ath_hw *ah, int phy)
{
	u32 sifs = CCK_SIFS_TIME;

	if (phy == WLAN_RC_PHY_OFDM) {
		if (IS_CHAN_QUARTER_RATE(ah->curchan))
			sifs = OFDM_SIFS_TIME_QUARTER;
		else if (IS_CHAN_HALF_RATE(ah->curchan))
			sifs = OFDM_SIFS_TIME_HALF;
		else
			sifs = OFDM_SIFS_TIME;
	}
	return sifs;
}
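
/*
 * CCK_SIFS_TIME is used by default (CCK/DSSS rates); OFDM rates use the
 * standard OFDM SIFS, scaled up accordingly on half- and quarter-rate
 * (10/5 MHz) channels.
 */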

/**
 * ath_dynack_bssidmask - filter out ACK frames based on BSSID mask
 * @ah: ath hw
 * @mac: receiver address
 */
static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac)
{
	int i;
	struct ath_common *common = ath9k_hw_common(ah);

	for (i = 0; i < ETH_ALEN; i++) {
		if ((common->macaddr[i] & common->bssidmask[i]) !=
		    (mac[i] & common->bssidmask[i]))
			return false;
	}

	return true;
}
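
/*
 * An ACK carries only a receiver address, so this checks that the address
 * matches our MAC under the BSSID mask, i.e. that the ACK was sent to one
 * of the addresses this hardware answers for; only those ACKs are sampled.
 */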

/**
 * ath_dynack_set_timeout - configure timeouts/slottime registers
 * @ah: ath hw
 * @to: timeout value
 *
 */
static void ath_dynack_set_timeout(struct ath_hw *ah, int to)
{
	struct ath_common *common = ath9k_hw_common(ah);
	int slottime = (to - 3) / 2;

	ath_dbg(common, DYNACK, "ACK timeout %u slottime %u\n",
		to, slottime);
	ath9k_hw_setslottime(ah, slottime);
	ath9k_hw_set_ack_timeout(ah, to);
	ath9k_hw_set_cts_timeout(ah, to);
}
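
/*
 * Example: to = 600 us gives slottime = (600 - 3) / 2 = 298 us, i.e. the
 * slot time is derived by inverting ackto = 2 * slottime + 3.
 */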

/**
 * ath_dynack_compute_ackto - compute ACK timeout as the maximum STA timeout
 * @ah: ath hw
 *
 * should be called while holding qlock
 */
static void ath_dynack_compute_ackto(struct ath_hw *ah)
{
	struct ath_dynack *da = &ah->dynack;
	struct ath_node *an;
	int to = 0;

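	/* The hardware provides a single ACK/CTS timeout shared by all
	 * peers, so program the largest per-station estimate.
	 */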
	list_for_each_entry(an, &da->nodes, list)
		if (an->ackto > to)
			to = an->ackto;

	if (to && da->ackto != to) {
		ath_dynack_set_timeout(ah, to);
		da->ackto = to;
	}
}

/**
 * ath_dynack_compute_to - compute STA ACK timeout
 * @ah: ath hw
 *
 * should be called while holding qlock
 */
static void ath_dynack_compute_to(struct ath_hw *ah)
{
	struct ath_dynack *da = &ah->dynack;
	u32 ackto, ack_ts, max_to;
	struct ieee80211_sta *sta;
	struct ts_info *st_ts;
	struct ath_node *an;
	u8 *dst, *src;

	rcu_read_lock();

	max_to = ath_dynack_get_max_to(ah);
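	/* Pair the oldest tx-status sample (head of st_rbf) with the oldest
	 * ACK rx timestamp (head of ack_rbf). When the ACK follows the end
	 * of the frame, the tx sample is consumed and, if the gap is below
	 * max_to, the gap becomes a new ACK-timeout sample for the
	 * per-station EWMA (larger gaps are considered implausible and the
	 * ACK timestamp is kept for the next tx sample). ACK timestamps
	 * that precede the frame end are stale and skipped.
	 */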
	while (da->st_rbf.h_rb != da->st_rbf.t_rb &&
	       da->ack_rbf.h_rb != da->ack_rbf.t_rb) {
		ack_ts = da->ack_rbf.tstamp[da->ack_rbf.h_rb];
		st_ts = &da->st_rbf.ts[da->st_rbf.h_rb];
		dst = da->st_rbf.addr[da->st_rbf.h_rb].h_dest;
		src = da->st_rbf.addr[da->st_rbf.h_rb].h_src;

		ath_dbg(ath9k_hw_common(ah), DYNACK,
			"ack_ts %u st_ts %u st_dur %u [%u-%u]\n",
			ack_ts, st_ts->tstamp, st_ts->dur,
			da->ack_rbf.h_rb, da->st_rbf.h_rb);

		if (ack_ts > st_ts->tstamp + st_ts->dur) {
			ackto = ack_ts - st_ts->tstamp - st_ts->dur;

			if (ackto < max_to) {
				sta = ieee80211_find_sta_by_ifaddr(ah->hw, dst,
								   src);
				if (sta) {
					an = (struct ath_node *)sta->drv_priv;
					an->ackto = ath_dynack_ewma(an->ackto,
								    ackto);
					ath_dbg(ath9k_hw_common(ah), DYNACK,
						"%pM to %d [%u]\n", dst,
						an->ackto, ackto);
					if (time_is_before_jiffies(da->lto)) {
						ath_dynack_compute_ackto(ah);
						da->lto = jiffies + COMPUTE_TO;
					}
				}
				INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
			}
			INCR(da->st_rbf.h_rb, ATH_DYN_BUF);
		} else {
			INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
		}
	}

	rcu_read_unlock();
}

/**
 * ath_dynack_sample_tx_ts - status timestamp sampling method
 * @ah: ath hw
 * @skb: socket buffer
 * @ts: tx status info
 * @sta: station pointer
 *
 */
void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
			     struct ath_tx_status *ts,
			     struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr;
	struct ath_dynack *da = &ah->dynack;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u32 dur = ts->duration;
	u8 ridx;

	if (!da->enabled || (info->flags & IEEE80211_TX_CTL_NO_ACK))
		return;

	spin_lock_bh(&da->qlock);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* late ACK */
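	/*
	 * Excessive retries (ATH9K_TXERR_XRETRY) on association or
	 * authentication frames are treated as a hint that the peer's ACKs
	 * arrive after the current timeout: fall back to the per-channel
	 * maximum timeout, invalidate the station estimate and hold off
	 * re-estimation for LATEACK_DELAY.
	 */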
	if (ts->ts_status & ATH9K_TXERR_XRETRY) {
		if (ieee80211_is_assoc_req(hdr->frame_control) ||
		    ieee80211_is_assoc_resp(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control)) {
			u32 max_to = ath_dynack_get_max_to(ah);

			ath_dbg(common, DYNACK, "late ack\n");
			ath_dynack_set_timeout(ah, max_to);
			if (sta) {
				struct ath_node *an;

				an = (struct ath_node *)sta->drv_priv;
				an->ackto = -1;
			}
			da->lto = jiffies + LATEACK_DELAY;
		}

		spin_unlock_bh(&da->qlock);
		return;
	}

	ridx = ts->ts_rateindex;

	da->st_rbf.ts[da->st_rbf.t_rb].tstamp = ts->ts_tstamp;

	/* ether_addr_copy() gives a false warning on gcc-10 so use memcpy()
	 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=97490
	 */
	memcpy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1, ETH_ALEN);
	memcpy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2, ETH_ALEN);

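	/* For legacy (non-MCS) rates the PHY-specific SIFS is subtracted
	 * from the reported duration; ath_dynack_compute_to() measures the
	 * ACK gap starting from the tx timestamp plus this duration.
	 */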
	if (!(info->status.rates[ridx].flags & IEEE80211_TX_RC_MCS)) {
		const struct ieee80211_rate *rate;
		struct ieee80211_tx_rate *rates = info->status.rates;
		u32 phy;

		rate = &common->sbands[info->band].bitrates[rates[ridx].idx];
		if (info->band == NL80211_BAND_2GHZ &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		dur -= ath_dynack_get_sifs(ah, phy);
	}
	da->st_rbf.ts[da->st_rbf.t_rb].dur = dur;

	INCR(da->st_rbf.t_rb, ATH_DYN_BUF);
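	/* if the ring buffer is full, overwrite the oldest sample */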
	if (da->st_rbf.t_rb == da->st_rbf.h_rb)
		INCR(da->st_rbf.h_rb, ATH_DYN_BUF);

	ath_dbg(common, DYNACK, "{%pM} tx sample %u [dur %u][h %u-t %u]\n",
		hdr->addr1, ts->ts_tstamp, dur, da->st_rbf.h_rb,
		da->st_rbf.t_rb);

	ath_dynack_compute_to(ah);

	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_sample_tx_ts);

/**
 * ath_dynack_sample_ack_ts - ACK timestamp sampling method
 * @ah: ath hw
 * @skb: socket buffer
 * @ts: rx timestamp
 *
 */
void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb,
			      u32 ts)
{
	struct ath_dynack *da = &ah->dynack;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!da->enabled || !ath_dynack_bssidmask(ah, hdr->addr1))
		return;

	spin_lock_bh(&da->qlock);
	da->ack_rbf.tstamp[da->ack_rbf.t_rb] = ts;

	INCR(da->ack_rbf.t_rb, ATH_DYN_BUF);
	if (da->ack_rbf.t_rb == da->ack_rbf.h_rb)
		INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);

	ath_dbg(common, DYNACK, "rx sample %u [h %u-t %u]\n",
		ts, da->ack_rbf.h_rb, da->ack_rbf.t_rb);

	ath_dynack_compute_to(ah);

	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_sample_ack_ts);

/**
 * ath_dynack_node_init - init ath_node related info
 * @ah: ath hw
 * @an: ath node
 *
 */
void ath_dynack_node_init(struct ath_hw *ah, struct ath_node *an)
{
	struct ath_dynack *da = &ah->dynack;

	an->ackto = da->ackto;

	spin_lock_bh(&da->qlock);
	list_add_tail(&an->list, &da->nodes);
	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_node_init);

/**
 * ath_dynack_node_deinit - deinit ath_node related info
 * @ah: ath hw
 * @an: ath node
 *
 */
void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an)
{
	struct ath_dynack *da = &ah->dynack;

	spin_lock_bh(&da->qlock);
	list_del(&an->list);
	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_node_deinit);

/**
 * ath_dynack_reset - reset dynack processing
 * @ah: ath hw
 *
 */
void ath_dynack_reset(struct ath_hw *ah)
{
	struct ath_dynack *da = &ah->dynack;
	struct ath_node *an;

	spin_lock_bh(&da->qlock);

	da->lto = jiffies + COMPUTE_TO;

	da->st_rbf.t_rb = 0;
	da->st_rbf.h_rb = 0;
	da->ack_rbf.t_rb = 0;
	da->ack_rbf.h_rb = 0;

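	/* start from the per-channel maximum so that even distant peers are
	 * reachable right after a reset; the per-station EWMA then
	 * converges towards the measured value.
	 */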
	da->ackto = ath_dynack_get_max_to(ah);
	list_for_each_entry(an, &da->nodes, list)
		an->ackto = da->ackto;

	/* init acktimeout */
	ath_dynack_set_timeout(ah, da->ackto);

	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_reset);

/**
 * ath_dynack_init - init dynack data structure
 * @ah: ath hw
 *
 */
void ath_dynack_init(struct ath_hw *ah)
{
	struct ath_dynack *da = &ah->dynack;

	memset(da, 0, sizeof(struct ath_dynack));

	spin_lock_init(&da->qlock);
	INIT_LIST_HEAD(&da->nodes);
	/* ackto = slottime + sifs + air delay */
	da->ackto = 9 + 16 + 64;
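	/* i.e. an initial ACK timeout estimate of 9 + 16 + 64 = 89 us */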

	ah->hw->wiphy->features |= NL80211_FEATURE_ACKTO_ESTIMATION;
}