// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */

#include "wil6210.h"
#include "txrx.h"

#define SEQ_MODULO 0x1000
#define SEQ_MASK 0xfff

static inline int seq_less(u16 sq1, u16 sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static inline u16 seq_inc(u16 sq)
{
	return (sq + 1) & SEQ_MASK;
}

static inline u16 seq_sub(u16 sq1, u16 sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}

static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
{
	return seq_sub(seq, r->ssn) % r->buf_size;
}
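
/*
 * Worked example of the modulo-4096 sequence arithmetic above; the
 * values are illustrative, not taken from real traffic:
 *
 *	seq_less(0xffe, 0x002): (0xffe - 0x002) & 0xfff = 0xffc > 0x800,
 *		so 0xffe precedes 0x002 across the numbering wrap-around
 *	seq_inc(0xfff) = 0x000
 *	seq_sub(0x002, 0xffe) = 0x004
 *
 * reorder_index() maps a sequence number to a ring slot: assuming
 * r->ssn = 0xffe and r->buf_size = 64, seq 0x001 lands in slot
 * seq_sub(0x001, 0xffe) % 64 = 3.
 */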

static void wil_release_reorder_frame(struct net_device *ndev,
				      struct wil_tid_ampdu_rx *r,
				      int index)
{
	struct sk_buff *skb = r->reorder_buf[index];

	if (!skb)
		goto no_frame;

	/* release the frame from the reorder ring buffer */
	r->stored_mpdu_num--;
	r->reorder_buf[index] = NULL;
	wil_netif_rx_any(skb, ndev);

no_frame:
	r->head_seq_num = seq_inc(r->head_seq_num);
}

static void wil_release_reorder_frames(struct net_device *ndev,
				       struct wil_tid_ampdu_rx *r,
				       u16 hseq)
{
	int index;

	/* note: this function is never called with
	 * hseq preceding r->head_seq_num, i.e. it is always true that
	 * !seq_less(hseq, r->head_seq_num)
	 * and thus on loop exit it should be
	 * r->head_seq_num == hseq
	 */
	while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
		index = reorder_index(r, r->head_seq_num);
		wil_release_reorder_frame(ndev, r, index);
	}
	r->head_seq_num = hseq;
}
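
/*
 * Illustrative trace of wil_release_reorder_frames(), assuming
 * head_seq_num = 0x100, hseq = 0x103 and frames buffered for
 * 0x100 and 0x102 only:
 *
 *	0x100: skb released to the stack, head -> 0x101
 *	0x101: empty slot, head still advances via no_frame, -> 0x102
 *	0x102: skb released to the stack, head -> 0x103
 *
 * The loop then stops with head_seq_num == hseq, giving up on the
 * missing frame at 0x101.
 */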

static void wil_reorder_release(struct net_device *ndev,
				struct wil_tid_ampdu_rx *r)
{
	int index = reorder_index(r, r->head_seq_num);

	while (r->reorder_buf[index]) {
		wil_release_reorder_frame(ndev, r, index);
		index = reorder_index(r, r->head_seq_num);
	}
}
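
/*
 * Unlike wil_release_reorder_frames(), which drains up to an explicit
 * target, this helper only drains the contiguous run at the head of
 * the window. For example (hypothetical state), with frames buffered
 * for 0x200, 0x201 and 0x203, it releases 0x200 and 0x201, then stops
 * at the empty slot for 0x202 and waits for that frame to arrive.
 */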

/* called in NAPI context */
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
	struct wil6210_vif *vif;
	struct net_device *ndev;
	int tid, cid, mid, mcast, retry;
	u16 seq;
	struct wil_sta_info *sta;
	struct wil_tid_ampdu_rx *r;
	u16 hseq;
	int index;

	wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq,
					 &mcast, &retry);
	sta = &wil->sta[cid];

	wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
		     mid, cid, tid, seq, mcast);

	vif = wil->vifs[mid];
	if (unlikely(!vif)) {
		wil_dbg_txrx(wil, "invalid VIF, mid %d\n", mid);
		dev_kfree_skb(skb);
		return;
	}
	ndev = vif_to_ndev(vif);

	spin_lock(&sta->tid_rx_lock);

	r = sta->tid_rx[tid];
	if (!r) {
		wil_netif_rx_any(skb, ndev);
		goto out;
	}

	if (unlikely(mcast)) {
		if (retry && seq == r->mcast_last_seq) {
			r->drop_dup_mcast++;
			wil_dbg_txrx(wil, "Rx drop: dup mcast seq 0x%03x\n",
				     seq);
			dev_kfree_skb(skb);
			goto out;
		}
		r->mcast_last_seq = seq;
		wil_netif_rx_any(skb, ndev);
		goto out;
	}

	r->total++;
	hseq = r->head_seq_num;

	/* Due to a race between the WMI event reporting BACK
	 * establishment and data Rx, a few packets may be passed up
	 * before the reorder buffer gets allocated. Catch up by
	 * pretending the SSN is what we see in the first Rx packet.
	 *
	 * In another scenario, Rx gets delayed and we get a packet from
	 * before the BACK. Pass it to the stack and wait.
	 */
	if (r->first_time) {
		r->first_time = false;
		if (seq != r->head_seq_num) {
			if (seq_less(seq, r->head_seq_num)) {
				wil_err(wil,
					"Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
					seq, r->head_seq_num);
				r->first_time = true;
				wil_netif_rx_any(skb, ndev);
				goto out;
			}
			wil_err(wil,
				"Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
				seq, r->head_seq_num);
			r->head_seq_num = seq;
			r->ssn = seq;
		}
	}

	/* frame with out-of-date sequence number */
	if (seq_less(seq, r->head_seq_num)) {
		r->ssn_last_drop = seq;
		r->drop_old++;
		wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
			     seq, r->head_seq_num);
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window,
	 * release some previous frames to make room for this one.
	 */
	if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
		hseq = seq_inc(seq_sub(seq, r->buf_size));
		/* release stored frames up to new head to stack */
		wil_release_reorder_frames(ndev, r, hseq);
	}
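
	/* Example of the window advance above, with assumed values:
	 * buf_size = 64, head = 0x100, incoming seq = 0x150 (outside
	 * the window 0x100..0x13f). Then
	 * hseq = seq_inc(seq_sub(0x150, 64)) = 0x111, everything up to
	 * 0x110 is flushed, and the new window 0x111..0x150 just
	 * covers the incoming frame.
	 */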

	/* Now the new frame is always in the range of the reordering buffer */

	index = reorder_index(r, seq);

	/* check if we already stored this frame */
	if (r->reorder_buf[index]) {
		r->drop_dup++;
		wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
		r->head_seq_num = seq_inc(r->head_seq_num);
		wil_netif_rx_any(skb, ndev);
		goto out;
	}

	/* put the frame in the reordering buffer */
	r->reorder_buf[index] = skb;
	r->stored_mpdu_num++;
	wil_reorder_release(ndev, r);

out:
	spin_unlock(&sta->tid_rx_lock);
}

/* process BAR frame, called in NAPI context */
void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
		u8 cid, u8 tid, u16 seq)
{
	struct wil_sta_info *sta = &wil->sta[cid];
	struct net_device *ndev = vif_to_ndev(vif);
	struct wil_tid_ampdu_rx *r;

	spin_lock(&sta->tid_rx_lock);

	r = sta->tid_rx[tid];
	if (!r) {
		wil_err(wil, "BAR for non-existing CID %d TID %d\n", cid, tid);
		goto out;
	}
	if (seq_less(seq, r->head_seq_num)) {
		wil_err(wil, "BAR Seq 0x%03x preceding head 0x%03x\n",
			seq, r->head_seq_num);
		goto out;
	}
	wil_dbg_txrx(wil, "BAR: CID %d MID %d TID %d Seq 0x%03x head 0x%03x\n",
		     cid, vif->mid, tid, seq, r->head_seq_num);
	wil_release_reorder_frames(ndev, r, seq);

out:
	spin_unlock(&sta->tid_rx_lock);
}
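
/*
 * A BAR carries the originator's new window start. Hypothetical case:
 * the peer sends a BAR with SSN 0x230 while head_seq_num is still
 * 0x22c; any frames buffered for 0x22c..0x22f are flushed to the stack
 * (gaps are given up as lost) and the window restarts at 0x230.
 */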

struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
						int size, u16 ssn)
{
	struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return NULL;

	r->reorder_buf =
		kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
	if (!r->reorder_buf) {
		kfree(r);
		return NULL;
	}

	r->ssn = ssn;
	r->head_seq_num = ssn;
	r->buf_size = size;
	r->stored_mpdu_num = 0;
	r->first_time = true;
	r->mcast_last_seq = U16_MAX;
	return r;
}

void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
			   struct wil_tid_ampdu_rx *r)
{
	int i;

	if (!r)
		return;

	/* Do not pass remaining frames to the network stack - it may not
	 * be expecting any more Rx. Rx from here may lead to a kernel
	 * OOPS since some per-socket accounting info was already
	 * released.
	 */
	for (i = 0; i < r->buf_size; i++)
		kfree_skb(r->reorder_buf[i]);

	kfree(r->reorder_buf);
	kfree(r);
}

/* ADDBA processing */
static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
{
	u16 max_agg_size = min_t(u16, wil->max_agg_wsize, wil->max_ampdu_size /
				 (mtu_max + WIL_MAX_MPDU_OVERHEAD));

	if (!req_agg_wsize)
		return max_agg_size;

	return min(max_agg_size, req_agg_wsize);
}
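
/*
 * Worked example of wil_agg_size(), with assumed values (the real ones
 * come from firmware capabilities and the mtu_max module parameter):
 * with max_agg_wsize = 42, max_ampdu_size = 65535 and
 * mtu_max + WIL_MAX_MPDU_OVERHEAD = 2304, we get
 * max_agg_size = min(42, 65535 / 2304) = min(42, 28) = 28.
 * A request of 0 then yields 28; a request of 16 yields
 * min(28, 16) = 16.
 */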

/* Block Ack - Rx side (recipient) */
int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
			 u8 dialog_token, __le16 ba_param_set,
			 __le16 ba_timeout, __le16 ba_seq_ctrl)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
	u16 param_set = le16_to_cpu(ba_param_set);
	u16 agg_timeout = le16_to_cpu(ba_timeout);
	u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
	struct wil_sta_info *sta;
	u16 agg_wsize;
	/* bit 0: A-MSDU supported
	 * bit 1: policy (should be 0 for us)
	 * bits 2..5: TID
	 * bits 6..15: buffer size
	 */
	u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
	bool agg_amsdu = wil->use_enhanced_dma_hw &&
		wil->use_rx_hw_reordering &&
		test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
		wil->amsdu_en && (param_set & BIT(0));
	int ba_policy = param_set & BIT(1);
	u16 ssn = seq_ctrl >> 4;
	struct wil_tid_ampdu_rx *r;
	int rc = 0;

	might_sleep();

	/* sanity checks */
	if (cid >= wil->max_assoc_sta) {
		wil_err(wil, "BACK: invalid CID %d\n", cid);
		rc = -EINVAL;
		goto out;
	}

	sta = &wil->sta[cid];
	if (sta->status != wil_sta_connected) {
		wil_err(wil, "BACK: CID %d not connected\n", cid);
		rc = -EINVAL;
		goto out;
	}

	wil_dbg_wmi(wil,
		    "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
		    cid, sta->addr, tid, req_agg_wsize, agg_timeout,
		    agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn);

	/* apply policies */
	if (req_agg_wsize == 0) {
		wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
			     wil->max_agg_wsize);
		agg_wsize = wil->max_agg_wsize;
	} else {
		agg_wsize = min_t(u16, wil->max_agg_wsize, req_agg_wsize);
	}

	rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
					     WLAN_STATUS_SUCCESS, agg_amsdu,
					     agg_wsize, agg_timeout);
	if (rc) {
		wil_err(wil, "do not apply ba, rc(%d)\n", rc);
		goto out;
	}

	/* apply */
	if (!wil->use_rx_hw_reordering) {
		r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
		spin_lock_bh(&sta->tid_rx_lock);
		wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
		sta->tid_rx[tid] = r;
		spin_unlock_bh(&sta->tid_rx_lock);
	}

out:
	return rc;
}
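
/*
 * Illustrative decode of the ADDBA parameter fields above, for a
 * made-up request with ba_param_set = 0x0402 and ba_seq_ctrl = 0x0a50:
 *
 *	param_set bit 0 = 0   -> A-MSDU not supported by the originator
 *	param_set bit 1 = 1   -> BA policy bit set (this driver expects 0)
 *	bits 2..5  = 0        -> TID 0
 *	bits 6..15 = 16       -> requested reorder window of 16 frames
 *	ssn = 0x0a50 >> 4 = 0x0a5 (low 4 bits carry the fragment number)
 */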

/* BACK - Tx side (originator) */
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
{
	u8 agg_wsize = wil_agg_size(wil, wsize);
	u16 agg_timeout = 0;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
	int rc = 0;

	if (txdata->addba_in_progress) {
		wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
			     ringid);
		goto out;
	}
	if (txdata->agg_wsize) {
		wil_dbg_misc(wil,
			     "ADDBA for vring[%d] already done for wsize %d\n",
			     ringid, txdata->agg_wsize);
		goto out;
	}
	txdata->addba_in_progress = true;
	rc = wmi_addba(wil, txdata->mid, ringid, agg_wsize, agg_timeout);
	if (rc) {
		wil_err(wil, "wmi_addba failed, rc (%d)\n", rc);
		txdata->addba_in_progress = false;
	}

out:
	return rc;
}
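
/*
 * Usage sketch (hypothetical caller, for illustration only): once a Tx
 * ring carries enough traffic to justify aggregation, the originator
 * side kicks off ADDBA with
 *
 *	rc = wil_addba_tx_request(wil, ringid, 0);
 *
 * where wsize == 0 lets wil_agg_size() pick the largest window that
 * the negotiated A-MPDU size allows. txdata->addba_in_progress stays
 * set until the firmware reports the block-ack status back over WMI.
 */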