// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

static inline
u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}

static inline
enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
							struct hal_rx_desc *desc)
{
	if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}

static inline
bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
					    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
}

static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}

static inline
bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
							struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
						    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
						   struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}

static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(attn->info2));
}

static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(attn->info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
{
	u32 info = __le32_to_cpu(attn->info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}

static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	struct rx_attention *rx_attention;
	u32 errmap;

	rx_attention = ath11k_dp_rx_get_attention(ab, desc);
	errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);

	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
}

static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
						     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}

static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}

static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}

static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
}

static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}

static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
			 __le32_to_cpu(attn->info1));
}

static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
						struct hal_rx_desc *rx_desc)
{
	u8 *rx_pkt_hdr;

	rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);

	return rx_pkt_hdr;
}

static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
					       struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);

	return tlv_tag == HAL_RX_MPDU_START;
}

static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
					      struct hal_rx_desc *rx_desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}

static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
						 struct hal_rx_desc *desc,
						 u16 len)
{
	ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}

static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
					struct hal_rx_desc *desc)
{
	struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);

	return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
		 __le32_to_cpu(attn->info1)));
}

static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
}

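/* Reap timer handler: periodically services the monitor rings so their
 * buffers keep getting recycled on targets where monitor status processing
 * is timer-driven (see the !rxdma1_enable path in the srng alloc below).
 */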
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
	int i;

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
		ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);

	mod_timer(&ab->mon_reap_timer, jiffies +
		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
}

static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
{
	int i, reaped = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

	do {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
								 NULL,
								 DP_MON_SERVICE_BUDGET);

		/* nothing more to reap */
		if (reaped < DP_MON_SERVICE_BUDGET)
			return 0;

	} while (time_before(jiffies, timeout));
	ath11k_warn(ab, "dp mon ring purge timeout\n");

	return -ETIMEDOUT;
}

/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
				   (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id <= 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

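/* Release every buffer still tracked in the ring's IDR: remove the IDR
 * entry, unmap the DMA address recorded at replenish time and free the skb.
 */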
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
	}

	return 0;
}

static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		      ath11k_hal_srng_get_entrysize(ar->ab, ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   ar->ab->hw_params.hal_params->rx_buf_rbm);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	if (ar->ab->hw_params.rxdma1_enable) {
		rx_ring = &dp->rxdma_mon_buf_ring;
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
	}

	return 0;
}

static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	int i;

	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		if (ab->hw_params.rx_mac_buf_ring)
			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);

		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
		ath11k_dp_srng_cleanup(ab,
				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
	}

	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}

static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_srng *srng = NULL;
	int i;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* if rxdma1_enable is false, then there is no need to set up
	 * rxdma_mon_buf_ring, rxdma_mon_dst_ring and rxdma_mon_desc_ring;
	 * init the reap timer for QCA6390.
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		/* init mon status buffer reap timer */
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

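/* Drop all pending REO commands and cache-flush elements, releasing the
 * queue descriptor memory each of them still references.
 */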
void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		rx_tid = &cmd->data;
		if (rx_tid->vaddr) {
			dma_unmap_single(ab->dev, rx_tid->paddr,
					 rx_tid->size, DMA_BIDIRECTIONAL);
			kfree(rx_tid->vaddr);
			rx_tid->vaddr = NULL;
		}
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		rx_tid = &cmd_cache->data;
		if (rx_tid->vaddr) {
			dma_unmap_single(ab->dev, rx_tid->paddr,
					 rx_tid->size, DMA_BIDIRECTIONAL);
			kfree(rx_tid->vaddr);
			rx_tid->vaddr = NULL;
		}
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);
	if (rx_tid->vaddr) {
		dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}

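/* Flush the HW REO cache for a TID queue descriptor, one qdesc-sized block
 * at a time; only the final flush requests a status callback, which then
 * frees the descriptor memory.
 */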
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}

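/* Status callback for the RX queue invalidation command: queue the TID
 * descriptor on the cache-flush list, then flush and free entries that have
 * aged out or pushed the list past its free threshold.
 */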
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;
}

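/* Invalidate the HW RX queue for a TID via HAL_REO_CMD_UPDATE_RX_QUEUE;
 * the descriptor memory is freed from the command status callback.
 */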
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
			       struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	rx_tid->active = false;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		if (ret != -ESHUTDOWN)
			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
				   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}

	rx_tid->paddr = 0;
	rx_tid->size = 0;
}

static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

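/* Reset per-TID fragment reassembly state and optionally return the saved
 * destination ring descriptor to the WBM idle list.
 */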
static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
	struct ath11k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);

		ath11k_dp_rx_frags_cleanup(rx_tid, true);
	}
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

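/* Allocate and DMA-map an aligned REO queue descriptor for the peer/TID
 * (or just update it if already active) and point the firmware at it via
 * the WMI reorder queue setup command.
 */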
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
			    peer_mac);
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d: %d\n",
				    peer_mac, tid, ret);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
				    peer_mac, tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
			    peer_mac, tid, ret);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
			    peer_mac, tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;

	return ret;
}

int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn, arsta->pn_type);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid: %d\n", ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid: %d\n",
			    ret);

	return ret;
}

int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath11k *ar = arvif->ar;
	struct ath11k_base *ab = ar->ab;
	struct ath11k_hal_reo_cmd cmd = {0};
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_UPDATE_RX_QUEUE,
						&cmd, NULL);
		if (ret) {
			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
				    tid, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

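/* Return the slot already holding this peer_id, or the first unused slot;
 * -EINVAL if the user stats array is full.
 */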
static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
					     u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

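/* Walk a buffer of HTT TLVs, validating each header and length before
 * handing the TLV payload to the supplied iterator callback.
 */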
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If the host has configured fixed rates, or in some other
	 * special cases, broadcast/management frames are sent at different
	 * rates. Should firmware rate control be skipped for these?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = ath11k_sta_to_arsta(sta);

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
						((user_rate->ru_end -
						  user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;

	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packets don't have valid tx bytes,
	 * so skip the peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);

	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}

static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto out;
	}

	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	spin_lock_bh(&ar->data_lock);
	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto out_unlock_data;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto out_unlock_data;
	}

out_unlock_data:
	spin_unlock_bh(&ar->data_lock);

out:
	rcu_read_unlock();

	return ret;
}

static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
	struct ath11k *ar;
	u8 pdev_id;

	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);

	rcu_read_lock();

	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
		goto out;
	}

	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
				ar->ab->pktlog_defs_checksum);

out:
	rcu_read_unlock();
}

static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
						  struct sk_buff *skb)
{
	u32 *data = (u32 *)skb->data;
	u8 pdev_id, ring_type, ring_id, pdev_idx;
	u16 hp, tp;
	u32 backpressure_time;
	struct ath11k_bp_stats *bp_stats;

	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
	++data;

	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
	++data;

	backpressure_time = *data;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d, ring id %d, hp %d tp %d, backpressure time %d\n",
		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);

	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
			return;

		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
		pdev_idx = DP_HW2SW_MACID(pdev_id);

		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
			return;

		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
	} else {
		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
			    ring_type);
		return;
	}

	spin_lock_bh(&ab->base_lock);
	bp_stats->hp = hp;
	bp_stats->tp = tp;
	bp_stats->count++;
	bp_stats->jiffies = jiffies;
	spin_unlock_bh(&ab->base_lock);
}

void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	struct ath11k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash;
	u16 hw_peer_id;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type: 0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
						  resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
						  resp->version_msg.version);
		complete(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
				     resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
				    resp->peer_unmap_ev.info);
		ath11k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
	default:
		ath11k_warn(ab, "htt event %d not handled\n", type);
		break;
	}

	dev_kfree_skb_any(skb);
}

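/* An MSDU that did not fit in one rx buffer arrives as a chain of
 * continuation buffers; copy the pieces from msdu_list into the first skb
 * (expanding its tailroom if required) so callers see one linear MSDU.
 */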
static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spreads over multiple buffers, the attention, MSDU_END
	 * and MPDU_END TLVs are valid only in the last buffer. Copy those
	 * TLVs.
	 */
1803 ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1804
1805 space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1806 if (space_extra > 0 &&
1807 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1808 /* Free up all buffers of the MSDU */
1809 while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1810 rxcb = ATH11K_SKB_RXCB(skb);
1811 if (!rxcb->is_continuation) {
1812 dev_kfree_skb_any(skb);
1813 break;
1814 }
1815 dev_kfree_skb_any(skb);
1816 }
1817 return -ENOMEM;
1818 }
1819
1820 rem_len = msdu_len - buf_first_len;
1821 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1822 rxcb = ATH11K_SKB_RXCB(skb);
1823 if (rxcb->is_continuation)
1824 buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1825 else
1826 buf_len = rem_len;
1827
1828 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1829 WARN_ON_ONCE(1);
1830 dev_kfree_skb_any(skb);
1831 return -EINVAL;
1832 }
1833
1834 skb_put(skb, buf_len + hal_rx_desc_sz);
1835 skb_pull(skb, hal_rx_desc_sz);
1836 skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1837 buf_len);
1838 dev_kfree_skb_any(skb);
1839
1840 rem_len -= buf_len;
1841 if (!rxcb->is_continuation)
1842 break;
1843 }
1844
1845 return 0;
1846}
1847
1848static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1849 struct sk_buff *first)
1850{
1851 struct sk_buff *skb;
1852 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1853
1854 if (!rxcb->is_continuation)
1855 return first;
1856
1857 skb_queue_walk(msdu_list, skb) {
1858 rxcb = ATH11K_SKB_RXCB(skb);
1859 if (!rxcb->is_continuation)
1860 return skb;
1861 }
1862
1863 return NULL;
1864}
1865
1866static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
1867{
1868 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1869 struct rx_attention *rx_attention;
1870 bool ip_csum_fail, l4_csum_fail;
1871
1872 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
1873 ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
1874 l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
1875
1876 msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1877 CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1878}
1879
1880static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1881 enum hal_encrypt_type enctype)
1882{
1883 switch (enctype) {
1884 case HAL_ENCRYPT_TYPE_OPEN:
1885 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1886 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1887 return 0;
1888 case HAL_ENCRYPT_TYPE_CCMP_128:
1889 return IEEE80211_CCMP_MIC_LEN;
1890 case HAL_ENCRYPT_TYPE_CCMP_256:
1891 return IEEE80211_CCMP_256_MIC_LEN;
1892 case HAL_ENCRYPT_TYPE_GCMP_128:
1893 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1894 return IEEE80211_GCMP_MIC_LEN;
1895 case HAL_ENCRYPT_TYPE_WEP_40:
1896 case HAL_ENCRYPT_TYPE_WEP_104:
1897 case HAL_ENCRYPT_TYPE_WEP_128:
1898 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1899 case HAL_ENCRYPT_TYPE_WAPI:
1900 break;
1901 }
1902
1903 ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1904 return 0;
1905}
1906
1907static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1908 enum hal_encrypt_type enctype)
1909{
1910 switch (enctype) {
1911 case HAL_ENCRYPT_TYPE_OPEN:
1912 return 0;
1913 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1914 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1915 return IEEE80211_TKIP_IV_LEN;
1916 case HAL_ENCRYPT_TYPE_CCMP_128:
1917 return IEEE80211_CCMP_HDR_LEN;
1918 case HAL_ENCRYPT_TYPE_CCMP_256:
1919 return IEEE80211_CCMP_256_HDR_LEN;
1920 case HAL_ENCRYPT_TYPE_GCMP_128:
1921 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1922 return IEEE80211_GCMP_HDR_LEN;
1923 case HAL_ENCRYPT_TYPE_WEP_40:
1924 case HAL_ENCRYPT_TYPE_WEP_104:
1925 case HAL_ENCRYPT_TYPE_WEP_128:
1926 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1927 case HAL_ENCRYPT_TYPE_WAPI:
1928 break;
1929 }
1930
1931 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1932 return 0;
1933}
1934
1935static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1936 enum hal_encrypt_type enctype)
1937{
1938 switch (enctype) {
1939 case HAL_ENCRYPT_TYPE_OPEN:
1940 case HAL_ENCRYPT_TYPE_CCMP_128:
1941 case HAL_ENCRYPT_TYPE_CCMP_256:
1942 case HAL_ENCRYPT_TYPE_GCMP_128:
1943 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1944 return 0;
1945 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1946 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1947 return IEEE80211_TKIP_ICV_LEN;
1948 case HAL_ENCRYPT_TYPE_WEP_40:
1949 case HAL_ENCRYPT_TYPE_WEP_104:
1950 case HAL_ENCRYPT_TYPE_WEP_128:
1951 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1952 case HAL_ENCRYPT_TYPE_WAPI:
1953 break;
1954 }
1955
1956 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1957 return 0;
1958}
1959
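/* Undo native-wifi decap by rebuilding a full 802.11 header in front of
 * the payload. The first MSDU can reuse the original header from the rx
 * descriptor; middle/last A-MSDU subframes get a reconstructed QoS
 * header with the TID taken from the rx descriptor.
 */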
1960static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
1961 struct sk_buff *msdu,
1962 u8 *first_hdr,
1963 enum hal_encrypt_type enctype,
1964 struct ieee80211_rx_status *status)
1965{
1966 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1967 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1968 struct ieee80211_hdr *hdr;
1969 size_t hdr_len;
1970 u8 da[ETH_ALEN];
1971 u8 sa[ETH_ALEN];
1972 u16 qos_ctl = 0;
1973 u8 *qos;
1974
1975 /* copy SA & DA and pull decapped header */
1976 hdr = (struct ieee80211_hdr *)msdu->data;
1977 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1978 ether_addr_copy(da, ieee80211_get_DA(hdr));
1979 ether_addr_copy(sa, ieee80211_get_SA(hdr));
1980 skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
1981
1982 if (rxcb->is_first_msdu) {
1983		/* The original 802.11 header is valid for the first MSDU,
1984		 * hence we can reuse the same header.
1985 */
1986 hdr = (struct ieee80211_hdr *)first_hdr;
1987 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1988
1989 /* Each A-MSDU subframe will be reported as a separate MSDU,
1990 * so strip the A-MSDU bit from QoS Ctl.
1991 */
1992 if (ieee80211_is_data_qos(hdr->frame_control)) {
1993 qos = ieee80211_get_qos_ctl(hdr);
1994 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1995 }
1996 } else {
1997 /* Rebuild qos header if this is a middle/last msdu */
1998 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1999
2000 /* Reset the order bit as the HT_Control header is stripped */
2001 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
2002
2003 qos_ctl = rxcb->tid;
2004
2005 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
2006 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2007
2008 /* TODO Add other QoS ctl fields when required */
2009
2010 /* copy decap header before overwriting for reuse below */
2011		memcpy(decap_hdr, (u8 *)hdr, hdr_len);
2012 }
2013
2014 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2015 memcpy(skb_push(msdu,
2016 ath11k_dp_rx_crypto_param_len(ar, enctype)),
2017 (void *)hdr + hdr_len,
2018 ath11k_dp_rx_crypto_param_len(ar, enctype));
2019 }
2020
2021 if (!rxcb->is_first_msdu) {
2022 memcpy(skb_push(msdu,
2023 IEEE80211_QOS_CTL_LEN), &qos_ctl,
2024 IEEE80211_QOS_CTL_LEN);
2025 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2026 return;
2027 }
2028
2029 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2030
2031 /* original 802.11 header has a different DA and in
2032 * case of 4addr it may also have different SA
2033 */
2034 hdr = (struct ieee80211_hdr *)msdu->data;
2035 ether_addr_copy(ieee80211_get_DA(hdr), da);
2036 ether_addr_copy(ieee80211_get_SA(hdr), sa);
2037}
2038
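/* Raw decap: the frame is already a full 802.11 MPDU. Only trim the FCS
 * and, for decrypted frames, strip whatever crypto fields (IV, MIC,
 * ICV) the rx status flags say are still present in the buffer.
 */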
2039static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
2040 enum hal_encrypt_type enctype,
2041 struct ieee80211_rx_status *status,
2042 bool decrypted)
2043{
2044 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2045 struct ieee80211_hdr *hdr;
2046 size_t hdr_len;
2047 size_t crypto_len;
2048
2049	/* Raw decap expects a complete, single-buffer MPDU */
2050	if (!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
2051 WARN_ON_ONCE(1);
2052 return;
2053 }
2054
2055 skb_trim(msdu, msdu->len - FCS_LEN);
2056
2057 if (!decrypted)
2058 return;
2059
2060 hdr = (void *)msdu->data;
2061
2062 /* Tail */
2063 if (status->flag & RX_FLAG_IV_STRIPPED) {
2064 skb_trim(msdu, msdu->len -
2065 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2066
2067 skb_trim(msdu, msdu->len -
2068 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2069 } else {
2070 /* MIC */
2071 if (status->flag & RX_FLAG_MIC_STRIPPED)
2072 skb_trim(msdu, msdu->len -
2073 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2074
2075 /* ICV */
2076 if (status->flag & RX_FLAG_ICV_STRIPPED)
2077 skb_trim(msdu, msdu->len -
2078 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2079 }
2080
2081	/* Michael MIC: 8 bytes, the same length as IEEE80211_CCMP_MIC_LEN */
2082 if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2083 !ieee80211_has_morefrags(hdr->frame_control) &&
2084 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2085 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2086
2087 /* Head */
2088 if (status->flag & RX_FLAG_IV_STRIPPED) {
2089 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2090 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2091
2092 memmove((void *)msdu->data + crypto_len,
2093 (void *)msdu->data, hdr_len);
2094 skb_pull(msdu, crypto_len);
2095 }
2096}
2097
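/* Locate the RFC 1042 (LLC/SNAP) header inside the hdr_status copy of
 * the frame, skipping the 802.11 header, the crypto params and, for
 * A-MSDUs, the subframe header.
 */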
2098static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
2099 struct sk_buff *msdu,
2100 enum hal_encrypt_type enctype)
2101{
2102 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2103 struct ieee80211_hdr *hdr;
2104 size_t hdr_len, crypto_len;
2105 void *rfc1042;
2106 bool is_amsdu;
2107
2108 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
2109 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
2110 rfc1042 = hdr;
2111
2112 if (rxcb->is_first_msdu) {
2113 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2114 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2115
2116 rfc1042 += hdr_len + crypto_len;
2117 }
2118
2119 if (is_amsdu)
2120 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
2121
2122 return rfc1042;
2123}
2124
2125static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
2126 struct sk_buff *msdu,
2127 u8 *first_hdr,
2128 enum hal_encrypt_type enctype,
2129 struct ieee80211_rx_status *status)
2130{
2131 struct ieee80211_hdr *hdr;
2132 struct ethhdr *eth;
2133 size_t hdr_len;
2134 u8 da[ETH_ALEN];
2135 u8 sa[ETH_ALEN];
2136 void *rfc1042;
2137
2138 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
2139 if (WARN_ON_ONCE(!rfc1042))
2140 return;
2141
2142 /* pull decapped header and copy SA & DA */
2143 eth = (struct ethhdr *)msdu->data;
2144 ether_addr_copy(da, eth->h_dest);
2145 ether_addr_copy(sa, eth->h_source);
2146 skb_pull(msdu, sizeof(struct ethhdr));
2147
2148 /* push rfc1042/llc/snap */
2149 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
2150 sizeof(struct ath11k_dp_rfc1042_hdr));
2151
2152 /* push original 802.11 header */
2153 hdr = (struct ieee80211_hdr *)first_hdr;
2154 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2155
2156 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2157 memcpy(skb_push(msdu,
2158 ath11k_dp_rx_crypto_param_len(ar, enctype)),
2159 (void *)hdr + hdr_len,
2160 ath11k_dp_rx_crypto_param_len(ar, enctype));
2161 }
2162
2163 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2164
2165 /* original 802.11 header has a different DA and in
2166 * case of 4addr it may also have different SA
2167 */
2168 hdr = (struct ieee80211_hdr *)msdu->data;
2169 ether_addr_copy(ieee80211_get_DA(hdr), da);
2170 ether_addr_copy(ieee80211_get_SA(hdr), sa);
2171}
2172
2173static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
2174 struct hal_rx_desc *rx_desc,
2175 enum hal_encrypt_type enctype,
2176 struct ieee80211_rx_status *status,
2177 bool decrypted)
2178{
2179 u8 *first_hdr;
2180 u8 decap;
2181 struct ethhdr *ehdr;
2182
2183 first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
2184 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
2185
2186 switch (decap) {
2187 case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2188 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
2189 enctype, status);
2190 break;
2191 case DP_RX_DECAP_TYPE_RAW:
2192 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2193 decrypted);
2194 break;
2195 case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2196 ehdr = (struct ethhdr *)msdu->data;
2197
2198 /* mac80211 allows fast path only for authorized STA */
2199 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2200 ATH11K_SKB_RXCB(msdu)->is_eapol = true;
2201 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2202 enctype, status);
2203 break;
2204 }
2205
2206 /* PN for mcast packets will be validated in mac80211;
2207 * remove eth header and add 802.11 header.
2208 */
2209 if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2210 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2211 enctype, status);
2212 break;
2213 case DP_RX_DECAP_TYPE_8023:
2214 /* TODO: Handle undecap for these formats */
2215 break;
2216 }
2217}
2218
2219static struct ath11k_peer *
2220ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
2221{
2222 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2223 struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2224 struct ath11k_peer *peer = NULL;
2225
2226 lockdep_assert_held(&ab->base_lock);
2227
2228 if (rxcb->peer_id)
2229 peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
2230
2231 if (peer)
2232 return peer;
2233
2234 if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2235 return NULL;
2236
2237 peer = ath11k_peer_find_by_addr(ab,
2238 ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
2239 return peer;
2240}
2241
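/* Per-MPDU rx processing: resolve the encryption type from the peer
 * entry (falling back to the rx descriptor), translate HW error and
 * decryption status into mac80211 rx flags, apply checksum offload and
 * undo the HW decap so mac80211 sees the expected frame format.
 */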
2242static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
2243 struct sk_buff *msdu,
2244 struct hal_rx_desc *rx_desc,
2245 struct ieee80211_rx_status *rx_status)
2246{
2247 bool fill_crypto_hdr;
2248 enum hal_encrypt_type enctype;
2249 bool is_decrypted = false;
2250 struct ath11k_skb_rxcb *rxcb;
2251 struct ieee80211_hdr *hdr;
2252 struct ath11k_peer *peer;
2253 struct rx_attention *rx_attention;
2254 u32 err_bitmap;
2255
2256 /* PN for multicast packets will be checked in mac80211 */
2257 rxcb = ATH11K_SKB_RXCB(msdu);
2258 fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
2259 rxcb->is_mcbc = fill_crypto_hdr;
2260
2261 if (rxcb->is_mcbc) {
2262 rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
2263 rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
2264 }
2265
2266 spin_lock_bh(&ar->ab->base_lock);
2267 peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2268 if (peer) {
2269 if (rxcb->is_mcbc)
2270 enctype = peer->sec_type_grp;
2271 else
2272 enctype = peer->sec_type;
2273 } else {
2274 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
2275 }
2276 spin_unlock_bh(&ar->ab->base_lock);
2277
2278 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
2279 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
2280 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2281 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
2282
2283 /* Clear per-MPDU flags while leaving per-PPDU flags intact */
2284 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2285 RX_FLAG_MMIC_ERROR |
2286 RX_FLAG_DECRYPTED |
2287 RX_FLAG_IV_STRIPPED |
2288 RX_FLAG_MMIC_STRIPPED);
2289
2290 if (err_bitmap & DP_RX_MPDU_ERR_FCS)
2291 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2292 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
2293 rx_status->flag |= RX_FLAG_MMIC_ERROR;
2294
2295 if (is_decrypted) {
2296 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2297
2298 if (fill_crypto_hdr)
2299 rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2300 RX_FLAG_ICV_STRIPPED;
2301 else
2302 rx_status->flag |= RX_FLAG_IV_STRIPPED |
2303 RX_FLAG_PN_VALIDATED;
2304 }
2305
2306 ath11k_dp_rx_h_csum_offload(ar, msdu);
2307 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
2308 enctype, rx_status, is_decrypted);
2309
2310 if (!is_decrypted || fill_crypto_hdr)
2311 return;
2312
2313 if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
2314 DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2315 hdr = (void *)msdu->data;
2316 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2317 }
2318}
2319
2320static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2321 struct ieee80211_rx_status *rx_status)
2322{
2323 struct ieee80211_supported_band *sband;
2324 enum rx_msdu_start_pkt_type pkt_type;
2325 u8 bw;
2326 u8 rate_mcs, nss;
2327 u8 sgi;
2328 bool is_cck, is_ldpc;
2329
2330 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
2331 bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
2332 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
2333 nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
2334 sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
2335
2336 switch (pkt_type) {
2337 case RX_MSDU_START_PKT_TYPE_11A:
2338 case RX_MSDU_START_PKT_TYPE_11B:
2339 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2340 sband = &ar->mac.sbands[rx_status->band];
2341 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2342 is_cck);
2343 break;
2344 case RX_MSDU_START_PKT_TYPE_11N:
2345 rx_status->encoding = RX_ENC_HT;
2346 if (rate_mcs > ATH11K_HT_MCS_MAX) {
2347 ath11k_warn(ar->ab,
2348				    "invalid MCS %d received in HT mode\n",
2349 rate_mcs);
2350 break;
2351 }
2352 rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2353 if (sgi)
2354 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2355 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2356 break;
2357 case RX_MSDU_START_PKT_TYPE_11AC:
2358 rx_status->encoding = RX_ENC_VHT;
2359 rx_status->rate_idx = rate_mcs;
2360 if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2361 ath11k_warn(ar->ab,
2362				    "invalid MCS %d received in VHT mode\n",
2363 rate_mcs);
2364 break;
2365 }
2366 rx_status->nss = nss;
2367 if (sgi)
2368 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2369 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2370 is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
2371 if (is_ldpc)
2372 rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
2373 break;
2374 case RX_MSDU_START_PKT_TYPE_11AX:
2375 rx_status->rate_idx = rate_mcs;
2376 if (rate_mcs > ATH11K_HE_MCS_MAX) {
2377 ath11k_warn(ar->ab,
2378				    "invalid MCS %d received in HE mode\n",
2379 rate_mcs);
2380 break;
2381 }
2382 rx_status->encoding = RX_ENC_HE;
2383 rx_status->nss = nss;
2384 rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
2385 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2386 break;
2387 }
2388}
2389
2390static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2391 struct ieee80211_rx_status *rx_status)
2392{
2393 u8 channel_num;
2394 u32 center_freq, meta_data;
2395 struct ieee80211_channel *channel;
2396
2397 rx_status->freq = 0;
2398 rx_status->rate_idx = 0;
2399 rx_status->nss = 0;
2400 rx_status->encoding = RX_ENC_LEGACY;
2401 rx_status->bw = RATE_INFO_BW_20;
2402
2403 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2404
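	/* The lower 16 bits of the freq metadata carry the channel number,
	 * the upper 16 bits the center frequency (used for 6 GHz channels).
	 */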
2405 meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
2406 channel_num = meta_data;
2407 center_freq = meta_data >> 16;
2408
2409 if (center_freq >= ATH11K_MIN_6G_FREQ &&
2410 center_freq <= ATH11K_MAX_6G_FREQ) {
2411 rx_status->band = NL80211_BAND_6GHZ;
2412 rx_status->freq = center_freq;
2413 } else if (channel_num >= 1 && channel_num <= 14) {
2414 rx_status->band = NL80211_BAND_2GHZ;
2415 } else if (channel_num >= 36 && channel_num <= 177) {
2416 rx_status->band = NL80211_BAND_5GHZ;
2417 } else {
2418 spin_lock_bh(&ar->data_lock);
2419 channel = ar->rx_channel;
2420 if (channel) {
2421 rx_status->band = channel->band;
2422 channel_num =
2423 ieee80211_frequency_to_channel(channel->center_freq);
2424 }
2425 spin_unlock_bh(&ar->data_lock);
2426 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
2427 rx_desc, sizeof(struct hal_rx_desc));
2428 }
2429
2430 if (rx_status->band != NL80211_BAND_6GHZ)
2431 rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2432 rx_status->band);
2433
2434 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2435}
2436
2437static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
2438 struct sk_buff *msdu,
2439 struct ieee80211_rx_status *status)
2440{
2441 static const struct ieee80211_radiotap_he known = {
2442 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2443 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2444 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2445 };
2446 struct ieee80211_rx_status *rx_status;
2447 struct ieee80211_radiotap_he *he = NULL;
2448 struct ieee80211_sta *pubsta = NULL;
2449 struct ath11k_peer *peer;
2450 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2451 u8 decap = DP_RX_DECAP_TYPE_RAW;
2452 bool is_mcbc = rxcb->is_mcbc;
2453 bool is_eapol = rxcb->is_eapol;
2454
2455 if (status->encoding == RX_ENC_HE &&
2456 !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2457 !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2458 he = skb_push(msdu, sizeof(known));
2459 memcpy(he, &known, sizeof(known));
2460 status->flag |= RX_FLAG_RADIOTAP_HE;
2461 }
2462
2463 if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2464 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
2465
2466 spin_lock_bh(&ar->ab->base_lock);
2467 peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2468 if (peer && peer->sta)
2469 pubsta = peer->sta;
2470 spin_unlock_bh(&ar->ab->base_lock);
2471
2472 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
2473 "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2474 msdu,
2475 msdu->len,
2476 peer ? peer->addr : NULL,
2477 rxcb->tid,
2478 is_mcbc ? "mcast" : "ucast",
2479 rxcb->seq_no,
2480 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2481 (status->encoding == RX_ENC_HT) ? "ht" : "",
2482 (status->encoding == RX_ENC_VHT) ? "vht" : "",
2483 (status->encoding == RX_ENC_HE) ? "he" : "",
2484 (status->bw == RATE_INFO_BW_40) ? "40" : "",
2485 (status->bw == RATE_INFO_BW_80) ? "80" : "",
2486 (status->bw == RATE_INFO_BW_160) ? "160" : "",
2487 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2488 status->rate_idx,
2489 status->nss,
2490 status->freq,
2491 status->band, status->flag,
2492 !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2493 !!(status->flag & RX_FLAG_MMIC_ERROR),
2494 !!(status->flag & RX_FLAG_AMSDU_MORE));
2495
2496 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
2497 msdu->data, msdu->len);
2498
2499 rx_status = IEEE80211_SKB_RXCB(msdu);
2500 *rx_status = *status;
2501
2502 /* TODO: trace rx packet */
2503
2504	/* The PN for multicast packets is not validated in HW,
2505	 * so skip the 802.3 fast rx path for them.
2506	 * Also, fast_rx expects the STA to be authorized, hence
2507	 * EAPOL packets are sent via the slow path.
2508 */
2509 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2510 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2511 rx_status->flag |= RX_FLAG_8023;
2512
2513 ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
2514}
2515
2516static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
2517 struct sk_buff *msdu,
2518 struct sk_buff_head *msdu_list,
2519 struct ieee80211_rx_status *rx_status)
2520{
2521 struct ath11k_base *ab = ar->ab;
2522 struct hal_rx_desc *rx_desc, *lrx_desc;
2523 struct rx_attention *rx_attention;
2524 struct ath11k_skb_rxcb *rxcb;
2525 struct sk_buff *last_buf;
2526 u8 l3_pad_bytes;
2527 u8 *hdr_status;
2528 u16 msdu_len;
2529 int ret;
2530 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
2531
2532 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2533 if (!last_buf) {
2534 ath11k_warn(ab,
2535 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
2536 ret = -EIO;
2537 goto free_out;
2538 }
2539
2540 rx_desc = (struct hal_rx_desc *)msdu->data;
2541 if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
2542 ath11k_warn(ar->ab, "msdu len not valid\n");
2543 ret = -EIO;
2544 goto free_out;
2545 }
2546
2547 lrx_desc = (struct hal_rx_desc *)last_buf->data;
2548 rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
2549 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
2550 ath11k_warn(ab, "msdu_done bit in attention is not set\n");
2551 ret = -EIO;
2552 goto free_out;
2553 }
2554
2555 rxcb = ATH11K_SKB_RXCB(msdu);
2556 rxcb->rx_desc = rx_desc;
2557 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
2558 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);
2559
2560 if (rxcb->is_frag) {
2561 skb_pull(msdu, hal_rx_desc_sz);
2562 } else if (!rxcb->is_continuation) {
2563 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2564 hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
2565 ret = -EINVAL;
2566 ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
2567 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
2568 sizeof(struct ieee80211_hdr));
2569 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
2570 sizeof(struct hal_rx_desc));
2571 goto free_out;
2572 }
2573 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2574 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2575 } else {
2576 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
2577 msdu, last_buf,
2578 l3_pad_bytes, msdu_len);
2579 if (ret) {
2580 ath11k_warn(ab,
2581				    "failed to coalesce msdu rx buffer: %d\n", ret);
2582 goto free_out;
2583 }
2584 }
2585
2586 ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2587 ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2588
2589 rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2590
2591 return 0;
2592
2593free_out:
2594 return ret;
2595}
2596
2597static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2598 struct napi_struct *napi,
2599 struct sk_buff_head *msdu_list,
2600 int mac_id)
2601{
2602 struct sk_buff *msdu;
2603 struct ath11k *ar;
2604 struct ieee80211_rx_status rx_status = {0};
2605 int ret;
2606
2607 if (skb_queue_empty(msdu_list))
2608 return;
2609
2610 if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
2611 __skb_queue_purge(msdu_list);
2612 return;
2613 }
2614
2615 ar = ab->pdevs[mac_id].ar;
2616 if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
2617 __skb_queue_purge(msdu_list);
2618 return;
2619 }
2620
2621 while ((msdu = __skb_dequeue(msdu_list))) {
2622 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2623 if (unlikely(ret)) {
2624 ath11k_dbg(ab, ATH11K_DBG_DATA,
2625				   "Unable to process msdu %d\n", ret);
2626 dev_kfree_skb_any(msdu);
2627 continue;
2628 }
2629
2630 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2631 }
2632}
2633
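/* NAPI handler for a REO destination ring: reap up to budget completed
 * MSDUs, sort them into per-pdev lists, hand them to the rx path and
 * replenish the rxdma buffer rings with the reaped buffer counts.
 */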
2634int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
2635 struct napi_struct *napi, int budget)
2636{
2637 struct ath11k_dp *dp = &ab->dp;
2638 struct dp_rxdma_ring *rx_ring;
2639 int num_buffs_reaped[MAX_RADIOS] = {0};
2640 struct sk_buff_head msdu_list[MAX_RADIOS];
2641 struct ath11k_skb_rxcb *rxcb;
2642 int total_msdu_reaped = 0;
2643 struct hal_srng *srng;
2644 struct sk_buff *msdu;
2645 bool done = false;
2646 int buf_id, mac_id;
2647 struct ath11k *ar;
2648 struct hal_reo_dest_ring *desc;
2649 enum hal_reo_dest_ring_push_reason push_reason;
2650 u32 cookie;
2651 int i;
2652
2653 for (i = 0; i < MAX_RADIOS; i++)
2654 __skb_queue_head_init(&msdu_list[i]);
2655
2656 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2657
2658 spin_lock_bh(&srng->lock);
2659
2660try_again:
2661 ath11k_hal_srng_access_begin(ab, srng);
2662
2663 while (likely(desc =
2664 (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
2665 srng))) {
2666 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
2667 desc->buf_addr_info.info1);
2668 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2669 cookie);
2670 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
2671
2672 if (unlikely(buf_id == 0))
2673 continue;
2674
2675 ar = ab->pdevs[mac_id].ar;
2676 rx_ring = &ar->dp.rx_refill_buf_ring;
2677 spin_lock_bh(&rx_ring->idr_lock);
2678 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2679 if (unlikely(!msdu)) {
2680 ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
2681 buf_id);
2682 spin_unlock_bh(&rx_ring->idr_lock);
2683 continue;
2684 }
2685
2686 idr_remove(&rx_ring->bufs_idr, buf_id);
2687 spin_unlock_bh(&rx_ring->idr_lock);
2688
2689 rxcb = ATH11K_SKB_RXCB(msdu);
2690 dma_unmap_single(ab->dev, rxcb->paddr,
2691 msdu->len + skb_tailroom(msdu),
2692 DMA_FROM_DEVICE);
2693
2694 num_buffs_reaped[mac_id]++;
2695
2696 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
2697 desc->info0);
2698 if (unlikely(push_reason !=
2699 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
2700 dev_kfree_skb_any(msdu);
2701 ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
2702 continue;
2703 }
2704
2705 rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
2706 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2707 rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
2708 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2709 rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
2710 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2711 rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
2712 desc->rx_mpdu_info.meta_data);
2713 rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
2714 desc->rx_mpdu_info.info0);
2715 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
2716 desc->info0);
2717
2718 rxcb->mac_id = mac_id;
2719 __skb_queue_tail(&msdu_list[mac_id], msdu);
2720
2721 if (rxcb->is_continuation) {
2722 done = false;
2723 } else {
2724 total_msdu_reaped++;
2725 done = true;
2726 }
2727
2728 if (total_msdu_reaped >= budget)
2729 break;
2730 }
2731
2732	/* HW might have updated the head pointer after we cached it.
2733	 * In that case, even though there are entries in the ring, we'll
2734	 * get a NULL rx_desc. Retry the read with the updated cached
2735	 * head pointer so that we can reap the complete MPDU in the
2736	 * current rx processing pass.
2737 */
2738 if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
2739 ath11k_hal_srng_access_end(ab, srng);
2740 goto try_again;
2741 }
2742
2743 ath11k_hal_srng_access_end(ab, srng);
2744
2745 spin_unlock_bh(&srng->lock);
2746
2747 if (unlikely(!total_msdu_reaped))
2748 goto exit;
2749
2750 for (i = 0; i < ab->num_radios; i++) {
2751 if (!num_buffs_reaped[i])
2752 continue;
2753
2754 ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
2755
2756 ar = ab->pdevs[i].ar;
2757 rx_ring = &ar->dp.rx_refill_buf_ring;
2758
2759 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
2760 ab->hw_params.hal_params->rx_buf_rbm);
2761 }
2762exit:
2763 return total_msdu_reaped;
2764}
2765
2766static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2767 struct hal_rx_mon_ppdu_info *ppdu_info)
2768{
2769 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2770 u32 num_msdu;
2771 int i;
2772
2773 if (!rx_stats)
2774 return;
2775
2776 arsta->rssi_comb = ppdu_info->rssi_comb;
2777 ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
2778
2779 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2780 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2781
2782 rx_stats->num_msdu += num_msdu;
2783 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2784 ppdu_info->tcp_ack_msdu_count;
2785 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2786 rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2787
2788 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2789 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
2790 ppdu_info->nss = 1;
2791 ppdu_info->mcs = HAL_RX_MAX_MCS;
2792 ppdu_info->tid = IEEE80211_NUM_TIDS;
2793 }
2794
2795 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2796 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2797
2798 if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2799 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2800
2801 if (ppdu_info->gi < HAL_RX_GI_MAX)
2802 rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2803
2804 if (ppdu_info->bw < HAL_RX_BW_MAX)
2805 rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2806
2807 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2808 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2809
2810 if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2811 rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2812
2813 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2814 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2815
2816 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2817 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2818
2819 if (ppdu_info->is_stbc)
2820 rx_stats->stbc_count += num_msdu;
2821
2822 if (ppdu_info->beamformed)
2823 rx_stats->beamformed_count += num_msdu;
2824
2825 if (ppdu_info->num_mpdu_fcs_ok > 1)
2826 rx_stats->ampdu_msdu_count += num_msdu;
2827 else
2828 rx_stats->non_ampdu_msdu_count += num_msdu;
2829
2830 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2831 rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
2832 rx_stats->dcm_count += ppdu_info->dcm;
2833 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
2836
2837 BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
2838 ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
2839
2840 for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
2841 arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
2842
2843 rx_stats->rx_duration += ppdu_info->rx_duration;
2844 arsta->rx_duration = rx_stats->rx_duration;
2845}
2846
2847static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2848 struct dp_rxdma_ring *rx_ring,
2849 int *buf_id)
2850{
2851 struct sk_buff *skb;
2852 dma_addr_t paddr;
2853
2854 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2855 DP_RX_BUFFER_ALIGN_SIZE);
2856
2857 if (!skb)
2858 goto fail_alloc_skb;
2859
2860 if (!IS_ALIGNED((unsigned long)skb->data,
2861 DP_RX_BUFFER_ALIGN_SIZE)) {
2862 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2863 skb->data);
2864 }
2865
2866 paddr = dma_map_single(ab->dev, skb->data,
2867 skb->len + skb_tailroom(skb),
2868 DMA_FROM_DEVICE);
2869 if (unlikely(dma_mapping_error(ab->dev, paddr)))
2870 goto fail_free_skb;
2871
2872 spin_lock_bh(&rx_ring->idr_lock);
2873 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2874 rx_ring->bufs_max, GFP_ATOMIC);
2875 spin_unlock_bh(&rx_ring->idr_lock);
2876 if (*buf_id < 0)
2877 goto fail_dma_unmap;
2878
2879 ATH11K_SKB_RXCB(skb)->paddr = paddr;
2880 return skb;
2881
2882fail_dma_unmap:
2883 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2884 DMA_FROM_DEVICE);
2885fail_free_skb:
2886 dev_kfree_skb_any(skb);
2887fail_alloc_skb:
2888 return NULL;
2889}
2890
2891int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2892 struct dp_rxdma_ring *rx_ring,
2893 int req_entries,
2894 enum hal_rx_buf_return_buf_manager mgr)
2895{
2896 struct hal_srng *srng;
2897 u32 *desc;
2898 struct sk_buff *skb;
2899 int num_free;
2900 int num_remain;
2901 int buf_id;
2902 u32 cookie;
2903 dma_addr_t paddr;
2904
2905 req_entries = min(req_entries, rx_ring->bufs_max);
2906
2907 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2908
2909 spin_lock_bh(&srng->lock);
2910
2911 ath11k_hal_srng_access_begin(ab, srng);
2912
2913 num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2914
2915 req_entries = min(num_free, req_entries);
2916 num_remain = req_entries;
2917
2918 while (num_remain > 0) {
2919 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2920 &buf_id);
2921 if (!skb)
2922 break;
2923 paddr = ATH11K_SKB_RXCB(skb)->paddr;
2924
2925 desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2926 if (!desc)
2927 goto fail_desc_get;
2928
2929 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2930 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2931
2932 num_remain--;
2933
2934 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2935 }
2936
2937 ath11k_hal_srng_access_end(ab, srng);
2938
2939 spin_unlock_bh(&srng->lock);
2940
2941 return req_entries - num_remain;
2942
2943fail_desc_get:
2944 spin_lock_bh(&rx_ring->idr_lock);
2945 idr_remove(&rx_ring->bufs_idr, buf_id);
2946 spin_unlock_bh(&rx_ring->idr_lock);
2947 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2948 DMA_FROM_DEVICE);
2949 dev_kfree_skb_any(skb);
2950 ath11k_hal_srng_access_end(ab, srng);
2951 spin_unlock_bh(&srng->lock);
2952
2953 return req_entries - num_remain;
2954}
2955
2956#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
2957
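/* Decide whether the status ring is leading or lagging the destination
 * ring by comparing PPDU IDs from both. A difference larger than the
 * ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP threshold (roughly half the 16-bit
 * PPDU ID space) is taken as a wraparound, which inverts the verdict.
 */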
2958static void
2959ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
2960 struct hal_tlv_hdr *tlv)
2961{
2962 struct hal_rx_ppdu_start *ppdu_start;
2963 u16 ppdu_id_diff, ppdu_id, tlv_len;
2964 u8 *ptr;
2965
2966	/* The PPDU ID is part of the second TLV; move ptr to the second TLV */
2967 tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
2968 ptr = (u8 *)tlv;
2969 ptr += sizeof(*tlv) + tlv_len;
2970 tlv = (struct hal_tlv_hdr *)ptr;
2971
2972 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
2973 return;
2974
2975 ptr += sizeof(*tlv);
2976 ppdu_start = (struct hal_rx_ppdu_start *)ptr;
2977 ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
2978 __le32_to_cpu(ppdu_start->info0));
2979
2980 if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
2981 pmon->buf_state = DP_MON_STATUS_LEAD;
2982 ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
2983 if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
2984 pmon->buf_state = DP_MON_STATUS_LAG;
2985 } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
2986 pmon->buf_state = DP_MON_STATUS_LAG;
2987 ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
2988 if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
2989 pmon->buf_state = DP_MON_STATUS_LEAD;
2990 }
2991}
2992
2993static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
2994 int *budget, struct sk_buff_head *skb_list)
2995{
2996 struct ath11k *ar;
2997 const struct ath11k_hw_hal_params *hal_params;
2998 struct ath11k_pdev_dp *dp;
2999 struct dp_rxdma_ring *rx_ring;
3000 struct ath11k_mon_data *pmon;
3001 struct hal_srng *srng;
3002 void *rx_mon_status_desc;
3003 struct sk_buff *skb;
3004 struct ath11k_skb_rxcb *rxcb;
3005 struct hal_tlv_hdr *tlv;
3006 u32 cookie;
3007 int buf_id, srng_id;
3008 dma_addr_t paddr;
3009 u8 rbm;
3010 int num_buffs_reaped = 0;
3011
3012 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
3013 dp = &ar->dp;
3014 pmon = &dp->mon_data;
3015 srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
3016 rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
3017
3018 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
3019
3020 spin_lock_bh(&srng->lock);
3021
3022 ath11k_hal_srng_access_begin(ab, srng);
3023 while (*budget) {
3024 *budget -= 1;
3025 rx_mon_status_desc =
3026 ath11k_hal_srng_src_peek(ab, srng);
3027 if (!rx_mon_status_desc) {
3028 pmon->buf_state = DP_MON_STATUS_REPLINISH;
3029 break;
3030 }
3031
3032 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
3033 &cookie, &rbm);
3034 if (paddr) {
3035 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
3036
3037 spin_lock_bh(&rx_ring->idr_lock);
3038 skb = idr_find(&rx_ring->bufs_idr, buf_id);
3039 spin_unlock_bh(&rx_ring->idr_lock);
3040
3041 if (!skb) {
3042 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
3043 buf_id);
3044 pmon->buf_state = DP_MON_STATUS_REPLINISH;
3045 goto move_next;
3046 }
3047
3048 rxcb = ATH11K_SKB_RXCB(skb);
3049
3050 dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
3051 skb->len + skb_tailroom(skb),
3052 DMA_FROM_DEVICE);
3053
3054 tlv = (struct hal_tlv_hdr *)skb->data;
3055 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
3056 HAL_RX_STATUS_BUFFER_DONE) {
3057 ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
3058 FIELD_GET(HAL_TLV_HDR_TAG,
3059 tlv->tl), buf_id);
3060			/* If the DONE status is missing, hold on to this
3061			 * status ring buffer until DMA completes for it:
3062			 * keep the HP of the mon_status_ring unchanged and
3063			 * break out here. The same buffer will be checked
3064			 * again on the next pass.
3065			 */
3067 pmon->buf_state = DP_MON_STATUS_NO_DMA;
3068 break;
3069 }
3070
3071 spin_lock_bh(&rx_ring->idr_lock);
3072 idr_remove(&rx_ring->bufs_idr, buf_id);
3073 spin_unlock_bh(&rx_ring->idr_lock);
3074 if (ab->hw_params.full_monitor_mode) {
3075 ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
3076 if (paddr == pmon->mon_status_paddr)
3077 pmon->buf_state = DP_MON_STATUS_MATCH;
3078 }
3079
3080 dma_unmap_single(ab->dev, rxcb->paddr,
3081 skb->len + skb_tailroom(skb),
3082 DMA_FROM_DEVICE);
3083
3084 __skb_queue_tail(skb_list, skb);
3085 } else {
3086 pmon->buf_state = DP_MON_STATUS_REPLINISH;
3087 }
3088move_next:
3089 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
3090 &buf_id);
3091
3092 if (!skb) {
3093 hal_params = ab->hw_params.hal_params;
3094 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
3095 hal_params->rx_buf_rbm);
3096 num_buffs_reaped++;
3097 break;
3098 }
3099 rxcb = ATH11K_SKB_RXCB(skb);
3100
3101 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
3102 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3103
3104 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
3105 cookie,
3106 ab->hw_params.hal_params->rx_buf_rbm);
3107 ath11k_hal_srng_src_get_next_entry(ab, srng);
3108 num_buffs_reaped++;
3109 }
3110 ath11k_hal_srng_access_end(ab, srng);
3111 spin_unlock_bh(&srng->lock);
3112
3113 return num_buffs_reaped;
3114}
3115
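/* Reassembly timeout: if the fragment sequence for this TID is still
 * incomplete when the timer fires, drop the fragments collected so far.
 */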
3116static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
3117{
3118 struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
3119
3120 spin_lock_bh(&rx_tid->ab->base_lock);
3121 if (rx_tid->last_frag_no &&
3122 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
3123 spin_unlock_bh(&rx_tid->ab->base_lock);
3124 return;
3125 }
3126 ath11k_dp_rx_frags_cleanup(rx_tid, true);
3127 spin_unlock_bh(&rx_tid->ab->base_lock);
3128}
3129
3130int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
3131{
3132 struct ath11k_base *ab = ar->ab;
3133 struct crypto_shash *tfm;
3134 struct ath11k_peer *peer;
3135 struct dp_rx_tid *rx_tid;
3136 int i;
3137
3138 tfm = crypto_alloc_shash("michael_mic", 0, 0);
3139 if (IS_ERR(tfm)) {
3140 ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
3141 PTR_ERR(tfm));
3142 return PTR_ERR(tfm);
3143 }
3144
3145 spin_lock_bh(&ab->base_lock);
3146
3147 peer = ath11k_peer_find(ab, vdev_id, peer_mac);
3148 if (!peer) {
3149 ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
3150 spin_unlock_bh(&ab->base_lock);
3151 crypto_free_shash(tfm);
3152 return -ENOENT;
3153 }
3154
3155 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3156 rx_tid = &peer->rx_tid[i];
3157 rx_tid->ab = ab;
3158 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
3159 skb_queue_head_init(&rx_tid->rx_frags);
3160 }
3161
3162 peer->tfm_mmic = tfm;
3163 peer->dp_setup_done = true;
3164 spin_unlock_bh(&ab->base_lock);
3165
3166 return 0;
3167}
3168
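/* Compute the TKIP Michael MIC over the frame: the MIC is calculated
 * with the michael_mic shash over a 16-byte pseudo header (DA, SA,
 * priority/TID plus three zero bytes) followed by the data portion.
 */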
3169static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
3170 struct ieee80211_hdr *hdr, u8 *data,
3171 size_t data_len, u8 *mic)
3172{
3173 SHASH_DESC_ON_STACK(desc, tfm);
3174 u8 mic_hdr[16] = {0};
3175 u8 tid = 0;
3176 int ret;
3177
3178 if (!tfm)
3179 return -EINVAL;
3180
3181 desc->tfm = tfm;
3182
3183 ret = crypto_shash_setkey(tfm, key, 8);
3184 if (ret)
3185 goto out;
3186
3187 ret = crypto_shash_init(desc);
3188 if (ret)
3189 goto out;
3190
3191 /* TKIP MIC header */
3192 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3193 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3194 if (ieee80211_is_data_qos(hdr->frame_control))
3195 tid = ieee80211_get_tid(hdr);
3196 mic_hdr[12] = tid;
3197
3198 ret = crypto_shash_update(desc, mic_hdr, 16);
3199 if (ret)
3200 goto out;
3201 ret = crypto_shash_update(desc, data, data_len);
3202 if (ret)
3203 goto out;
3204 ret = crypto_shash_final(desc, mic);
3205out:
3206 shash_desc_zero(desc);
3207 return ret;
3208}
3209
3210static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
3211 struct sk_buff *msdu)
3212{
3213 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3214 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3215 struct ieee80211_key_conf *key_conf;
3216 struct ieee80211_hdr *hdr;
3217 u8 mic[IEEE80211_CCMP_MIC_LEN];
3218 int head_len, tail_len, ret;
3219 size_t data_len;
3220 u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3221 u8 *key, *data;
3222 u8 key_idx;
3223
3224 if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
3225 HAL_ENCRYPT_TYPE_TKIP_MIC)
3226 return 0;
3227
3228 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3229 hdr_len = ieee80211_hdrlen(hdr->frame_control);
3230 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
3231 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
3232
3233 if (!is_multicast_ether_addr(hdr->addr1))
3234 key_idx = peer->ucast_keyidx;
3235 else
3236 key_idx = peer->mcast_keyidx;
3237
3238 key_conf = peer->keys[key_idx];
3239
3240 data = msdu->data + head_len;
3241 data_len = msdu->len - head_len - tail_len;
3242 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
3243
3244 ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
3245 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
3246 goto mic_fail;
3247
3248 return 0;
3249
3250mic_fail:
3251 (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
3252 (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
3253
3254 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
3255 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
3256 skb_pull(msdu, hal_rx_desc_sz);
3257
3258 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
3259 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
3260 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
3261 ieee80211_rx(ar->hw, msdu);
3262 return -EINVAL;
3263}
3264
3265static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
3266 enum hal_encrypt_type enctype, u32 flags)
3267{
3268 struct ieee80211_hdr *hdr;
3269 size_t hdr_len;
3270 size_t crypto_len;
3271 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3272
3273 if (!flags)
3274 return;
3275
3276 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3277
3278 if (flags & RX_FLAG_MIC_STRIPPED)
3279 skb_trim(msdu, msdu->len -
3280 ath11k_dp_rx_crypto_mic_len(ar, enctype));
3281
3282 if (flags & RX_FLAG_ICV_STRIPPED)
3283 skb_trim(msdu, msdu->len -
3284 ath11k_dp_rx_crypto_icv_len(ar, enctype));
3285
3286 if (flags & RX_FLAG_IV_STRIPPED) {
3287 hdr_len = ieee80211_hdrlen(hdr->frame_control);
3288 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
3289
3290 memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
3291 (void *)msdu->data + hal_rx_desc_sz, hdr_len);
3292 skb_pull(msdu, crypto_len);
3293 }
3294}
3295
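/* Stitch the collected fragments into one MPDU: strip per-fragment
 * crypto fields and headers, coalesce the payloads into the first
 * fragment, clear the morefrags bit and verify the TKIP MIC if needed.
 */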
3296static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
3297 struct ath11k_peer *peer,
3298 struct dp_rx_tid *rx_tid,
3299 struct sk_buff **defrag_skb)
3300{
3301 struct hal_rx_desc *rx_desc;
3302 struct sk_buff *skb, *first_frag, *last_frag;
3303 struct ieee80211_hdr *hdr;
3304 struct rx_attention *rx_attention;
3305 enum hal_encrypt_type enctype;
3306 bool is_decrypted = false;
3307 int msdu_len = 0;
3308 int extra_space;
3309 u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3310
3311 first_frag = skb_peek(&rx_tid->rx_frags);
3312 last_frag = skb_peek_tail(&rx_tid->rx_frags);
3313
3314 skb_queue_walk(&rx_tid->rx_frags, skb) {
3315 flags = 0;
3316 rx_desc = (struct hal_rx_desc *)skb->data;
3317 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3318
3319 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
3320 if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
3321 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
3322 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
3323 }
3324
3325 if (is_decrypted) {
3326 if (skb != first_frag)
3327 flags |= RX_FLAG_IV_STRIPPED;
3328 if (skb != last_frag)
3329 flags |= RX_FLAG_ICV_STRIPPED |
3330 RX_FLAG_MIC_STRIPPED;
3331 }
3332
3333 /* RX fragments are always raw packets */
3334 if (skb != last_frag)
3335 skb_trim(skb, skb->len - FCS_LEN);
3336 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3337
3338 if (skb != first_frag)
3339 skb_pull(skb, hal_rx_desc_sz +
3340 ieee80211_hdrlen(hdr->frame_control));
3341 msdu_len += skb->len;
3342 }
3343
3344 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3345 if (extra_space > 0 &&
3346 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3347 return -ENOMEM;
3348
3349 __skb_unlink(first_frag, &rx_tid->rx_frags);
3350 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3351 skb_put_data(first_frag, skb->data, skb->len);
3352 dev_kfree_skb_any(skb);
3353 }
3354
3355 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
3356 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3357 ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
3358
3359 if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3360 first_frag = NULL;
3361
3362 *defrag_skb = first_frag;
3363 return 0;
3364}
3365
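/* Hand the defragmented MPDU back to the REO via the reinject
 * (entrance) ring so that HW can run its normal PN and reorder handling
 * before delivering the frame through the regular rx path.
 */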
3366static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
3367 struct sk_buff *defrag_skb)
3368{
3369 struct ath11k_base *ab = ar->ab;
3370 struct ath11k_pdev_dp *dp = &ar->dp;
3371 struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
3372 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3373 struct hal_reo_entrance_ring *reo_ent_ring;
3374 struct hal_reo_dest_ring *reo_dest_ring;
3375 struct dp_link_desc_bank *link_desc_banks;
3376 struct hal_rx_msdu_link *msdu_link;
3377 struct hal_rx_msdu_details *msdu0;
3378 struct hal_srng *srng;
3379 dma_addr_t paddr;
3380 u32 desc_bank, msdu_info, mpdu_info;
3381 u32 dst_idx, cookie, hal_rx_desc_sz;
3382 int ret, buf_id;
3383
3384 hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
3385 link_desc_banks = ab->dp.link_desc_banks;
3386 reo_dest_ring = rx_tid->dst_ring_desc;
3387
3388 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3389 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3390 (paddr - link_desc_banks[desc_bank].paddr));
3391 msdu0 = &msdu_link->msdu_link[0];
3392 dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
3393 memset(msdu0, 0, sizeof(*msdu0));
3394
3395 msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
3396 FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
3397 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
3398 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
3399 defrag_skb->len - hal_rx_desc_sz) |
3400 FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
3401 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
3402 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
3403 msdu0->rx_msdu_info.info0 = msdu_info;
3404
3405 /* change msdu len in hal rx desc */
3406 ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3407
3408 paddr = dma_map_single(ab->dev, defrag_skb->data,
3409 defrag_skb->len + skb_tailroom(defrag_skb),
3410 DMA_TO_DEVICE);
3411 if (dma_mapping_error(ab->dev, paddr))
3412 return -ENOMEM;
3413
3414 spin_lock_bh(&rx_refill_ring->idr_lock);
3415 buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
3416 rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
3417 spin_unlock_bh(&rx_refill_ring->idr_lock);
3418 if (buf_id < 0) {
3419 ret = -ENOMEM;
3420 goto err_unmap_dma;
3421 }
3422
3423 ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
3424 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
3425 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3426
3427 ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
3428 ab->hw_params.hal_params->rx_buf_rbm);
3429
3430 /* Fill mpdu details into reo entrance ring */
3431 srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
3432
3433 spin_lock_bh(&srng->lock);
3434 ath11k_hal_srng_access_begin(ab, srng);
3435
3436 reo_ent_ring = (struct hal_reo_entrance_ring *)
3437 ath11k_hal_srng_src_get_next_entry(ab, srng);
3438 if (!reo_ent_ring) {
3439 ath11k_hal_srng_access_end(ab, srng);
3440 spin_unlock_bh(&srng->lock);
3441 ret = -ENOSPC;
3442 goto err_free_idr;
3443 }
3444 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3445
3446 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3447 ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
3448 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
3449
3450 mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
3451 FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
3452 FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
3453 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
3454 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
3455 FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
3456 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
3457
3458 reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
3459 reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
3460 reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
3461 reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
3462 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
3463 reo_dest_ring->info0)) |
3464 FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
3465 ath11k_hal_srng_access_end(ab, srng);
3466 spin_unlock_bh(&srng->lock);
3467
3468 return 0;
3469
3470err_free_idr:
3471 spin_lock_bh(&rx_refill_ring->idr_lock);
3472 idr_remove(&rx_refill_ring->bufs_idr, buf_id);
3473 spin_unlock_bh(&rx_refill_ring->idr_lock);
3474err_unmap_dma:
3475 dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3476 DMA_TO_DEVICE);
3477 return ret;
3478}
3479
3480static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
3481 struct sk_buff *a, struct sk_buff *b)
3482{
3483 int frag1, frag2;
3484
3485 frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
3486 frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
3487
3488 return frag1 - frag2;
3489}
3490
3491static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
3492 struct sk_buff_head *frag_list,
3493 struct sk_buff *cur_frag)
3494{
3495 struct sk_buff *skb;
3496 int cmp;
3497
3498 skb_queue_walk(frag_list, skb) {
3499 cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
3500 if (cmp < 0)
3501 continue;
3502 __skb_queue_before(frag_list, skb, cur_frag);
3503 return;
3504 }
3505 __skb_queue_tail(frag_list, cur_frag);
3506}
3507
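/* Extract the 48-bit packet number from the CCMP/GCMP IV that follows
 * the 802.11 header; bytes 2 and 3 of the IV hold the key ID/reserved
 * fields and are skipped.
 */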
3508static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
3509{
3510 struct ieee80211_hdr *hdr;
3511 u64 pn = 0;
3512 u8 *ehdr;
3513 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3514
3515 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3516 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3517
3518 pn = ehdr[0];
3519 pn |= (u64)ehdr[1] << 8;
3520 pn |= (u64)ehdr[4] << 16;
3521 pn |= (u64)ehdr[5] << 24;
3522 pn |= (u64)ehdr[6] << 32;
3523 pn |= (u64)ehdr[7] << 40;
3524
3525 return pn;
3526}
3527
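/* For CCMP/GCMP ciphers, fragments of one MPDU must carry strictly
 * consecutive PNs; reject the sequence otherwise. Other ciphers are
 * accepted as-is.
 */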
3528static bool
3529ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3530{
3531 enum hal_encrypt_type encrypt_type;
3532 struct sk_buff *first_frag, *skb;
3533 struct hal_rx_desc *desc;
3534 u64 last_pn;
3535 u64 cur_pn;
3536
3537 first_frag = skb_peek(&rx_tid->rx_frags);
3538 desc = (struct hal_rx_desc *)first_frag->data;
3539
3540 encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
3541 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3542 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3543 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3544 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3545 return true;
3546
3547 last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
3548 skb_queue_walk(&rx_tid->rx_frags, skb) {
3549 if (skb == first_frag)
3550 continue;
3551
3552 cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
3553 if (cur_pn != last_pn + 1)
3554 return false;
3555 last_pn = cur_pn;
3556 }
3557 return true;
3558}
3559
3560static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
3561 struct sk_buff *msdu,
3562 u32 *ring_desc)
3563{
3564 struct ath11k_base *ab = ar->ab;
3565 struct hal_rx_desc *rx_desc;
3566 struct ath11k_peer *peer;
3567 struct dp_rx_tid *rx_tid;
3568 struct sk_buff *defrag_skb = NULL;
3569 u32 peer_id;
3570 u16 seqno, frag_no;
3571 u8 tid;
3572 int ret = 0;
3573 bool more_frags;
3574 bool is_mcbc;
3575
3576 rx_desc = (struct hal_rx_desc *)msdu->data;
3577 peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
3578 tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
3579 seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
3580 frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
3581 more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
3582 is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
3583
3584 /* Multicast/Broadcast fragments are not expected */
3585 if (is_mcbc)
3586 return -EINVAL;
3587
3588 if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
3589 !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
3590 tid > IEEE80211_NUM_TIDS)
3591 return -EINVAL;
3592
3593	/* Received an unfragmented packet in the REO exception
3594	 * ring; this shouldn't happen, as such packets typically
3595	 * come via the reo2sw srngs.
3597 */
3598 if (WARN_ON_ONCE(!frag_no && !more_frags))
3599 return -EINVAL;
3600
3601 spin_lock_bh(&ab->base_lock);
3602 peer = ath11k_peer_find_by_id(ab, peer_id);
3603 if (!peer) {
3604		ath11k_warn(ab, "failed to find the peer to defragment received fragment, peer_id %d\n",
3605 peer_id);
3606 ret = -ENOENT;
3607 goto out_unlock;
3608 }
3609 if (!peer->dp_setup_done) {
3610 ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
3611 peer->addr, peer_id);
3612 ret = -ENOENT;
3613 goto out_unlock;
3614 }
3615
3616 rx_tid = &peer->rx_tid[tid];
3617
3618 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3619 skb_queue_empty(&rx_tid->rx_frags)) {
3620 /* Flush stored fragments and start a new sequence */
3621 ath11k_dp_rx_frags_cleanup(rx_tid, true);
3622 rx_tid->cur_sn = seqno;
3623 }
3624
3625 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3626 /* Fragment already present */
3627 ret = -EINVAL;
3628 goto out_unlock;
3629 }
3630
3631 if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
3632 __skb_queue_tail(&rx_tid->rx_frags, msdu);
3633 else
3634 ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
3635
3636 rx_tid->rx_frag_bitmap |= BIT(frag_no);
3637 if (!more_frags)
3638 rx_tid->last_frag_no = frag_no;
3639
3640 if (frag_no == 0) {
3641 rx_tid->dst_ring_desc = kmemdup(ring_desc,
3642 sizeof(*rx_tid->dst_ring_desc),
3643 GFP_ATOMIC);
3644 if (!rx_tid->dst_ring_desc) {
3645 ret = -ENOMEM;
3646 goto out_unlock;
3647 }
3648 } else {
3649 ath11k_dp_rx_link_desc_return(ab, ring_desc,
3650 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3651 }
3652
3653 if (!rx_tid->last_frag_no ||
3654 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3655 mod_timer(&rx_tid->frag_timer, jiffies +
3656 ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
3657 goto out_unlock;
3658 }
3659
3660 spin_unlock_bh(&ab->base_lock);
3661 del_timer_sync(&rx_tid->frag_timer);
3662 spin_lock_bh(&ab->base_lock);
3663
3664 peer = ath11k_peer_find_by_id(ab, peer_id);
3665 if (!peer)
3666 goto err_frags_cleanup;
3667
3668 if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3669 goto err_frags_cleanup;
3670
3671 if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3672 goto err_frags_cleanup;
3673
3674 if (!defrag_skb)
3675 goto err_frags_cleanup;
3676
3677 if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3678 goto err_frags_cleanup;
3679
3680 ath11k_dp_rx_frags_cleanup(rx_tid, false);
3681 goto out_unlock;
3682
3683err_frags_cleanup:
3684 dev_kfree_skb_any(defrag_skb);
3685 ath11k_dp_rx_frags_cleanup(rx_tid, true);
3686out_unlock:
3687 spin_unlock_bh(&ab->base_lock);
3688 return ret;
3689}
3690
3691static int
3692ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
3693{
3694 struct ath11k_pdev_dp *dp = &ar->dp;
3695 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
3696 struct sk_buff *msdu;
3697 struct ath11k_skb_rxcb *rxcb;
3698 struct hal_rx_desc *rx_desc;
3699 u8 *hdr_status;
3700 u16 msdu_len;
3701 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3702
3703 spin_lock_bh(&rx_ring->idr_lock);
3704 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3705 if (!msdu) {
3706 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
3707 buf_id);
3708 spin_unlock_bh(&rx_ring->idr_lock);
3709 return -EINVAL;
3710 }
3711
3712 idr_remove(&rx_ring->bufs_idr, buf_id);
3713 spin_unlock_bh(&rx_ring->idr_lock);
3714
3715 rxcb = ATH11K_SKB_RXCB(msdu);
3716 dma_unmap_single(ar->ab->dev, rxcb->paddr,
3717 msdu->len + skb_tailroom(msdu),
3718 DMA_FROM_DEVICE);
3719
3720 if (drop) {
3721 dev_kfree_skb_any(msdu);
3722 return 0;
3723 }
3724
3725 rcu_read_lock();
3726 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3727 dev_kfree_skb_any(msdu);
3728 goto exit;
3729 }
3730
3731 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3732 dev_kfree_skb_any(msdu);
3733 goto exit;
3734 }
3735
3736 rx_desc = (struct hal_rx_desc *)msdu->data;
3737 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
3738 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3739 hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
3740 ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
3741 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
3742 sizeof(struct ieee80211_hdr));
3743 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
3744 sizeof(struct hal_rx_desc));
3745 dev_kfree_skb_any(msdu);
3746 goto exit;
3747 }
3748
3749 skb_put(msdu, hal_rx_desc_sz + msdu_len);
3750
3751 if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
3752 dev_kfree_skb_any(msdu);
3753 ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
3754 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3755 }
3756exit:
3757 rcu_read_unlock();
3758 return 0;
3759}
3760
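/* Process the REO exception ring: parse each error descriptor, fetch the
 * msdu cookies from its link descriptor and hand single-msdu fragments to
 * the defrag path; everything else is dropped. Reaped buffers are
 * replenished per pdev before returning.
 */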
3761int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
3762 int budget)
3763{
3764 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3765 struct dp_link_desc_bank *link_desc_banks;
3766 enum hal_rx_buf_return_buf_manager rbm;
3767 int tot_n_bufs_reaped, quota, ret, i;
3768 int n_bufs_reaped[MAX_RADIOS] = {0};
3769 struct dp_rxdma_ring *rx_ring;
3770 struct dp_srng *reo_except;
3771 u32 desc_bank, num_msdus;
3772 struct hal_srng *srng;
3773 struct ath11k_dp *dp;
3774 void *link_desc_va;
3775 int buf_id, mac_id;
3776 struct ath11k *ar;
3777 dma_addr_t paddr;
3778 u32 *desc;
3779 bool is_frag;
3780 bool drop = false;
3781
3782 tot_n_bufs_reaped = 0;
3783 quota = budget;
3784
3785 dp = &ab->dp;
3786 reo_except = &dp->reo_except_ring;
3787 link_desc_banks = dp->link_desc_banks;
3788
3789 srng = &ab->hal.srng_list[reo_except->ring_id];
3790
3791 spin_lock_bh(&srng->lock);
3792
3793 ath11k_hal_srng_access_begin(ab, srng);
3794
3795 while (budget &&
3796 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3797 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
3798
3799 ab->soc_stats.err_ring_pkts++;
3800 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
3801 &desc_bank);
3802 if (ret) {
3803 ath11k_warn(ab, "failed to parse error reo desc %d\n",
3804 ret);
3805 continue;
3806 }
3807 link_desc_va = link_desc_banks[desc_bank].vaddr +
3808 (paddr - link_desc_banks[desc_bank].paddr);
3809 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3810 &rbm);
3811 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
3812 rbm != HAL_RX_BUF_RBM_SW3_BM) {
3813 ab->soc_stats.invalid_rbm++;
3814 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
3815 ath11k_dp_rx_link_desc_return(ab, desc,
3816 HAL_WBM_REL_BM_ACT_REL_MSDU);
3817 continue;
3818 }
3819
3820 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
3821
3822 /* Below, process only rx fragments with one msdu per link desc, and
3823 * drop msdus that are indicated as errors.
3824 */
3825 if (!is_frag || num_msdus > 1) {
3826 drop = true;
3827 /* Return the link desc back to wbm idle list */
3828 ath11k_dp_rx_link_desc_return(ab, desc,
3829 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3830 }
3831
3832 for (i = 0; i < num_msdus; i++) {
3833 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3834 msdu_cookies[i]);
3835
3836 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
3837 msdu_cookies[i]);
3838
3839 ar = ab->pdevs[mac_id].ar;
3840
3841 if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
3842 n_bufs_reaped[mac_id]++;
3843 tot_n_bufs_reaped++;
3844 }
3845 }
3846
3847 if (tot_n_bufs_reaped >= quota) {
3848 tot_n_bufs_reaped = quota;
3849 goto exit;
3850 }
3851
3852 budget = quota - tot_n_bufs_reaped;
3853 }
3854
3855exit:
3856 ath11k_hal_srng_access_end(ab, srng);
3857
3858 spin_unlock_bh(&srng->lock);
3859
3860 for (i = 0; i < ab->num_radios; i++) {
3861 if (!n_bufs_reaped[i])
3862 continue;
3863
3864 ar = ab->pdevs[i].ar;
3865 rx_ring = &ar->dp.rx_refill_buf_ring;
3866
3867 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
3868 ab->hw_params.hal_params->rx_buf_rbm);
3869 }
3870
3871 return tot_n_bufs_reaped;
3872}
3873
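/* Free the trailing buffers of an oversized msdu received as a
 * scatter-gather list; the first buffer is freed by the caller.
 */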
3874static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3875 int msdu_len,
3876 struct sk_buff_head *msdu_list)
3877{
3878 struct sk_buff *skb, *tmp;
3879 struct ath11k_skb_rxcb *rxcb;
3880 int n_buffs;
3881
3882 n_buffs = DIV_ROUND_UP(msdu_len,
3883 (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
3884
3885 skb_queue_walk_safe(msdu_list, skb, tmp) {
3886 rxcb = ATH11K_SKB_RXCB(skb);
3887 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3888 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3889 if (!n_buffs)
3890 break;
3891 __skb_unlink(skb, msdu_list);
3892 dev_kfree_skb_any(skb);
3893 n_buffs--;
3894 }
3895 }
3896}
3897
3898static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
3899 struct ieee80211_rx_status *status,
3900 struct sk_buff_head *msdu_list)
3901{
3902 u16 msdu_len;
3903 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3904 struct rx_attention *rx_attention;
3905 u8 l3pad_bytes;
3906 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3907 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3908
3909 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
3910
3911 if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3912 /* First buffer will be freed by the caller, so deduct its length */
3913 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3914 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3915 return -EINVAL;
3916 }
3917
3918 rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
3919 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
3920 ath11k_warn(ar->ab,
3921 "msdu_done bit not set in null_q_des processing\n");
3922 __skb_queue_purge(msdu_list);
3923 return -EIO;
3924 }
3925
3926 /* Handle NULL queue descriptor violations arising out of a missing
3927 * REO queue for a given peer or TID. This typically happens if a
3928 * packet is received on a QoS-enabled TID before the ADDBA
3929 * negotiation for that TID has set up the TID queue. It may also
3930 * happen for MC/BC frames if they are not routed to the non-QoS TID
3931 * queue in the absence of any other default TID queue. This error
3932 * can show up in both the REO destination and WBM release rings.
3933 */
3934
3935 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
3936 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
3937
3938 if (rxcb->is_frag) {
3939 skb_pull(msdu, hal_rx_desc_sz);
3940 } else {
3941 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
3942
3943 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3944 return -EINVAL;
3945
3946 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3947 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3948 }
3949 ath11k_dp_rx_h_ppdu(ar, desc, status);
3950
3951 ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
3952
3953 rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);
3954
3955 /* Note that the caller still has access to the msdu and completes
3956 * rx with mac80211, so there is no need to clean up amsdu_list here.
3957 */
3958
3959 return 0;
3960}
3961
3962static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
3963 struct ieee80211_rx_status *status,
3964 struct sk_buff_head *msdu_list)
3965{
3966 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3967 bool drop = false;
3968
3969 ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3970
3971 switch (rxcb->err_code) {
3972 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3973 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3974 drop = true;
3975 break;
3976 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3977 /* TODO: Do not drop PN failed packets in the driver;
3978 * instead, it would be better to drop such packets in mac80211
3979 * after incrementing the replay counters.
3980 */
3981 fallthrough;
3982 default:
3983 /* TODO: Review other errors and process them to mac80211
3984 * as appropriate.
3985 */
3986 drop = true;
3987 break;
3988 }
3989
3990 return drop;
3991}
3992
3993static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
3994 struct ieee80211_rx_status *status)
3995{
3996 u16 msdu_len;
3997 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3998 u8 l3pad_bytes;
3999 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4000 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
4001
4002 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
4003 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
4004
4005 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
4006 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
4007 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
4008 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
4009
4010 ath11k_dp_rx_h_ppdu(ar, desc, status);
4011
4012 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
4013 RX_FLAG_DECRYPTED);
4014
4015 ath11k_dp_rx_h_undecap(ar, msdu, desc,
4016 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
4017}
4018
4019static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
4020 struct ieee80211_rx_status *status)
4021{
4022 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4023 bool drop = false;
4024
4025 ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
4026
4027 switch (rxcb->err_code) {
4028 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
4029 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
4030 break;
4031 default:
4032 /* TODO: Review other rxdma error codes to check if anything is
4033 * worth reporting to mac80211.
4034 */
4035 drop = true;
4036 break;
4037 }
4038
4039 return drop;
4040}
4041
4042static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
4043 struct napi_struct *napi,
4044 struct sk_buff *msdu,
4045 struct sk_buff_head *msdu_list)
4046{
4047 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4048 struct ieee80211_rx_status rxs = {0};
4049 bool drop = true;
4050
4051 switch (rxcb->err_rel_src) {
4052 case HAL_WBM_REL_SRC_MODULE_REO:
4053 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
4054 break;
4055 case HAL_WBM_REL_SRC_MODULE_RXDMA:
4056 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
4057 break;
4058 default:
4059 /* msdu will get freed */
4060 break;
4061 }
4062
4063 if (drop) {
4064 dev_kfree_skb_any(msdu);
4065 return;
4066 }
4067
4068 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
4069}
4070
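/* Drain the WBM error release ring: unmap each reaped msdu, queue it on a
 * per-pdev list, replenish the refill rings and then let the REO/RXDMA
 * error handlers decide whether each msdu can still go to mac80211.
 */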
4071int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
4072 struct napi_struct *napi, int budget)
4073{
4074 struct ath11k *ar;
4075 struct ath11k_dp *dp = &ab->dp;
4076 struct dp_rxdma_ring *rx_ring;
4077 struct hal_rx_wbm_rel_info err_info;
4078 struct hal_srng *srng;
4079 struct sk_buff *msdu;
4080 struct sk_buff_head msdu_list[MAX_RADIOS];
4081 struct ath11k_skb_rxcb *rxcb;
4082 u32 *rx_desc;
4083 int buf_id, mac_id;
4084 int num_buffs_reaped[MAX_RADIOS] = {0};
4085 int total_num_buffs_reaped = 0;
4086 int ret, i;
4087
4088 for (i = 0; i < ab->num_radios; i++)
4089 __skb_queue_head_init(&msdu_list[i]);
4090
4091 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
4092
4093 spin_lock_bh(&srng->lock);
4094
4095 ath11k_hal_srng_access_begin(ab, srng);
4096
4097 while (budget) {
4098 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
4099 if (!rx_desc)
4100 break;
4101
4102 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
4103 if (ret) {
4104 ath11k_warn(ab,
4105 "failed to parse rx error in wbm_rel ring desc %d\n",
4106 ret);
4107 continue;
4108 }
4109
4110 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
4111 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
4112
4113 ar = ab->pdevs[mac_id].ar;
4114 rx_ring = &ar->dp.rx_refill_buf_ring;
4115
4116 spin_lock_bh(&rx_ring->idr_lock);
4117 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4118 if (!msdu) {
4119 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
4120 buf_id, mac_id);
4121 spin_unlock_bh(&rx_ring->idr_lock);
4122 continue;
4123 }
4124
4125 idr_remove(&rx_ring->bufs_idr, buf_id);
4126 spin_unlock_bh(&rx_ring->idr_lock);
4127
4128 rxcb = ATH11K_SKB_RXCB(msdu);
4129 dma_unmap_single(ab->dev, rxcb->paddr,
4130 msdu->len + skb_tailroom(msdu),
4131 DMA_FROM_DEVICE);
4132
4133 num_buffs_reaped[mac_id]++;
4134 total_num_buffs_reaped++;
4135 budget--;
4136
4137 if (err_info.push_reason !=
4138 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4139 dev_kfree_skb_any(msdu);
4140 continue;
4141 }
4142
4143 rxcb->err_rel_src = err_info.err_rel_src;
4144 rxcb->err_code = err_info.err_code;
4145 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
4146 __skb_queue_tail(&msdu_list[mac_id], msdu);
4147 }
4148
4149 ath11k_hal_srng_access_end(ab, srng);
4150
4151 spin_unlock_bh(&srng->lock);
4152
4153 if (!total_num_buffs_reaped)
4154 goto done;
4155
4156 for (i = 0; i < ab->num_radios; i++) {
4157 if (!num_buffs_reaped[i])
4158 continue;
4159
4160 ar = ab->pdevs[i].ar;
4161 rx_ring = &ar->dp.rx_refill_buf_ring;
4162
4163 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
4164 ab->hw_params.hal_params->rx_buf_rbm);
4165 }
4166
4167 rcu_read_lock();
4168 for (i = 0; i < ab->num_radios; i++) {
4169 if (!rcu_dereference(ab->pdevs_active[i])) {
4170 __skb_queue_purge(&msdu_list[i]);
4171 continue;
4172 }
4173
4174 ar = ab->pdevs[i].ar;
4175
4176 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
4177 __skb_queue_purge(&msdu_list[i]);
4178 continue;
4179 }
4180
4181 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
4182 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
4183 }
4184 rcu_read_unlock();
4185done:
4186 return total_num_buffs_reaped;
4187}
4188
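/* Process the per-pdev RXDMA error destination ring. The buffers pointed
 * to by each link descriptor are unmapped and freed, the link descriptor
 * is returned to the WBM idle list and the consumed buffers are
 * replenished.
 */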
4189int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
4190{
4191 struct ath11k *ar;
4192 struct dp_srng *err_ring;
4193 struct dp_rxdma_ring *rx_ring;
4194 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
4195 struct hal_srng *srng;
4196 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
4197 enum hal_rx_buf_return_buf_manager rbm;
4198 enum hal_reo_entr_rxdma_ecode rxdma_err_code;
4199 struct ath11k_skb_rxcb *rxcb;
4200 struct sk_buff *skb;
4201 struct hal_reo_entrance_ring *entr_ring;
4202 void *desc;
4203 int num_buf_freed = 0;
4204 int quota = budget;
4205 dma_addr_t paddr;
4206 u32 desc_bank;
4207 void *link_desc_va;
4208 int num_msdus;
4209 int i;
4210 int buf_id;
4211
4212 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
4213 err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
4214 mac_id)];
4215 rx_ring = &ar->dp.rx_refill_buf_ring;
4216
4217 srng = &ab->hal.srng_list[err_ring->ring_id];
4218
4219 spin_lock_bh(&srng->lock);
4220
4221 ath11k_hal_srng_access_begin(ab, srng);
4222
4223 while (quota-- &&
4224 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4225 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
4226
4227 entr_ring = (struct hal_reo_entrance_ring *)desc;
4228 rxdma_err_code =
4229 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4230 entr_ring->info1);
4231 ab->soc_stats.rxdma_error[rxdma_err_code]++;
4232
4233 link_desc_va = link_desc_banks[desc_bank].vaddr +
4234 (paddr - link_desc_banks[desc_bank].paddr);
4235 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
4236 msdu_cookies, &rbm);
4237
4238 for (i = 0; i < num_msdus; i++) {
4239 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4240 msdu_cookies[i]);
4241
4242 spin_lock_bh(&rx_ring->idr_lock);
4243 skb = idr_find(&rx_ring->bufs_idr, buf_id);
4244 if (!skb) {
4245 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
4246 buf_id);
4247 spin_unlock_bh(&rx_ring->idr_lock);
4248 continue;
4249 }
4250
4251 idr_remove(&rx_ring->bufs_idr, buf_id);
4252 spin_unlock_bh(&rx_ring->idr_lock);
4253
4254 rxcb = ATH11K_SKB_RXCB(skb);
4255 dma_unmap_single(ab->dev, rxcb->paddr,
4256 skb->len + skb_tailroom(skb),
4257 DMA_FROM_DEVICE);
4258 dev_kfree_skb_any(skb);
4259
4260 num_buf_freed++;
4261 }
4262
4263 ath11k_dp_rx_link_desc_return(ab, desc,
4264 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4265 }
4266
4267 ath11k_hal_srng_access_end(ab, srng);
4268
4269 spin_unlock_bh(&srng->lock);
4270
4271 if (num_buf_freed)
4272 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
4273 ab->hw_params.hal_params->rx_buf_rbm);
4274
4275 return budget - quota;
4276}
4277
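/* Drain the REO status ring and dispatch each status TLV to its parser.
 * A status matching a pending command in reo_cmd_list completes that
 * command: the entry is unlinked and its handler is called with the
 * command status.
 */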
4278void ath11k_dp_process_reo_status(struct ath11k_base *ab)
4279{
4280 struct ath11k_dp *dp = &ab->dp;
4281 struct hal_srng *srng;
4282 struct dp_reo_cmd *cmd, *tmp;
4283 bool found = false;
4284 u32 *reo_desc;
4285 u16 tag;
4286 struct hal_reo_status reo_status;
4287
4288 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
4289
4290 memset(&reo_status, 0, sizeof(reo_status));
4291
4292 spin_lock_bh(&srng->lock);
4293
4294 ath11k_hal_srng_access_begin(ab, srng);
4295
4296 while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4297 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
4298
4299 switch (tag) {
4300 case HAL_REO_GET_QUEUE_STATS_STATUS:
4301 ath11k_hal_reo_status_queue_stats(ab, reo_desc,
4302 &reo_status);
4303 break;
4304 case HAL_REO_FLUSH_QUEUE_STATUS:
4305 ath11k_hal_reo_flush_queue_status(ab, reo_desc,
4306 &reo_status);
4307 break;
4308 case HAL_REO_FLUSH_CACHE_STATUS:
4309 ath11k_hal_reo_flush_cache_status(ab, reo_desc,
4310 &reo_status);
4311 break;
4312 case HAL_REO_UNBLOCK_CACHE_STATUS:
4313 ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
4314 &reo_status);
4315 break;
4316 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
4317 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
4318 &reo_status);
4319 break;
4320 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
4321 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
4322 &reo_status);
4323 break;
4324 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
4325 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
4326 &reo_status);
4327 break;
4328 default:
4329 ath11k_warn(ab, "Unknown reo status type %d\n", tag);
4330 continue;
4331 }
4332
4333 spin_lock_bh(&dp->reo_cmd_lock);
4334 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
4335 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
4336 found = true;
4337 list_del(&cmd->list);
4338 break;
4339 }
4340 }
4341 spin_unlock_bh(&dp->reo_cmd_lock);
4342
4343 if (found) {
4344 cmd->handler(dp, (void *)&cmd->data,
4345 reo_status.uniform_hdr.cmd_status);
4346 kfree(cmd);
4347 }
4348
4349 found = false;
4350 }
4351
4352 ath11k_hal_srng_access_end(ab, srng);
4353
4354 spin_unlock_bh(&srng->lock);
4355}
4356
4357void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
4358{
4359 struct ath11k *ar = ab->pdevs[mac_id].ar;
4360
4361 ath11k_dp_rx_pdev_srng_free(ar);
4362 ath11k_dp_rxdma_pdev_buf_free(ar);
4363}
4364
4365int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
4366{
4367 struct ath11k *ar = ab->pdevs[mac_id].ar;
4368 struct ath11k_pdev_dp *dp = &ar->dp;
4369 u32 ring_id;
4370 int i;
4371 int ret;
4372
4373 ret = ath11k_dp_rx_pdev_srng_alloc(ar);
4374 if (ret) {
4375 ath11k_warn(ab, "failed to setup rx srngs\n");
4376 return ret;
4377 }
4378
4379 ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
4380 if (ret) {
4381 ath11k_warn(ab, "failed to setup rxdma ring\n");
4382 return ret;
4383 }
4384
4385 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4386 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
4387 if (ret) {
4388 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4389 ret);
4390 return ret;
4391 }
4392
4393 if (ab->hw_params.rx_mac_buf_ring) {
4394 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4395 ring_id = dp->rx_mac_buf_ring[i].ring_id;
4396 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4397 mac_id + i, HAL_RXDMA_BUF);
4398 if (ret) {
4399 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4400 i, ret);
4401 return ret;
4402 }
4403 }
4404 }
4405
4406 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4407 ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4408 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4409 mac_id + i, HAL_RXDMA_DST);
4410 if (ret) {
4411 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
4412 i, ret);
4413 return ret;
4414 }
4415 }
4416
4417 if (!ab->hw_params.rxdma1_enable)
4418 goto config_refill_ring;
4419
4420 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4421 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4422 mac_id, HAL_RXDMA_MONITOR_BUF);
4423 if (ret) {
4424 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4425 ret);
4426 return ret;
4427 }
4428 ret = ath11k_dp_tx_htt_srng_setup(ab,
4429 dp->rxdma_mon_dst_ring.ring_id,
4430 mac_id, HAL_RXDMA_MONITOR_DST);
4431 if (ret) {
4432 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4433 ret);
4434 return ret;
4435 }
4436 ret = ath11k_dp_tx_htt_srng_setup(ab,
4437 dp->rxdma_mon_desc_ring.ring_id,
4438 mac_id, HAL_RXDMA_MONITOR_DESC);
4439 if (ret) {
4440 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4441 ret);
4442 return ret;
4443 }
4444
4445config_refill_ring:
4446 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4447 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
4448 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
4449 HAL_RXDMA_MONITOR_STATUS);
4450 if (ret) {
4451 ath11k_warn(ab,
4452 "failed to configure mon_status_refill_ring%d %d\n",
4453 i, ret);
4454 return ret;
4455 }
4456 }
4457
4458 return 0;
4459}
4460
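/* Split the remaining length of an msdu into the next fragment length,
 * capped at what one rx buffer can carry after the hal_rx_desc.
 */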
4461static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
4462{
4463 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
4464 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
4465 *total_len -= *frag_len;
4466 } else {
4467 *frag_len = *total_len;
4468 *total_len = 0;
4469 }
4470}
4471
4472static
4473int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4474 void *p_last_buf_addr_info,
4475 u8 mac_id)
4476{
4477 struct ath11k_pdev_dp *dp = &ar->dp;
4478 struct dp_srng *dp_srng;
4479 void *hal_srng;
4480 void *src_srng_desc;
4481 int ret = 0;
4482
4483 if (ar->ab->hw_params.rxdma1_enable) {
4484 dp_srng = &dp->rxdma_mon_desc_ring;
4485 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4486 } else {
4487 dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
4488 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4489 }
4490
4491 ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4492
4493 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4494
4495 if (src_srng_desc) {
4496 struct ath11k_buffer_addr *src_desc = src_srng_desc;
4497
4498 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4499 } else {
4500 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4501 "Monitor Link Desc Ring %d Full", mac_id);
4502 ret = -ENOMEM;
4503 }
4504
4505 ath11k_hal_srng_access_end(ar->ab, hal_srng);
4506 return ret;
4507}
4508
4509static
4510void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4511 dma_addr_t *paddr, u32 *sw_cookie,
4512 u8 *rbm,
4513 void **pp_buf_addr_info)
4514{
4515 struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc;
4516 struct ath11k_buffer_addr *buf_addr_info;
4517
4518 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4519
4520 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
4521
4522 *pp_buf_addr_info = (void *)buf_addr_info;
4523}
4524
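/* Resize the skb to exactly len bytes, expanding the tailroom if needed;
 * on allocation failure the skb is freed and -ENOMEM is returned.
 */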
4525static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
4526{
4527 if (skb->len > len) {
4528 skb_trim(skb, len);
4529 } else {
4530 if (skb_tailroom(skb) < len - skb->len) {
4531 if ((pskb_expand_head(skb, 0,
4532 len - skb->len - skb_tailroom(skb),
4533 GFP_ATOMIC))) {
4534 dev_kfree_skb_any(skb);
4535 return -ENOMEM;
4536 }
4537 }
4538 skb_put(skb, (len - skb->len));
4539 }
4540 return 0;
4541}
4542
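/* Unpack up to HAL_RX_NUM_MSDU_DESC msdu descriptors from a link
 * descriptor into msdu_list, marking the first and last msdus and
 * stopping at the first empty buffer address.
 */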
4543static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
4544 void *msdu_link_desc,
4545 struct hal_rx_msdu_list *msdu_list,
4546 u16 *num_msdus)
4547{
4548 struct hal_rx_msdu_details *msdu_details = NULL;
4549 struct rx_msdu_desc *msdu_desc_info = NULL;
4550 struct hal_rx_msdu_link *msdu_link = NULL;
4551 int i;
4552 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
4553 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
4554 u8 tmp = 0;
4555
4556 msdu_link = msdu_link_desc;
4557 msdu_details = &msdu_link->msdu_link[0];
4558
4559 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
4560 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
4561 msdu_details[i].buf_addr_info.info0) == 0) {
4562 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
4563 msdu_desc_info->info0 |= last;
4565 break;
4566 }
4567 msdu_desc_info = &msdu_details[i].rx_msdu_info;
4568
4569 if (!i)
4570 msdu_desc_info->info0 |= first;
4571 else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
4572 msdu_desc_info->info0 |= last;
4573 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
4574 msdu_list->msdu_info[i].msdu_len =
4575 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
4576 msdu_list->sw_cookie[i] =
4577 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
4578 msdu_details[i].buf_addr_info.info1);
4579 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
4580 msdu_details[i].buf_addr_info.info1);
4581 msdu_list->rbm[i] = tmp;
4582 }
4583 *num_msdus = i;
4584}
4585
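/* Compare the ppdu id popped from the destination ring against the one
 * the status ring is working on, allowing for wraparound. Returns 0 when
 * they match; otherwise *ppdu_id is updated to the destination id, and
 * *rx_bufs_used is bumped when the stale destination entry is to be
 * dropped.
 */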
4586static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
4587 u32 *rx_bufs_used)
4588{
4589 u32 ret = 0;
4590
4591 if ((*ppdu_id < msdu_ppdu_id) &&
4592 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
4593 *ppdu_id = msdu_ppdu_id;
4594 ret = msdu_ppdu_id;
4595 } else if ((*ppdu_id > msdu_ppdu_id) &&
4596 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
4597 /* mon_dst is lagging behind mon_status;
4598 * skip this dst_ring entry and free it
4599 */
4600 *rx_bufs_used += 1;
4601 *ppdu_id = msdu_ppdu_id;
4602 ret = msdu_ppdu_id;
4603 }
4604 return ret;
4605}
4606
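/* Derive the buffer length for this msdu: a continuation buffer carries
 * a full buffer's worth of the msdu, while the final (or only) buffer
 * carries the remainder and closes the msdu, decrementing the
 * outstanding msdu count.
 */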
4607static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
4608 bool *is_frag, u32 *total_len,
4609 u32 *frag_len, u32 *msdu_cnt)
4610{
4611 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
4612 if (!*is_frag) {
4613 *total_len = info->msdu_len;
4614 *is_frag = true;
4615 }
4616 ath11k_dp_mon_set_frag_len(total_len,
4617 frag_len);
4618 } else {
4619 if (*is_frag) {
4620 ath11k_dp_mon_set_frag_len(total_len,
4621 frag_len);
4622 } else {
4623 *frag_len = info->msdu_len;
4624 }
4625 *is_frag = false;
4626 *msdu_cnt -= 1;
4627 }
4628}
4629
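/* Walk the chain of msdu link descriptors behind one destination ring
 * entry and chain the msdus of the mpdu into head_msdu/tail_msdu,
 * skipping duplicate link descriptors and buffers, rxdma-errored mpdus
 * and ppdu id mismatches along the way. Returns the number of rx buffers
 * consumed.
 */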
4630static u32
4631ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
4632 void *ring_entry, struct sk_buff **head_msdu,
4633 struct sk_buff **tail_msdu, u32 *npackets,
4634 u32 *ppdu_id)
4635{
4636 struct ath11k_pdev_dp *dp = &ar->dp;
4637 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4638 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
4639 struct sk_buff *msdu = NULL, *last = NULL;
4640 struct hal_rx_msdu_list msdu_list;
4641 void *p_buf_addr_info, *p_last_buf_addr_info;
4642 struct hal_rx_desc *rx_desc;
4643 void *rx_msdu_link_desc;
4644 dma_addr_t paddr;
4645 u16 num_msdus = 0;
4646 u32 rx_buf_size, rx_pkt_offset, sw_cookie;
4647 u32 rx_bufs_used = 0, i = 0;
4648 u32 msdu_ppdu_id = 0, msdu_cnt = 0;
4649 u32 total_len = 0, frag_len = 0;
4650 bool is_frag, is_first_msdu;
4651 bool drop_mpdu = false;
4652 struct ath11k_skb_rxcb *rxcb;
4653 struct hal_reo_entrance_ring *ent_desc = ring_entry;
4654 int buf_id;
4655 u32 rx_link_buf_info[2];
4656 u8 rbm;
4657
4658 if (!ar->ab->hw_params.rxdma1_enable)
4659 rx_ring = &dp->rx_refill_buf_ring;
4660
4661 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
4662 &sw_cookie,
4663 &p_last_buf_addr_info, &rbm,
4664 &msdu_cnt);
4665
4666 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
4667 ent_desc->info1) ==
4668 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4669 u8 rxdma_err =
4670 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4671 ent_desc->info1);
4672 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
4673 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
4674 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
4675 drop_mpdu = true;
4676 pmon->rx_mon_stats.dest_mpdu_drop++;
4677 }
4678 }
4679
4680 is_frag = false;
4681 is_first_msdu = true;
4682
4683 do {
4684 if (pmon->mon_last_linkdesc_paddr == paddr) {
4685 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
4686 return rx_bufs_used;
4687 }
4688
4689 if (ar->ab->hw_params.rxdma1_enable)
4690 rx_msdu_link_desc =
4691 (void *)pmon->link_desc_banks[sw_cookie].vaddr +
4692 (paddr - pmon->link_desc_banks[sw_cookie].paddr);
4693 else
4694 rx_msdu_link_desc =
4695 (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
4696 (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
4697
4698 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
4699 &num_msdus);
4700
4701 for (i = 0; i < num_msdus; i++) {
4702 u32 l2_hdr_offset;
4703
4704 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
4705 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4706 "i %d last_cookie %d is same\n",
4707 i, pmon->mon_last_buf_cookie);
4708 drop_mpdu = true;
4709 pmon->rx_mon_stats.dup_mon_buf_cnt++;
4710 continue;
4711 }
4712 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4713 msdu_list.sw_cookie[i]);
4714
4715 spin_lock_bh(&rx_ring->idr_lock);
4716 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4717 spin_unlock_bh(&rx_ring->idr_lock);
4718 if (!msdu) {
4719 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4720 "msdu_pop: invalid buf_id %d\n", buf_id);
4721 break;
4722 }
4723 rxcb = ATH11K_SKB_RXCB(msdu);
4724 if (!rxcb->unmapped) {
4725 dma_unmap_single(ar->ab->dev, rxcb->paddr,
4726 msdu->len +
4727 skb_tailroom(msdu),
4728 DMA_FROM_DEVICE);
4729 rxcb->unmapped = 1;
4730 }
4731 if (drop_mpdu) {
4732 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4733 "i %d drop msdu %p *ppdu_id %x\n",
4734 i, msdu, *ppdu_id);
4735 dev_kfree_skb_any(msdu);
4736 msdu = NULL;
4737 goto next_msdu;
4738 }
4739
4740 rx_desc = (struct hal_rx_desc *)msdu->data;
4741
4742 rx_pkt_offset = sizeof(struct hal_rx_desc);
4743 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
4744
4745 if (is_first_msdu) {
4746 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
4747 drop_mpdu = true;
4748 dev_kfree_skb_any(msdu);
4749 msdu = NULL;
4750 pmon->mon_last_linkdesc_paddr = paddr;
4751 goto next_msdu;
4752 }
4753
4754 msdu_ppdu_id =
4755 ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
4756
4757 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
4758 ppdu_id,
4759 &rx_bufs_used)) {
4760 if (rx_bufs_used) {
4761 drop_mpdu = true;
4762 dev_kfree_skb_any(msdu);
4763 msdu = NULL;
4764 goto next_msdu;
4765 }
4766 return rx_bufs_used;
4767 }
4768 pmon->mon_last_linkdesc_paddr = paddr;
4769 is_first_msdu = false;
4770 }
4771 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
4772 &is_frag, &total_len,
4773 &frag_len, &msdu_cnt);
4774 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
4775
4776 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
4777
4778 if (!(*head_msdu))
4779 *head_msdu = msdu;
4780 else if (last)
4781 last->next = msdu;
4782
4783 last = msdu;
4784next_msdu:
4785 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
4786 rx_bufs_used++;
4787 spin_lock_bh(&rx_ring->idr_lock);
4788 idr_remove(&rx_ring->bufs_idr, buf_id);
4789 spin_unlock_bh(&rx_ring->idr_lock);
4790 }
4791
4792 ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
4793
4794 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
4795 &sw_cookie, &rbm,
4796 &p_buf_addr_info);
4797
4798 if (ar->ab->hw_params.rxdma1_enable) {
4799 if (ath11k_dp_rx_monitor_link_desc_return(ar,
4800 p_last_buf_addr_info,
4801 dp->mac_id))
4802 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4803 "dp_rx_monitor_link_desc_return failed");
4804 } else {
4805 ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
4806 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4807 }
4808
4809 p_last_buf_addr_info = p_buf_addr_info;
4810
4811 } while (paddr && msdu_cnt);
4812
4813 if (last)
4814 last->next = NULL;
4815
4816 *tail_msdu = msdu;
4817
4818 if (msdu_cnt == 0)
4819 *npackets = 1;
4820
4821 return rx_bufs_used;
4822}
4823
4824static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
4825{
4826 u32 rx_pkt_offset, l2_hdr_offset;
4827
4828 rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
4829 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
4830 (struct hal_rx_desc *)msdu->data);
4831 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4832}
4833
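/* Stitch the popped msdus of one mpdu into a monitor frame: strip the rx
 * descriptors, trim the FCS for raw decap, and for native wifi decap
 * prepend the 802.11 QoS header to each msdu of a QoS mpdu.
 */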
4834static struct sk_buff *
4835ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
4836 u32 mac_id, struct sk_buff *head_msdu,
4837 struct sk_buff *last_msdu,
4838 struct ieee80211_rx_status *rxs, bool *fcs_err)
4839{
4840 struct ath11k_base *ab = ar->ab;
4841 struct sk_buff *msdu, *prev_buf;
4842 struct hal_rx_desc *rx_desc;
4843 char *hdr_desc;
4844 u8 *dest, decap_format;
4845 struct ieee80211_hdr_3addr *wh;
4846 struct rx_attention *rx_attention;
4847 u32 err_bitmap;
4848
4849 if (!head_msdu)
4850 goto err_merge_fail;
4851
4852 rx_desc = (struct hal_rx_desc *)head_msdu->data;
4853 rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
4854 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
4855
4856 if (err_bitmap & DP_RX_MPDU_ERR_FCS)
4857 *fcs_err = true;
4858
4859 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
4860 return NULL;
4861
4862 decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);
4863
4864 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
4865
4866 if (decap_format == DP_RX_DECAP_TYPE_RAW) {
4867 ath11k_dp_rx_msdus_set_payload(ar, head_msdu);
4868
4869 prev_buf = head_msdu;
4870 msdu = head_msdu->next;
4871
4872 while (msdu) {
4873 ath11k_dp_rx_msdus_set_payload(ar, msdu);
4874
4875 prev_buf = msdu;
4876 msdu = msdu->next;
4877 }
4878
4879 prev_buf->next = NULL;
4880
4881 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
4882 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
4883 u8 qos_pkt = 0;
4884
4885 rx_desc = (struct hal_rx_desc *)head_msdu->data;
4886 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
4887
4888 /* Base size */
4889 wh = (struct ieee80211_hdr_3addr *)hdr_desc;
4890
4891 if (ieee80211_is_data_qos(wh->frame_control))
4892 qos_pkt = 1;
4893
4894 msdu = head_msdu;
4895
4896 while (msdu) {
4897 ath11k_dp_rx_msdus_set_payload(ar, msdu);
4898 if (qos_pkt) {
4899 dest = skb_push(msdu, sizeof(__le16));
4900 if (!dest)
4901 goto err_merge_fail;
4902 memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
4903 }
4904 prev_buf = msdu;
4905 msdu = msdu->next;
4906 }
4907 dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
4908 if (!dest)
4909 goto err_merge_fail;
4910
4911 ath11k_dbg(ab, ATH11K_DBG_DATA,
4912 "mpdu_buf %p mpdu_buf->len %u",
4913 prev_buf, prev_buf->len);
4914 } else {
4915 ath11k_dbg(ab, ATH11K_DBG_DATA,
4916 "decap format %d is not supported!\n",
4917 decap_format);
4918 goto err_merge_fail;
4919 }
4920
4921 return head_msdu;
4922
4923err_merge_fail:
4924 return NULL;
4925}
4926
4927static void
4928ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
4929 u8 *rtap_buf)
4930{
4931 u32 rtap_len = 0;
4932
4933 put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4934 rtap_len += 2;
4935
4936 put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
4937 rtap_len += 2;
4938
4939 put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
4940 rtap_len += 2;
4941
4942 put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
4943 rtap_len += 2;
4944
4945 put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
4946 rtap_len += 2;
4947
4948 put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
4949}
4950
4951static void
4952ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
4953 u8 *rtap_buf)
4954{
4955 u32 rtap_len = 0;
4956
4957 put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
4958 rtap_len += 2;
4959
4960 put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
4961 rtap_len += 2;
4962
4963 rtap_buf[rtap_len] = rx_status->he_RU[0];
4964 rtap_len += 1;
4965
4966 rtap_buf[rtap_len] = rx_status->he_RU[1];
4967 rtap_len += 1;
4968
4969 rtap_buf[rtap_len] = rx_status->he_RU[2];
4970 rtap_len += 1;
4971
4972 rtap_buf[rtap_len] = rx_status->he_RU[3];
4973}
4974
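/* Translate the parsed ppdu info into mac80211 rx status, pushing
 * radiotap HE/HE-MU telemetry in front of the frame when present.
 */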
4975static void ath11k_update_radiotap(struct ath11k *ar,
4976 struct hal_rx_mon_ppdu_info *ppduinfo,
4977 struct sk_buff *mon_skb,
4978 struct ieee80211_rx_status *rxs)
4979{
4980 struct ieee80211_supported_band *sband;
4981 u8 *ptr = NULL;
4982
4983 rxs->flag |= RX_FLAG_MACTIME_START;
4984 rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
4985
4986 if (ppduinfo->nss)
4987 rxs->nss = ppduinfo->nss;
4988
4989 if (ppduinfo->he_mu_flags) {
4990 rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
4991 rxs->encoding = RX_ENC_HE;
4992 ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
4993 ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
4994 } else if (ppduinfo->he_flags) {
4995 rxs->flag |= RX_FLAG_RADIOTAP_HE;
4996 rxs->encoding = RX_ENC_HE;
4997 ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
4998 ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
4999 rxs->rate_idx = ppduinfo->rate;
5000 } else if (ppduinfo->vht_flags) {
5001 rxs->encoding = RX_ENC_VHT;
5002 rxs->rate_idx = ppduinfo->rate;
5003 } else if (ppduinfo->ht_flags) {
5004 rxs->encoding = RX_ENC_HT;
5005 rxs->rate_idx = ppduinfo->rate;
5006 } else {
5007 rxs->encoding = RX_ENC_LEGACY;
5008 sband = &ar->mac.sbands[rxs->band];
5009 rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
5010 ppduinfo->cck_flag);
5011 }
5012
5013 rxs->mactime = ppduinfo->tsft;
5014}
5015
5016static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
5017 struct sk_buff *head_msdu,
5018 struct hal_rx_mon_ppdu_info *ppduinfo,
5019 struct sk_buff *tail_msdu,
5020 struct napi_struct *napi)
5021{
5022 struct ath11k_pdev_dp *dp = &ar->dp;
5023 struct sk_buff *mon_skb, *skb_next, *header;
5024 struct ieee80211_rx_status *rxs = &dp->rx_status;
5025 bool fcs_err = false;
5026
5027 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
5028 tail_msdu, rxs, &fcs_err);
5029
5030 if (!mon_skb)
5031 goto mon_deliver_fail;
5032
5033 header = mon_skb;
5034
5035 rxs->flag = 0;
5036
5037 if (fcs_err)
5038 rxs->flag = RX_FLAG_FAILED_FCS_CRC;
5039
5040 do {
5041 skb_next = mon_skb->next;
5042 if (!skb_next)
5043 rxs->flag &= ~RX_FLAG_AMSDU_MORE;
5044 else
5045 rxs->flag |= RX_FLAG_AMSDU_MORE;
5046
5047 if (mon_skb == header) {
5048 header = NULL;
5049 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
5050 } else {
5051 rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
5052 }
5053 rxs->flag |= RX_FLAG_ONLY_MONITOR;
5054 ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
5055
5056 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
5057 mon_skb = skb_next;
5058 } while (mon_skb);
5059 rxs->flag = 0;
5060
5061 return 0;
5062
5063mon_deliver_fail:
5064 mon_skb = head_msdu;
5065 while (mon_skb) {
5066 skb_next = mon_skb->next;
5067 dev_kfree_skb_any(mon_skb);
5068 mon_skb = skb_next;
5069 }
5070 return -EINVAL;
5071}
5072
5073 /* Destination ring processing is considered stuck if the destination
5074 * ring does not move while the status ring moves 16 PPDUs. As a
5075 * workaround, destination ring processing skips the stuck PPDU.
5076 */
5077#define MON_DEST_RING_STUCK_MAX_CNT 16
5078
5079static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
5080 u32 quota, struct napi_struct *napi)
5081{
5082 struct ath11k_pdev_dp *dp = &ar->dp;
5083 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5084 const struct ath11k_hw_hal_params *hal_params;
5085 void *ring_entry;
5086 void *mon_dst_srng;
5087 u32 ppdu_id;
5088 u32 rx_bufs_used;
5089 u32 ring_id;
5090 struct ath11k_pdev_mon_stats *rx_mon_stats;
5091 u32 npackets = 0;
5092 u32 mpdu_rx_bufs_used;
5093
5094 if (ar->ab->hw_params.rxdma1_enable)
5095 ring_id = dp->rxdma_mon_dst_ring.ring_id;
5096 else
5097 ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
5098
5099 mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
5100
5101 spin_lock_bh(&pmon->mon_lock);
5102
5103 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5104
5105 ppdu_id = pmon->mon_ppdu_info.ppdu_id;
5106 rx_bufs_used = 0;
5107 rx_mon_stats = &pmon->rx_mon_stats;
5108
5109 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5110 struct sk_buff *head_msdu, *tail_msdu;
5111
5112 head_msdu = NULL;
5113 tail_msdu = NULL;
5114
5115 mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
5116 &head_msdu,
5117 &tail_msdu,
5118 &npackets, &ppdu_id);
5119
5120 rx_bufs_used += mpdu_rx_bufs_used;
5121
5122 if (mpdu_rx_bufs_used) {
5123 dp->mon_dest_ring_stuck_cnt = 0;
5124 } else {
5125 dp->mon_dest_ring_stuck_cnt++;
5126 rx_mon_stats->dest_mon_not_reaped++;
5127 }
5128
5129 if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
5130 rx_mon_stats->dest_mon_stuck++;
5131 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5132 "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
5133 pmon->mon_ppdu_info.ppdu_id, ppdu_id,
5134 dp->mon_dest_ring_stuck_cnt,
5135 rx_mon_stats->dest_mon_not_reaped,
5136 rx_mon_stats->dest_mon_stuck);
5137 pmon->mon_ppdu_info.ppdu_id = ppdu_id;
5138 continue;
5139 }
5140
5141 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
5142 pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5143 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5144 "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
5145 ppdu_id, pmon->mon_ppdu_info.ppdu_id,
5146 rx_mon_stats->dest_mon_not_reaped,
5147 rx_mon_stats->dest_mon_stuck);
5148 break;
5149 }
5150 if (head_msdu && tail_msdu) {
5151 ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
5152 &pmon->mon_ppdu_info,
5153 tail_msdu, napi);
5154 rx_mon_stats->dest_mpdu_done++;
5155 }
5156
5157 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5158 mon_dst_srng);
5159 }
5160 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5161
5162 spin_unlock_bh(&pmon->mon_lock);
5163
5164 if (rx_bufs_used) {
5165 rx_mon_stats->dest_ppdu_done++;
5166 hal_params = ar->ab->hw_params.hal_params;
5167
5168 if (ar->ab->hw_params.rxdma1_enable)
5169 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5170 &dp->rxdma_mon_buf_ring,
5171 rx_bufs_used,
5172 hal_params->rx_buf_rbm);
5173 else
5174 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5175 &dp->rx_refill_buf_ring,
5176 rx_bufs_used,
5177 hal_params->rx_buf_rbm);
5178 }
5179}
5180
5181int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
5182 struct napi_struct *napi, int budget)
5183{
5184 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5185 enum hal_rx_mon_status hal_status;
5186 struct sk_buff *skb;
5187 struct sk_buff_head skb_list;
5188 struct ath11k_peer *peer;
5189 struct ath11k_sta *arsta;
5190 int num_buffs_reaped = 0;
5191 u32 rx_buf_sz;
5192 u16 log_type;
5193 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
5194 struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
5195 struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
5196
5197 __skb_queue_head_init(&skb_list);
5198
5199 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
5200 &skb_list);
5201 if (!num_buffs_reaped)
5202 goto exit;
5203
5204 memset(ppdu_info, 0, sizeof(*ppdu_info));
5205 ppdu_info->peer_id = HAL_INVALID_PEERID;
5206
5207 while ((skb = __skb_dequeue(&skb_list))) {
5208 if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
5209 log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
5210 rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
5211 } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
5212 log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
5213 rx_buf_sz = DP_RX_BUFFER_SIZE;
5214 } else {
5215 log_type = ATH11K_PKTLOG_TYPE_INVALID;
5216 rx_buf_sz = 0;
5217 }
5218
5219 if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
5220 trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5221
5222 memset(ppdu_info, 0, sizeof(*ppdu_info));
5223 ppdu_info->peer_id = HAL_INVALID_PEERID;
5224 hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
5225
5226 if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5227 pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
5228 hal_status == HAL_TLV_STATUS_PPDU_DONE) {
5229 rx_mon_stats->status_ppdu_done++;
5230 pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
5231 ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
5232 pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5233 }
5234
5235 if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
5236 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
5237 dev_kfree_skb_any(skb);
5238 continue;
5239 }
5240
5241 rcu_read_lock();
5242 spin_lock_bh(&ab->base_lock);
5243 peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
5244
5245 if (!peer || !peer->sta) {
5246 ath11k_dbg(ab, ATH11K_DBG_DATA,
5247 "failed to find the peer with peer_id %d\n",
5248 ppdu_info->peer_id);
5249 goto next_skb;
5250 }
5251
5252 arsta = ath11k_sta_to_arsta(peer->sta);
5253 ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
5254
5255 if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
5256 trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5257
5258next_skb:
5259 spin_unlock_bh(&ab->base_lock);
5260 rcu_read_unlock();
5261
5262 dev_kfree_skb_any(skb);
5263 memset(ppdu_info, 0, sizeof(*ppdu_info));
5264 ppdu_info->peer_id = HAL_INVALID_PEERID;
5265 }
5266exit:
5267 return num_buffs_reaped;
5268}
5269
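/* Full monitor variant of the mpdu pop: consume one SW monitor ring
 * entry, fill sw_mon_entries from it and chain the msdus of the mpdu.
 * An end-of-ppdu entry carries no buffers and returns immediately.
 */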
5270static u32
5271ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
5272 void *ring_entry, struct sk_buff **head_msdu,
5273 struct sk_buff **tail_msdu,
5274 struct hal_sw_mon_ring_entries *sw_mon_entries)
5275{
5276 struct ath11k_pdev_dp *dp = &ar->dp;
5277 struct ath11k_mon_data *pmon = &dp->mon_data;
5278 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
5279 struct sk_buff *msdu = NULL, *last = NULL;
5280 struct hal_sw_monitor_ring *sw_desc = ring_entry;
5281 struct hal_rx_msdu_list msdu_list;
5282 struct hal_rx_desc *rx_desc;
5283 struct ath11k_skb_rxcb *rxcb;
5284 void *rx_msdu_link_desc;
5285 void *p_buf_addr_info, *p_last_buf_addr_info;
5286 int buf_id, i = 0;
5287 u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
5288 u32 rx_bufs_used = 0, msdu_cnt = 0;
5289 u32 total_len = 0, frag_len = 0, sw_cookie;
5290 u16 num_msdus = 0;
5291 u8 rxdma_err, rbm;
5292 bool is_frag, is_first_msdu;
5293 bool drop_mpdu = false;
5294
5295 ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
5296
5297 sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
5298 sw_mon_entries->end_of_ppdu = false;
5299 sw_mon_entries->drop_ppdu = false;
5300 p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
5301 msdu_cnt = sw_mon_entries->msdu_cnt;
5302
5303 sw_mon_entries->end_of_ppdu =
5304 FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
5305 if (sw_mon_entries->end_of_ppdu)
5306 return rx_bufs_used;
5307
5308 if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
5309 sw_desc->info0) ==
5310 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
5311 rxdma_err =
5312 FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
5313 sw_desc->info0);
5314 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
5315 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
5316 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
5317 pmon->rx_mon_stats.dest_mpdu_drop++;
5318 drop_mpdu = true;
5319 }
5320 }
5321
5322 is_frag = false;
5323 is_first_msdu = true;
5324
5325 do {
5326 rx_msdu_link_desc =
5327 (u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
5328 (sw_mon_entries->mon_dst_paddr -
5329 pmon->link_desc_banks[sw_cookie].paddr);
5330
5331 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
5332 &num_msdus);
5333
5334 for (i = 0; i < num_msdus; i++) {
5335 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
5336 msdu_list.sw_cookie[i]);
5337
5338 spin_lock_bh(&rx_ring->idr_lock);
5339 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
5340 if (!msdu) {
5341 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5342 "full mon msdu_pop: invalid buf_id %d\n",
5343 buf_id);
5344 spin_unlock_bh(&rx_ring->idr_lock);
5345 break;
5346 }
5347 idr_remove(&rx_ring->bufs_idr, buf_id);
5348 spin_unlock_bh(&rx_ring->idr_lock);
5349
5350 rxcb = ATH11K_SKB_RXCB(msdu);
5351 if (!rxcb->unmapped) {
5352 dma_unmap_single(ar->ab->dev, rxcb->paddr,
5353 msdu->len +
5354 skb_tailroom(msdu),
5355 DMA_FROM_DEVICE);
5356 rxcb->unmapped = 1;
5357 }
5358 if (drop_mpdu) {
5359 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5360 "full mon: i %d drop msdu %p *ppdu_id %x\n",
5361 i, msdu, sw_mon_entries->ppdu_id);
5362 dev_kfree_skb_any(msdu);
5363 msdu_cnt--;
5364 goto next_msdu;
5365 }
5366
5367 rx_desc = (struct hal_rx_desc *)msdu->data;
5368
5369 rx_pkt_offset = sizeof(struct hal_rx_desc);
5370 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
5371
5372 if (is_first_msdu) {
5373 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
5374 drop_mpdu = true;
5375 dev_kfree_skb_any(msdu);
5376 msdu = NULL;
5377 goto next_msdu;
5378 }
5379 is_first_msdu = false;
5380 }
5381
5382 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
5383 &is_frag, &total_len,
5384 &frag_len, &msdu_cnt);
5385
5386 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
5387
5388 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
5389
5390 if (!(*head_msdu))
5391 *head_msdu = msdu;
5392 else if (last)
5393 last->next = msdu;
5394
5395 last = msdu;
5396next_msdu:
5397 rx_bufs_used++;
5398 }
5399
5400 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
5401 &sw_mon_entries->mon_dst_paddr,
5402 &sw_mon_entries->mon_dst_sw_cookie,
5403 &rbm,
5404 &p_buf_addr_info);
5405
5406 if (ath11k_dp_rx_monitor_link_desc_return(ar,
5407 p_last_buf_addr_info,
5408 dp->mac_id))
5409 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5410 "full mon: dp_rx_monitor_link_desc_return failed\n");
5411
5412 p_last_buf_addr_info = p_buf_addr_info;
5413
5414 } while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
5415
5416 if (last)
5417 last->next = NULL;
5418
5419 *tail_msdu = msdu;
5420
5421 return rx_bufs_used;
5422}
5423
5424static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
5425 struct dp_full_mon_mpdu *mon_mpdu,
5426 struct sk_buff *head,
5427 struct sk_buff *tail)
5428{
5429 mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
5430 if (!mon_mpdu)
5431 return -ENOMEM;
5432
5433 list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
5434 mon_mpdu->head = head;
5435 mon_mpdu->tail = tail;
5436
5437 return 0;
5438}
5439
5440static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
5441 struct dp_full_mon_mpdu *mon_mpdu)
5442{
5443 struct dp_full_mon_mpdu *tmp;
5444 struct sk_buff *tmp_msdu, *skb_next;
5445
5446 if (list_empty(&dp->dp_full_mon_mpdu_list))
5447 return;
5448
5449 list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5450 list_del(&mon_mpdu->list);
5451
5452 tmp_msdu = mon_mpdu->head;
5453 while (tmp_msdu) {
5454 skb_next = tmp_msdu->next;
5455 dev_kfree_skb_any(tmp_msdu);
5456 tmp_msdu = skb_next;
5457 }
5458
5459 kfree(mon_mpdu);
5460 }
5461}
5462
5463static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
5464 int mac_id,
5465 struct ath11k_mon_data *pmon,
5466 struct napi_struct *napi)
5467{
5468 struct ath11k_pdev_mon_stats *rx_mon_stats;
5469 struct dp_full_mon_mpdu *tmp;
5470 struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
5471 struct sk_buff *head_msdu, *tail_msdu;
5472 struct ath11k_base *ab = ar->ab;
5473 struct ath11k_dp *dp = &ab->dp;
5474 int ret;
5475
5476 rx_mon_stats = &pmon->rx_mon_stats;
5477
5478 list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5479 list_del(&mon_mpdu->list);
5480 head_msdu = mon_mpdu->head;
5481 tail_msdu = mon_mpdu->tail;
5482 if (head_msdu && tail_msdu) {
5483 ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
5484 &pmon->mon_ppdu_info,
5485 tail_msdu, napi);
5486 rx_mon_stats->dest_mpdu_done++;
5487 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
5488 }
5489 kfree(mon_mpdu);
5490 }
5491
5492 return ret;
5493}
5494
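/* While the destination ring is held, reap the status ring one buffer at
 * a time until the status buffer matching the held ppdu shows up, then
 * deliver the queued mpdus; if the status ring has run ahead, the held
 * ppdu is dropped instead.
 */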
5495static int
5496ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
5497 struct napi_struct *napi, int budget)
5498{
5499 struct ath11k *ar = ab->pdevs[mac_id].ar;
5500 struct ath11k_pdev_dp *dp = &ar->dp;
5501 struct ath11k_mon_data *pmon = &dp->mon_data;
5502 struct hal_sw_mon_ring_entries *sw_mon_entries;
5503 int quota = 0, work = 0, count;
5504
5505 sw_mon_entries = &pmon->sw_mon_entries;
5506
5507 while (pmon->hold_mon_dst_ring) {
5508 quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
5509 napi, 1);
5510 if (pmon->buf_state == DP_MON_STATUS_MATCH) {
5511 count = sw_mon_entries->status_buf_count;
5512 if (count > 1) {
5513 quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
5514 napi, count);
5515 }
5516
5517 ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
5518 pmon, napi);
5519 pmon->hold_mon_dst_ring = false;
5520 } else if (!pmon->mon_status_paddr ||
5521 pmon->buf_state == DP_MON_STATUS_LEAD) {
5522 sw_mon_entries->drop_ppdu = true;
5523 pmon->hold_mon_dst_ring = false;
5524 }
5525
5526 if (!quota)
5527 break;
5528
5529 work += quota;
5530 }
5531
5532 if (sw_mon_entries->drop_ppdu)
5533 ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);
5534
5535 return work;
5536}
5537
5538static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
5539 struct napi_struct *napi, int budget)
5540{
5541 struct ath11k *ar = ab->pdevs[mac_id].ar;
5542 struct ath11k_pdev_dp *dp = &ar->dp;
5543 struct ath11k_mon_data *pmon = &dp->mon_data;
5544 struct hal_sw_mon_ring_entries *sw_mon_entries;
5545 struct ath11k_pdev_mon_stats *rx_mon_stats;
5546 struct sk_buff *head_msdu, *tail_msdu;
5547 void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
5548 void *ring_entry;
5549 u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
5550 int quota = 0, ret;
5551 bool break_dst_ring = false;
5552
5553 spin_lock_bh(&pmon->mon_lock);
5554
5555 sw_mon_entries = &pmon->sw_mon_entries;
5556 rx_mon_stats = &pmon->rx_mon_stats;
5557
5558 if (pmon->hold_mon_dst_ring) {
5559 spin_unlock_bh(&pmon->mon_lock);
5560 goto reap_status_ring;
5561 }
5562
5563 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5564 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5565 head_msdu = NULL;
5566 tail_msdu = NULL;
5567
5568 mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
5569 &head_msdu,
5570 &tail_msdu,
5571 sw_mon_entries);
5572 rx_bufs_used += mpdu_rx_bufs_used;
5573
5574 if (!sw_mon_entries->end_of_ppdu) {
5575 if (head_msdu) {
5576 ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
5577 pmon->mon_mpdu,
5578 head_msdu,
5579 tail_msdu);
5580 if (ret)
5581 break_dst_ring = true;
5582 }
5583
5584 goto next_entry;
5585 } else {
5586 if (!sw_mon_entries->ppdu_id &&
5587 !sw_mon_entries->mon_status_paddr) {
5588 break_dst_ring = true;
5589 goto next_entry;
5590 }
5591 }
5592
5593 rx_mon_stats->dest_ppdu_done++;
5594 pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5595 pmon->buf_state = DP_MON_STATUS_LAG;
5596 pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
5597 pmon->hold_mon_dst_ring = true;
5598next_entry:
5599 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5600 mon_dst_srng);
5601 if (break_dst_ring)
5602 break;
5603 }
5604
5605 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5606 spin_unlock_bh(&pmon->mon_lock);
5607
5608 if (rx_bufs_used) {
5609 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5610 &dp->rxdma_mon_buf_ring,
5611 rx_bufs_used,
5612 HAL_RX_BUF_RBM_SW3_BM);
5613 }
5614
5615reap_status_ring:
5616 quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
5617 napi, budget);
5618
5619 return quota;
5620}
5621
5622int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
5623 struct napi_struct *napi, int budget)
5624{
5625 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5626 int ret = 0;
5627
5628 if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5629 ab->hw_params.full_monitor_mode)
5630 ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
5631 else
5632 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
5633
5634 return ret;
5635}
5636
5637static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
5638{
5639 struct ath11k_pdev_dp *dp = &ar->dp;
5640 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5641
5642 skb_queue_head_init(&pmon->rx_status_q);
5643
5644 pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5645
5646 memset(&pmon->rx_mon_stats, 0,
5647 sizeof(pmon->rx_mon_stats));
5648 return 0;
5649}
5650
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_srng *mon_desc_srng = NULL;
	struct dp_srng *dp_srng;
	int ret = 0;
	u32 n_link_desc = 0;

	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
		return ret;
	}

	/* if rxdma1_enable is false, no need to setup
	 * rxdma_mon_desc_ring.
	 */
	if (!ar->ab->hw_params.rxdma1_enable)
		return 0;

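	/* Size the link descriptor pool to match the number of entries the
	 * monitor DESC ring can hold.
	 */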
	dp_srng = &dp->rxdma_mon_desc_ring;
	n_link_desc = dp_srng->size /
		ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
	mon_desc_srng =
		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
					n_link_desc);
	if (ret) {
		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed\n");
		return ret;
	}
	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);

	return 0;
}

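/* Tear down the monitor link descriptor pool set up in
 * ath11k_dp_rx_pdev_mon_attach().
 */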
static int ath11k_dp_mon_link_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;

	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
				    HAL_RXDMA_MONITOR_DESC,
				    &dp->rxdma_mon_desc_ring);
	return 0;
}

int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);
	return 0;
}

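/* Packet log capture rides on the monitor status ring; the reap timer
 * periodically drains that ring while pktlog is enabled.
 */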
int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
{
	/* start reap timer */
	mod_timer(&ab->mon_reap_timer,
		  jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));

	return 0;
}

int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
{
	int ret;

	if (stop_timer)
		del_timer_sync(&ab->mon_reap_timer);

	/* reap all the monitor related rings */
	ret = ath11k_dp_purge_mon_ring(ab);
	if (ret) {
		ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
		return ret;
	}

	return 0;
}
1// SPDX-License-Identifier: BSD-3-Clause-Clear
2/*
3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4 */
5
6#include <linux/ieee80211.h>
7#include <linux/kernel.h>
8#include <linux/skbuff.h>
9#include <crypto/hash.h>
10#include "core.h"
11#include "debug.h"
12#include "hal_desc.h"
13#include "hw.h"
14#include "dp_rx.h"
15#include "hal_rx.h"
16#include "dp_tx.h"
17#include "peer.h"
18
19#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
20
21static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
22{
23 return desc->hdr_status;
24}
25
26static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
27{
28 if (!(__le32_to_cpu(desc->mpdu_start.info1) &
29 RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
30 return HAL_ENCRYPT_TYPE_OPEN;
31
32 return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
33 __le32_to_cpu(desc->mpdu_start.info2));
34}
35
36static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc)
37{
38 return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
39 __le32_to_cpu(desc->msdu_start.info2));
40}
41
42static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc)
43{
44 return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
45 __le32_to_cpu(desc->msdu_start.info2));
46}
47
48static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc)
49{
50 return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
51 __le32_to_cpu(desc->mpdu_start.info1));
52}
53
54static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc)
55{
56 return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
57 __le32_to_cpu(desc->mpdu_start.info1));
58}
59
60static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb)
61{
62 struct ieee80211_hdr *hdr;
63
64 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
65 return ieee80211_has_morefrags(hdr->frame_control);
66}
67
68static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb)
69{
70 struct ieee80211_hdr *hdr;
71
72 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
73 return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
74}
75
76static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc)
77{
78 return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
79 __le32_to_cpu(desc->mpdu_start.info1));
80}
81
82static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
83{
84 return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
85 __le32_to_cpu(desc->attention.info2));
86}
87
88static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
89{
90 return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
91 __le32_to_cpu(desc->attention.info1));
92}
93
94static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
95{
96 return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
97 __le32_to_cpu(desc->attention.info1));
98}
99
100static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
101{
102 return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
103 __le32_to_cpu(desc->attention.info2)) ==
104 RX_DESC_DECRYPT_STATUS_CODE_OK);
105}
106
107static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
108{
109 u32 info = __le32_to_cpu(desc->attention.info1);
110 u32 errmap = 0;
111
112 if (info & RX_ATTENTION_INFO1_FCS_ERR)
113 errmap |= DP_RX_MPDU_ERR_FCS;
114
115 if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
116 errmap |= DP_RX_MPDU_ERR_DECRYPT;
117
118 if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
119 errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
120
121 if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
122 errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
123
124 if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
125 errmap |= DP_RX_MPDU_ERR_OVERFLOW;
126
127 if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
128 errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
129
130 if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
131 errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
132
133 return errmap;
134}
135
136static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc)
137{
138 return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
139 __le32_to_cpu(desc->msdu_start.info1));
140}
141
142static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc)
143{
144 return FIELD_GET(RX_MSDU_START_INFO3_SGI,
145 __le32_to_cpu(desc->msdu_start.info3));
146}
147
148static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc)
149{
150 return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
151 __le32_to_cpu(desc->msdu_start.info3));
152}
153
154static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc)
155{
156 return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
157 __le32_to_cpu(desc->msdu_start.info3));
158}
159
160static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc)
161{
162 return __le32_to_cpu(desc->msdu_start.phy_meta_data);
163}
164
165static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc)
166{
167 return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
168 __le32_to_cpu(desc->msdu_start.info3));
169}
170
171static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
172{
173 u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
174 __le32_to_cpu(desc->msdu_start.info3));
175
176 return hweight8(mimo_ss_bitmap);
177}
178
179static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc)
180{
181 return FIELD_GET(RX_MPDU_START_INFO2_TID,
182 __le32_to_cpu(desc->mpdu_start.info2));
183}
184
185static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc)
186{
187 return __le16_to_cpu(desc->mpdu_start.sw_peer_id);
188}
189
190static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
191{
192 return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
193 __le32_to_cpu(desc->msdu_end.info2));
194}
195
196static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc)
197{
198 return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
199 __le32_to_cpu(desc->msdu_end.info2));
200}
201
202static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc)
203{
204 return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
205 __le32_to_cpu(desc->msdu_end.info2));
206}
207
208static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc,
209 struct hal_rx_desc *ldesc)
210{
211 memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end,
212 sizeof(struct rx_msdu_end));
213 memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention,
214 sizeof(struct rx_attention));
215 memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end,
216 sizeof(struct rx_mpdu_end));
217}
218
219static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc)
220{
221 struct rx_attention *rx_attn;
222
223 rx_attn = &rx_desc->attention;
224
225 return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
226 __le32_to_cpu(rx_attn->info1));
227}
228
229static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc)
230{
231 struct rx_msdu_start *rx_msdu_start;
232
233 rx_msdu_start = &rx_desc->msdu_start;
234
235 return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
236 __le32_to_cpu(rx_msdu_start->info2));
237}
238
239static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc)
240{
241 u8 *rx_pkt_hdr;
242
243 rx_pkt_hdr = &rx_desc->msdu_payload[0];
244
245 return rx_pkt_hdr;
246}
247
248static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
249{
250 u32 tlv_tag;
251
252 tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
253 __le32_to_cpu(rx_desc->mpdu_start_tag));
254
255 return tlv_tag == HAL_RX_MPDU_START;
256}
257
258static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
259{
260 return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
261}
262
263/* Returns number of Rx buffers replenished */
264int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
265 struct dp_rxdma_ring *rx_ring,
266 int req_entries,
267 enum hal_rx_buf_return_buf_manager mgr,
268 gfp_t gfp)
269{
270 struct hal_srng *srng;
271 u32 *desc;
272 struct sk_buff *skb;
273 int num_free;
274 int num_remain;
275 int buf_id;
276 u32 cookie;
277 dma_addr_t paddr;
278
279 req_entries = min(req_entries, rx_ring->bufs_max);
280
281 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
282
283 spin_lock_bh(&srng->lock);
284
285 ath11k_hal_srng_access_begin(ab, srng);
286
287 num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
288 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
289 req_entries = num_free;
290
291 req_entries = min(num_free, req_entries);
292 num_remain = req_entries;
293
294 while (num_remain > 0) {
295 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
296 DP_RX_BUFFER_ALIGN_SIZE);
297 if (!skb)
298 break;
299
300 if (!IS_ALIGNED((unsigned long)skb->data,
301 DP_RX_BUFFER_ALIGN_SIZE)) {
302 skb_pull(skb,
303 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
304 skb->data);
305 }
306
307 paddr = dma_map_single(ab->dev, skb->data,
308 skb->len + skb_tailroom(skb),
309 DMA_FROM_DEVICE);
310 if (dma_mapping_error(ab->dev, paddr))
311 goto fail_free_skb;
312
313 spin_lock_bh(&rx_ring->idr_lock);
314 buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
315 rx_ring->bufs_max * 3, gfp);
316 spin_unlock_bh(&rx_ring->idr_lock);
317 if (buf_id < 0)
318 goto fail_dma_unmap;
319
320 desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
321 if (!desc)
322 goto fail_idr_remove;
323
324 ATH11K_SKB_RXCB(skb)->paddr = paddr;
325
326 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
327 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
328
329 num_remain--;
330
331 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
332 }
333
334 ath11k_hal_srng_access_end(ab, srng);
335
336 spin_unlock_bh(&srng->lock);
337
338 return req_entries - num_remain;
339
340fail_idr_remove:
341 spin_lock_bh(&rx_ring->idr_lock);
342 idr_remove(&rx_ring->bufs_idr, buf_id);
343 spin_unlock_bh(&rx_ring->idr_lock);
344fail_dma_unmap:
345 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
346 DMA_FROM_DEVICE);
347fail_free_skb:
348 dev_kfree_skb_any(skb);
349
350 ath11k_hal_srng_access_end(ab, srng);
351
352 spin_unlock_bh(&srng->lock);
353
354 return req_entries - num_remain;
355}
356
357static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
358 struct dp_rxdma_ring *rx_ring)
359{
360 struct ath11k_pdev_dp *dp = &ar->dp;
361 struct sk_buff *skb;
362 int buf_id;
363
364 spin_lock_bh(&rx_ring->idr_lock);
365 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
366 idr_remove(&rx_ring->bufs_idr, buf_id);
367 /* TODO: Understand where internal driver does this dma_unmap of
368 * of rxdma_buffer.
369 */
370 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
371 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
372 dev_kfree_skb_any(skb);
373 }
374
375 idr_destroy(&rx_ring->bufs_idr);
376 spin_unlock_bh(&rx_ring->idr_lock);
377
378 rx_ring = &dp->rx_mon_status_refill_ring;
379
380 spin_lock_bh(&rx_ring->idr_lock);
381 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
382 idr_remove(&rx_ring->bufs_idr, buf_id);
383 /* XXX: Understand where internal driver does this dma_unmap of
384 * of rxdma_buffer.
385 */
386 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
387 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
388 dev_kfree_skb_any(skb);
389 }
390
391 idr_destroy(&rx_ring->bufs_idr);
392 spin_unlock_bh(&rx_ring->idr_lock);
393 return 0;
394}
395
396static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
397{
398 struct ath11k_pdev_dp *dp = &ar->dp;
399 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
400
401 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
402
403 rx_ring = &dp->rxdma_mon_buf_ring;
404 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
405
406 rx_ring = &dp->rx_mon_status_refill_ring;
407 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
408 return 0;
409}
410
411static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
412 struct dp_rxdma_ring *rx_ring,
413 u32 ringtype)
414{
415 struct ath11k_pdev_dp *dp = &ar->dp;
416 int num_entries;
417
418 num_entries = rx_ring->refill_buf_ring.size /
419 ath11k_hal_srng_get_entrysize(ringtype);
420
421 rx_ring->bufs_max = num_entries;
422 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
423 HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
424 return 0;
425}
426
427static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
428{
429 struct ath11k_pdev_dp *dp = &ar->dp;
430 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
431
432 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
433
434 rx_ring = &dp->rxdma_mon_buf_ring;
435 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
436
437 rx_ring = &dp->rx_mon_status_refill_ring;
438 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
439
440 return 0;
441}
442
443static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
444{
445 struct ath11k_pdev_dp *dp = &ar->dp;
446
447 ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring);
448 ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring);
449 ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring);
450 ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
451}
452
453void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
454{
455 struct ath11k_dp *dp = &ab->dp;
456 int i;
457
458 for (i = 0; i < DP_REO_DST_RING_MAX; i++)
459 ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
460}
461
462int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
463{
464 struct ath11k_dp *dp = &ab->dp;
465 int ret;
466 int i;
467
468 for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
469 ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
470 HAL_REO_DST, i, 0,
471 DP_REO_DST_RING_SIZE);
472 if (ret) {
473 ath11k_warn(ab, "failed to setup reo_dst_ring\n");
474 goto err_reo_cleanup;
475 }
476 }
477
478 return 0;
479
480err_reo_cleanup:
481 ath11k_dp_pdev_reo_cleanup(ab);
482
483 return ret;
484}
485
486static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
487{
488 struct ath11k_pdev_dp *dp = &ar->dp;
489 struct dp_srng *srng = NULL;
490 int ret;
491
492 ret = ath11k_dp_srng_setup(ar->ab,
493 &dp->rx_refill_buf_ring.refill_buf_ring,
494 HAL_RXDMA_BUF, 0,
495 dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
496 if (ret) {
497 ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
498 return ret;
499 }
500
501 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring,
502 HAL_RXDMA_DST, 0, dp->mac_id,
503 DP_RXDMA_ERR_DST_RING_SIZE);
504 if (ret) {
505 ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n");
506 return ret;
507 }
508
509 srng = &dp->rx_mon_status_refill_ring.refill_buf_ring;
510 ret = ath11k_dp_srng_setup(ar->ab,
511 srng,
512 HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id,
513 DP_RXDMA_MON_STATUS_RING_SIZE);
514 if (ret) {
515 ath11k_warn(ar->ab,
516 "failed to setup rx_mon_status_refill_ring\n");
517 return ret;
518 }
519 ret = ath11k_dp_srng_setup(ar->ab,
520 &dp->rxdma_mon_buf_ring.refill_buf_ring,
521 HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
522 DP_RXDMA_MONITOR_BUF_RING_SIZE);
523 if (ret) {
524 ath11k_warn(ar->ab,
525 "failed to setup HAL_RXDMA_MONITOR_BUF\n");
526 return ret;
527 }
528
529 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
530 HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
531 DP_RXDMA_MONITOR_DST_RING_SIZE);
532 if (ret) {
533 ath11k_warn(ar->ab,
534 "failed to setup HAL_RXDMA_MONITOR_DST\n");
535 return ret;
536 }
537
538 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
539 HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
540 DP_RXDMA_MONITOR_DESC_RING_SIZE);
541 if (ret) {
542 ath11k_warn(ar->ab,
543 "failed to setup HAL_RXDMA_MONITOR_DESC\n");
544 return ret;
545 }
546
547 return 0;
548}
549
550void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
551{
552 struct ath11k_dp *dp = &ab->dp;
553 struct dp_reo_cmd *cmd, *tmp;
554 struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
555
556 spin_lock_bh(&dp->reo_cmd_lock);
557 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
558 list_del(&cmd->list);
559 dma_unmap_single(ab->dev, cmd->data.paddr,
560 cmd->data.size, DMA_BIDIRECTIONAL);
561 kfree(cmd->data.vaddr);
562 kfree(cmd);
563 }
564
565 list_for_each_entry_safe(cmd_cache, tmp_cache,
566 &dp->reo_cmd_cache_flush_list, list) {
567 list_del(&cmd_cache->list);
568 dp->reo_cmd_cache_flush_count--;
569 dma_unmap_single(ab->dev, cmd_cache->data.paddr,
570 cmd_cache->data.size, DMA_BIDIRECTIONAL);
571 kfree(cmd_cache->data.vaddr);
572 kfree(cmd_cache);
573 }
574 spin_unlock_bh(&dp->reo_cmd_lock);
575}
576
577static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
578 enum hal_reo_cmd_status status)
579{
580 struct dp_rx_tid *rx_tid = ctx;
581
582 if (status != HAL_REO_CMD_SUCCESS)
583 ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
584 rx_tid->tid, status);
585
586 dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
587 DMA_BIDIRECTIONAL);
588 kfree(rx_tid->vaddr);
589}
590
591static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
592 struct dp_rx_tid *rx_tid)
593{
594 struct ath11k_hal_reo_cmd cmd = {0};
595 unsigned long tot_desc_sz, desc_sz;
596 int ret;
597
598 tot_desc_sz = rx_tid->size;
599 desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
600
601 while (tot_desc_sz > desc_sz) {
602 tot_desc_sz -= desc_sz;
603 cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
604 cmd.addr_hi = upper_32_bits(rx_tid->paddr);
605 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
606 HAL_REO_CMD_FLUSH_CACHE, &cmd,
607 NULL);
608 if (ret)
609 ath11k_warn(ab,
610 "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
611 rx_tid->tid, ret);
612 }
613
614 memset(&cmd, 0, sizeof(cmd));
615 cmd.addr_lo = lower_32_bits(rx_tid->paddr);
616 cmd.addr_hi = upper_32_bits(rx_tid->paddr);
617 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
618 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
619 HAL_REO_CMD_FLUSH_CACHE,
620 &cmd, ath11k_dp_reo_cmd_free);
621 if (ret) {
622 ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
623 rx_tid->tid, ret);
624 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
625 DMA_BIDIRECTIONAL);
626 kfree(rx_tid->vaddr);
627 }
628}
629
630static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
631 enum hal_reo_cmd_status status)
632{
633 struct ath11k_base *ab = dp->ab;
634 struct dp_rx_tid *rx_tid = ctx;
635 struct dp_reo_cache_flush_elem *elem, *tmp;
636
637 if (status == HAL_REO_CMD_DRAIN) {
638 goto free_desc;
639 } else if (status != HAL_REO_CMD_SUCCESS) {
640 /* Shouldn't happen! Cleanup in case of other failure? */
641 ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
642 rx_tid->tid, status);
643 return;
644 }
645
646 elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
647 if (!elem)
648 goto free_desc;
649
650 elem->ts = jiffies;
651 memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
652
653 spin_lock_bh(&dp->reo_cmd_lock);
654 list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
655 dp->reo_cmd_cache_flush_count++;
656
657 /* Flush and invalidate aged REO desc from HW cache */
658 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
659 list) {
660 if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
661 time_after(jiffies, elem->ts +
662 msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
663 list_del(&elem->list);
664 dp->reo_cmd_cache_flush_count--;
665 spin_unlock_bh(&dp->reo_cmd_lock);
666
667 ath11k_dp_reo_cache_flush(ab, &elem->data);
668 kfree(elem);
669 spin_lock_bh(&dp->reo_cmd_lock);
670 }
671 }
672 spin_unlock_bh(&dp->reo_cmd_lock);
673
674 return;
675free_desc:
676 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
677 DMA_BIDIRECTIONAL);
678 kfree(rx_tid->vaddr);
679}
680
681void ath11k_peer_rx_tid_delete(struct ath11k *ar,
682 struct ath11k_peer *peer, u8 tid)
683{
684 struct ath11k_hal_reo_cmd cmd = {0};
685 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
686 int ret;
687
688 if (!rx_tid->active)
689 return;
690
691 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
692 cmd.addr_lo = lower_32_bits(rx_tid->paddr);
693 cmd.addr_hi = upper_32_bits(rx_tid->paddr);
694 cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
695 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
696 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
697 ath11k_dp_rx_tid_del_func);
698 if (ret) {
699 ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
700 tid, ret);
701 dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
702 DMA_BIDIRECTIONAL);
703 kfree(rx_tid->vaddr);
704 }
705
706 rx_tid->active = false;
707}
708
709static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
710 u32 *link_desc,
711 enum hal_wbm_rel_bm_act action)
712{
713 struct ath11k_dp *dp = &ab->dp;
714 struct hal_srng *srng;
715 u32 *desc;
716 int ret = 0;
717
718 srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
719
720 spin_lock_bh(&srng->lock);
721
722 ath11k_hal_srng_access_begin(ab, srng);
723
724 desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
725 if (!desc) {
726 ret = -ENOBUFS;
727 goto exit;
728 }
729
730 ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
731 action);
732
733exit:
734 ath11k_hal_srng_access_end(ab, srng);
735
736 spin_unlock_bh(&srng->lock);
737
738 return ret;
739}
740
741static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
742{
743 struct ath11k_base *ab = rx_tid->ab;
744
745 lockdep_assert_held(&ab->base_lock);
746
747 if (rx_tid->dst_ring_desc) {
748 if (rel_link_desc)
749 ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
750 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
751 kfree(rx_tid->dst_ring_desc);
752 rx_tid->dst_ring_desc = NULL;
753 }
754
755 rx_tid->cur_sn = 0;
756 rx_tid->last_frag_no = 0;
757 rx_tid->rx_frag_bitmap = 0;
758 __skb_queue_purge(&rx_tid->rx_frags);
759}
760
761void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
762{
763 struct dp_rx_tid *rx_tid;
764 int i;
765
766 lockdep_assert_held(&ar->ab->base_lock);
767
768 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
769 rx_tid = &peer->rx_tid[i];
770
771 ath11k_peer_rx_tid_delete(ar, peer, i);
772 ath11k_dp_rx_frags_cleanup(rx_tid, true);
773
774 spin_unlock_bh(&ar->ab->base_lock);
775 del_timer_sync(&rx_tid->frag_timer);
776 spin_lock_bh(&ar->ab->base_lock);
777 }
778}
779
780static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
781 struct ath11k_peer *peer,
782 struct dp_rx_tid *rx_tid,
783 u32 ba_win_sz, u16 ssn,
784 bool update_ssn)
785{
786 struct ath11k_hal_reo_cmd cmd = {0};
787 int ret;
788
789 cmd.addr_lo = lower_32_bits(rx_tid->paddr);
790 cmd.addr_hi = upper_32_bits(rx_tid->paddr);
791 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
792 cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
793 cmd.ba_window_size = ba_win_sz;
794
795 if (update_ssn) {
796 cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
797 cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
798 }
799
800 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
801 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
802 NULL);
803 if (ret) {
804 ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
805 rx_tid->tid, ret);
806 return ret;
807 }
808
809 rx_tid->ba_win_sz = ba_win_sz;
810
811 return 0;
812}
813
814static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
815 const u8 *peer_mac, int vdev_id, u8 tid)
816{
817 struct ath11k_peer *peer;
818 struct dp_rx_tid *rx_tid;
819
820 spin_lock_bh(&ab->base_lock);
821
822 peer = ath11k_peer_find(ab, vdev_id, peer_mac);
823 if (!peer) {
824 ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
825 goto unlock_exit;
826 }
827
828 rx_tid = &peer->rx_tid[tid];
829 if (!rx_tid->active)
830 goto unlock_exit;
831
832 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
833 DMA_BIDIRECTIONAL);
834 kfree(rx_tid->vaddr);
835
836 rx_tid->active = false;
837
838unlock_exit:
839 spin_unlock_bh(&ab->base_lock);
840}
841
842int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
843 u8 tid, u32 ba_win_sz, u16 ssn,
844 enum hal_pn_type pn_type)
845{
846 struct ath11k_base *ab = ar->ab;
847 struct ath11k_peer *peer;
848 struct dp_rx_tid *rx_tid;
849 u32 hw_desc_sz;
850 u32 *addr_aligned;
851 void *vaddr;
852 dma_addr_t paddr;
853 int ret;
854
855 spin_lock_bh(&ab->base_lock);
856
857 peer = ath11k_peer_find(ab, vdev_id, peer_mac);
858 if (!peer) {
859 ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
860 spin_unlock_bh(&ab->base_lock);
861 return -ENOENT;
862 }
863
864 rx_tid = &peer->rx_tid[tid];
865 /* Update the tid queue if it is already setup */
866 if (rx_tid->active) {
867 paddr = rx_tid->paddr;
868 ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
869 ba_win_sz, ssn, true);
870 spin_unlock_bh(&ab->base_lock);
871 if (ret) {
872 ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
873 return ret;
874 }
875
876 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
877 peer_mac, paddr,
878 tid, 1, ba_win_sz);
879 if (ret)
880 ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
881 tid, ret);
882 return ret;
883 }
884
885 rx_tid->tid = tid;
886
887 rx_tid->ba_win_sz = ba_win_sz;
888
889 /* TODO: Optimize the memory allocation for qos tid based on the
890 * the actual BA window size in REO tid update path.
891 */
892 if (tid == HAL_DESC_REO_NON_QOS_TID)
893 hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
894 else
895 hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
896
897 vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
898 if (!vaddr) {
899 spin_unlock_bh(&ab->base_lock);
900 return -ENOMEM;
901 }
902
903 addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
904
905 ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
906 ssn, pn_type);
907
908 paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
909 DMA_BIDIRECTIONAL);
910
911 ret = dma_mapping_error(ab->dev, paddr);
912 if (ret) {
913 spin_unlock_bh(&ab->base_lock);
914 goto err_mem_free;
915 }
916
917 rx_tid->vaddr = vaddr;
918 rx_tid->paddr = paddr;
919 rx_tid->size = hw_desc_sz;
920 rx_tid->active = true;
921
922 spin_unlock_bh(&ab->base_lock);
923
924 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
925 paddr, tid, 1, ba_win_sz);
926 if (ret) {
927 ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
928 tid, ret);
929 ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
930 }
931
932 return ret;
933
934err_mem_free:
935 kfree(vaddr);
936
937 return ret;
938}
939
940int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
941 struct ieee80211_ampdu_params *params)
942{
943 struct ath11k_base *ab = ar->ab;
944 struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
945 int vdev_id = arsta->arvif->vdev_id;
946 int ret;
947
948 ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
949 params->tid, params->buf_size,
950 params->ssn, arsta->pn_type);
951 if (ret)
952 ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
953
954 return ret;
955}
956
957int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
958 struct ieee80211_ampdu_params *params)
959{
960 struct ath11k_base *ab = ar->ab;
961 struct ath11k_peer *peer;
962 struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
963 int vdev_id = arsta->arvif->vdev_id;
964 dma_addr_t paddr;
965 bool active;
966 int ret;
967
968 spin_lock_bh(&ab->base_lock);
969
970 peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
971 if (!peer) {
972 ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
973 spin_unlock_bh(&ab->base_lock);
974 return -ENOENT;
975 }
976
977 paddr = peer->rx_tid[params->tid].paddr;
978 active = peer->rx_tid[params->tid].active;
979
980 if (!active) {
981 spin_unlock_bh(&ab->base_lock);
982 return 0;
983 }
984
985 ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
986 spin_unlock_bh(&ab->base_lock);
987 if (ret) {
988 ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
989 params->tid, ret);
990 return ret;
991 }
992
993 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
994 params->sta->addr, paddr,
995 params->tid, 1, 1);
996 if (ret)
997 ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
998 ret);
999
1000 return ret;
1001}
1002
1003int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
1004 const u8 *peer_addr,
1005 enum set_key_cmd key_cmd,
1006 struct ieee80211_key_conf *key)
1007{
1008 struct ath11k *ar = arvif->ar;
1009 struct ath11k_base *ab = ar->ab;
1010 struct ath11k_hal_reo_cmd cmd = {0};
1011 struct ath11k_peer *peer;
1012 struct dp_rx_tid *rx_tid;
1013 u8 tid;
1014 int ret = 0;
1015
1016 /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1017 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1018 * for now.
1019 */
1020 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1021 return 0;
1022
1023 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
1024 cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
1025 HAL_REO_CMD_UPD0_PN_SIZE |
1026 HAL_REO_CMD_UPD0_PN_VALID |
1027 HAL_REO_CMD_UPD0_PN_CHECK |
1028 HAL_REO_CMD_UPD0_SVLD;
1029
1030 switch (key->cipher) {
1031 case WLAN_CIPHER_SUITE_TKIP:
1032 case WLAN_CIPHER_SUITE_CCMP:
1033 case WLAN_CIPHER_SUITE_CCMP_256:
1034 case WLAN_CIPHER_SUITE_GCMP:
1035 case WLAN_CIPHER_SUITE_GCMP_256:
1036 if (key_cmd == SET_KEY) {
1037 cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
1038 cmd.pn_size = 48;
1039 }
1040 break;
1041 default:
1042 break;
1043 }
1044
1045 spin_lock_bh(&ab->base_lock);
1046
1047 peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
1048 if (!peer) {
1049 ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
1050 spin_unlock_bh(&ab->base_lock);
1051 return -ENOENT;
1052 }
1053
1054 for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
1055 rx_tid = &peer->rx_tid[tid];
1056 if (!rx_tid->active)
1057 continue;
1058 cmd.addr_lo = lower_32_bits(rx_tid->paddr);
1059 cmd.addr_hi = upper_32_bits(rx_tid->paddr);
1060 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
1061 HAL_REO_CMD_UPDATE_RX_QUEUE,
1062 &cmd, NULL);
1063 if (ret) {
1064 ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
1065 tid, ret);
1066 break;
1067 }
1068 }
1069
1070 spin_unlock_bh(&ar->ab->base_lock);
1071
1072 return ret;
1073}
1074
1075static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1076 u16 peer_id)
1077{
1078 int i;
1079
1080 for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1081 if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1082 if (peer_id == ppdu_stats->user_stats[i].peer_id)
1083 return i;
1084 } else {
1085 return i;
1086 }
1087 }
1088
1089 return -EINVAL;
1090}
1091
1092static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
1093 u16 tag, u16 len, const void *ptr,
1094 void *data)
1095{
1096 struct htt_ppdu_stats_info *ppdu_info;
1097 struct htt_ppdu_user_stats *user_stats;
1098 int cur_user;
1099 u16 peer_id;
1100
1101 ppdu_info = (struct htt_ppdu_stats_info *)data;
1102
1103 switch (tag) {
1104 case HTT_PPDU_STATS_TAG_COMMON:
1105 if (len < sizeof(struct htt_ppdu_stats_common)) {
1106 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1107 len, tag);
1108 return -EINVAL;
1109 }
1110 memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
1111 sizeof(struct htt_ppdu_stats_common));
1112 break;
1113 case HTT_PPDU_STATS_TAG_USR_RATE:
1114 if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
1115 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1116 len, tag);
1117 return -EINVAL;
1118 }
1119
1120 peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
1121 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1122 peer_id);
1123 if (cur_user < 0)
1124 return -EINVAL;
1125 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1126 user_stats->peer_id = peer_id;
1127 user_stats->is_valid_peer_id = true;
1128 memcpy((void *)&user_stats->rate, ptr,
1129 sizeof(struct htt_ppdu_stats_user_rate));
1130 user_stats->tlv_flags |= BIT(tag);
1131 break;
1132 case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
1133 if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
1134 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1135 len, tag);
1136 return -EINVAL;
1137 }
1138
1139 peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
1140 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1141 peer_id);
1142 if (cur_user < 0)
1143 return -EINVAL;
1144 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1145 user_stats->peer_id = peer_id;
1146 user_stats->is_valid_peer_id = true;
1147 memcpy((void *)&user_stats->cmpltn_cmn, ptr,
1148 sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
1149 user_stats->tlv_flags |= BIT(tag);
1150 break;
1151 case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
1152 if (len <
1153 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
1154 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1155 len, tag);
1156 return -EINVAL;
1157 }
1158
1159 peer_id =
1160 ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
1161 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1162 peer_id);
1163 if (cur_user < 0)
1164 return -EINVAL;
1165 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1166 user_stats->peer_id = peer_id;
1167 user_stats->is_valid_peer_id = true;
1168 memcpy((void *)&user_stats->ack_ba, ptr,
1169 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
1170 user_stats->tlv_flags |= BIT(tag);
1171 break;
1172 }
1173 return 0;
1174}
1175
1176int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
1177 int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
1178 const void *ptr, void *data),
1179 void *data)
1180{
1181 const struct htt_tlv *tlv;
1182 const void *begin = ptr;
1183 u16 tlv_tag, tlv_len;
1184 int ret = -EINVAL;
1185
1186 while (len > 0) {
1187 if (len < sizeof(*tlv)) {
1188 ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
1189 ptr - begin, len, sizeof(*tlv));
1190 return -EINVAL;
1191 }
1192 tlv = (struct htt_tlv *)ptr;
1193 tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
1194 tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
1195 ptr += sizeof(*tlv);
1196 len -= sizeof(*tlv);
1197
1198 if (tlv_len > len) {
1199 ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
1200 tlv_tag, ptr - begin, len, tlv_len);
1201 return -EINVAL;
1202 }
1203 ret = iter(ab, tlv_tag, tlv_len, ptr, data);
1204 if (ret == -ENOMEM)
1205 return ret;
1206
1207 ptr += tlv_len;
1208 len -= tlv_len;
1209 }
1210 return 0;
1211}
1212
1213static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
1214{
1215 u32 ret = 0;
1216
1217 switch (sgi) {
1218 case RX_MSDU_START_SGI_0_8_US:
1219 ret = NL80211_RATE_INFO_HE_GI_0_8;
1220 break;
1221 case RX_MSDU_START_SGI_1_6_US:
1222 ret = NL80211_RATE_INFO_HE_GI_1_6;
1223 break;
1224 case RX_MSDU_START_SGI_3_2_US:
1225 ret = NL80211_RATE_INFO_HE_GI_3_2;
1226 break;
1227 }
1228
1229 return ret;
1230}
1231
1232static void
1233ath11k_update_per_peer_tx_stats(struct ath11k *ar,
1234 struct htt_ppdu_stats *ppdu_stats, u8 user)
1235{
1236 struct ath11k_base *ab = ar->ab;
1237 struct ath11k_peer *peer;
1238 struct ieee80211_sta *sta;
1239 struct ath11k_sta *arsta;
1240 struct htt_ppdu_stats_user_rate *user_rate;
1241 struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
1242 struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
1243 struct htt_ppdu_stats_common *common = &ppdu_stats->common;
1244 int ret;
1245 u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
1246 u32 succ_bytes = 0;
1247 u16 rate = 0, succ_pkts = 0;
1248 u32 tx_duration = 0;
1249 u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
1250 bool is_ampdu = false;
1251
1252 if (!usr_stats)
1253 return;
1254
1255 if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
1256 return;
1257
1258 if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
1259 is_ampdu =
1260 HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
1261
1262 if (usr_stats->tlv_flags &
1263 BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
1264 succ_bytes = usr_stats->ack_ba.success_bytes;
1265 succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
1266 usr_stats->ack_ba.info);
1267 tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
1268 usr_stats->ack_ba.info);
1269 }
1270
1271 if (common->fes_duration_us)
1272 tx_duration = common->fes_duration_us;
1273
1274 user_rate = &usr_stats->rate;
1275 flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
1276 bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
1277 nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
1278 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
1279 sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
1280 dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
1281
1282 /* Note: If host configured fixed rates and in some other special
1283 * cases, the broadcast/management frames are sent in different rates.
1284 * Firmware rate's control to be skipped for this?
1285 */
1286
1287 if (flags == WMI_RATE_PREAMBLE_HE && mcs > 11) {
1288 ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs);
1289 return;
1290 }
1291
1292 if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
1293 ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs);
1294 return;
1295 }
1296
1297 if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
1298 ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs);
1299 return;
1300 }
1301
1302 if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
1303 ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
1304 mcs, nss);
1305 return;
1306 }
1307
1308 if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1309 ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
1310 flags,
1311 &rate_idx,
1312 &rate);
1313 if (ret < 0)
1314 return;
1315 }
1316
1317 rcu_read_lock();
1318 spin_lock_bh(&ab->base_lock);
1319 peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
1320
1321 if (!peer || !peer->sta) {
1322 spin_unlock_bh(&ab->base_lock);
1323 rcu_read_unlock();
1324 return;
1325 }
1326
1327 sta = peer->sta;
1328 arsta = (struct ath11k_sta *)sta->drv_priv;
1329
1330 memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1331
1332 switch (flags) {
1333 case WMI_RATE_PREAMBLE_OFDM:
1334 arsta->txrate.legacy = rate;
1335 break;
1336 case WMI_RATE_PREAMBLE_CCK:
1337 arsta->txrate.legacy = rate;
1338 break;
1339 case WMI_RATE_PREAMBLE_HT:
1340 arsta->txrate.mcs = mcs + 8 * (nss - 1);
1341 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1342 if (sgi)
1343 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1344 break;
1345 case WMI_RATE_PREAMBLE_VHT:
1346 arsta->txrate.mcs = mcs;
1347 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1348 if (sgi)
1349 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1350 break;
1351 case WMI_RATE_PREAMBLE_HE:
1352 arsta->txrate.mcs = mcs;
1353 arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1354 arsta->txrate.he_dcm = dcm;
1355 arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
1356 arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
1357 (user_rate->ru_end -
1358 user_rate->ru_start) + 1);
1359 break;
1360 }
1361
1362 arsta->txrate.nss = nss;
1363 arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
1364 arsta->tx_duration += tx_duration;
1365 memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1366
1367 /* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
1368 * So skip peer stats update for mgmt packets.
1369 */
1370 if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1371 memset(peer_stats, 0, sizeof(*peer_stats));
1372 peer_stats->succ_pkts = succ_pkts;
1373 peer_stats->succ_bytes = succ_bytes;
1374 peer_stats->is_ampdu = is_ampdu;
1375 peer_stats->duration = tx_duration;
1376 peer_stats->ba_fails =
1377 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1378 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1379
1380 if (ath11k_debug_is_extd_tx_stats_enabled(ar))
1381 ath11k_accumulate_per_peer_tx_stats(arsta,
1382 peer_stats, rate_idx);
1383 }
1384
1385 spin_unlock_bh(&ab->base_lock);
1386 rcu_read_unlock();
1387}
1388
1389static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
1390 struct htt_ppdu_stats *ppdu_stats)
1391{
1392 u8 user;
1393
1394 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1395 ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1396}
1397
1398static
1399struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
1400 u32 ppdu_id)
1401{
1402 struct htt_ppdu_stats_info *ppdu_info;
1403
1404 spin_lock_bh(&ar->data_lock);
1405 if (!list_empty(&ar->ppdu_stats_info)) {
1406 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1407 if (ppdu_info->ppdu_id == ppdu_id) {
1408 spin_unlock_bh(&ar->data_lock);
1409 return ppdu_info;
1410 }
1411 }
1412
1413 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1414 ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1415 typeof(*ppdu_info), list);
1416 list_del(&ppdu_info->list);
1417 ar->ppdu_stat_list_depth--;
1418 ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1419 kfree(ppdu_info);
1420 }
1421 }
1422 spin_unlock_bh(&ar->data_lock);
1423
1424 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
1425 if (!ppdu_info)
1426 return NULL;
1427
1428 spin_lock_bh(&ar->data_lock);
1429 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1430 ar->ppdu_stat_list_depth++;
1431 spin_unlock_bh(&ar->data_lock);
1432
1433 return ppdu_info;
1434}
1435
1436static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
1437 struct sk_buff *skb)
1438{
1439 struct ath11k_htt_ppdu_stats_msg *msg;
1440 struct htt_ppdu_stats_info *ppdu_info;
1441 struct ath11k *ar;
1442 int ret;
1443 u8 pdev_id;
1444 u32 ppdu_id, len;
1445
1446 msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
1447 len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
1448 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
1449 ppdu_id = msg->ppdu_id;
1450
1451 rcu_read_lock();
1452 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1453 if (!ar) {
1454 ret = -EINVAL;
1455 goto exit;
1456 }
1457
1458 if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
1459 trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
1460
1461 ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1462 if (!ppdu_info) {
1463 ret = -EINVAL;
1464 goto exit;
1465 }
1466
1467 ppdu_info->ppdu_id = ppdu_id;
1468 ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
1469 ath11k_htt_tlv_ppdu_stats_parse,
1470 (void *)ppdu_info);
1471 if (ret) {
1472 ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
1473 goto exit;
1474 }
1475
1476exit:
1477 rcu_read_unlock();
1478
1479 return ret;
1480}
1481
1482static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1483{
1484 struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1485 struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
1486 struct ath11k *ar;
1487 u8 pdev_id;
1488
1489 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1490 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1491 if (!ar) {
1492 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
1493 return;
1494 }
1495
1496 trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
1497 ar->ab->pktlog_defs_checksum);
1498}
1499
1500static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
1501 struct sk_buff *skb)
1502{
1503 u32 *data = (u32 *)skb->data;
1504 u8 pdev_id, ring_type, ring_id, pdev_idx;
1505 u16 hp, tp;
1506 u32 backpressure_time;
1507 struct ath11k_bp_stats *bp_stats;
1508
1509 pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
1510 ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
1511 ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
1512 ++data;
1513
1514 hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
1515 tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
1516 ++data;
1517
1518 backpressure_time = *data;
1519
1520 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
1521 pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
1522
1523 if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
1524 if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
1525 return;
1526
1527 bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
1528 } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
1529 pdev_idx = DP_HW2SW_MACID(pdev_id);
1530
1531 if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
1532 return;
1533
1534 bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
1535 } else {
1536 ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
1537 ring_type);
1538 return;
1539 }
1540
1541 spin_lock_bh(&ab->base_lock);
1542 bp_stats->hp = hp;
1543 bp_stats->tp = tp;
1544 bp_stats->count++;
1545 bp_stats->jiffies = jiffies;
1546 spin_unlock_bh(&ab->base_lock);
1547}
1548
1549void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1550 struct sk_buff *skb)
1551{
1552 struct ath11k_dp *dp = &ab->dp;
1553 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1554 enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1555 u16 peer_id;
1556 u8 vdev_id;
1557 u8 mac_addr[ETH_ALEN];
1558 u16 peer_mac_h16;
1559 u16 ast_hash;
1560
1561 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
1562
1563 switch (type) {
1564 case HTT_T2H_MSG_TYPE_VERSION_CONF:
1565 dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1566 resp->version_msg.version);
1567 dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1568 resp->version_msg.version);
1569 complete(&dp->htt_tgt_version_received);
1570 break;
1571 case HTT_T2H_MSG_TYPE_PEER_MAP:
1572 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1573 resp->peer_map_ev.info);
1574 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1575 resp->peer_map_ev.info);
1576 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1577 resp->peer_map_ev.info1);
1578 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1579 peer_mac_h16, mac_addr);
1580 ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
1581 resp->peer_map_ev.info2);
1582 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
1583 break;
1584 case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1585 peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1586 resp->peer_unmap_ev.info);
1587 ath11k_peer_unmap_event(ab, peer_id);
1588 break;
1589 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1590 ath11k_htt_pull_ppdu_stats(ab, skb);
1591 break;
1592 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1593 ath11k_dbg_htt_ext_stats_handler(ab, skb);
1594 break;
1595 case HTT_T2H_MSG_TYPE_PKTLOG:
1596 ath11k_htt_pktlog(ab, skb);
1597 break;
1598 case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
1599 ath11k_htt_backpressure_event_handler(ab, skb);
1600 break;
1601 default:
1602 ath11k_warn(ab, "htt event %d not handled\n", type);
1603 break;
1604 }
1605
1606 dev_kfree_skb_any(skb);
1607}
1608
1609static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
1610 struct sk_buff_head *msdu_list,
1611 struct sk_buff *first, struct sk_buff *last,
1612 u8 l3pad_bytes, int msdu_len)
1613{
1614 struct sk_buff *skb;
1615 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1616 int buf_first_hdr_len, buf_first_len;
1617 struct hal_rx_desc *ldesc;
1618 int space_extra;
1619 int rem_len;
1620 int buf_len;
1621
1622 /* As the msdu is spread across multiple rx buffers,
1623 * find the offset to the start of msdu for computing
1624 * the length of the msdu in the first buffer.
1625 */
1626 buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes;
1627 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1628
1629 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1630 skb_put(first, buf_first_hdr_len + msdu_len);
1631 skb_pull(first, buf_first_hdr_len);
1632 return 0;
1633 }
1634
1635 ldesc = (struct hal_rx_desc *)last->data;
1636 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc);
1637 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc);
1638
1639 /* MSDU spans over multiple buffers because the length of the MSDU
1640 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
1641 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1642 */
1643 skb_put(first, DP_RX_BUFFER_SIZE);
1644 skb_pull(first, buf_first_hdr_len);
1645
1646 /* When an MSDU spread over multiple buffers attention, MSDU_END and
1647 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
1648 */
1649 ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc);
1650
1651 space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1652 if (space_extra > 0 &&
1653 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1654 /* Free up all buffers of the MSDU */
1655 while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1656 rxcb = ATH11K_SKB_RXCB(skb);
1657 if (!rxcb->is_continuation) {
1658 dev_kfree_skb_any(skb);
1659 break;
1660 }
1661 dev_kfree_skb_any(skb);
1662 }
1663 return -ENOMEM;
1664 }
1665
1666 rem_len = msdu_len - buf_first_len;
1667 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1668 rxcb = ATH11K_SKB_RXCB(skb);
1669 if (rxcb->is_continuation)
1670 buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE;
1671 else
1672 buf_len = rem_len;
1673
1674 if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) {
1675 WARN_ON_ONCE(1);
1676 dev_kfree_skb_any(skb);
1677 return -EINVAL;
1678 }
1679
1680 skb_put(skb, buf_len + HAL_RX_DESC_SIZE);
1681 skb_pull(skb, HAL_RX_DESC_SIZE);
1682 skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1683 buf_len);
1684 dev_kfree_skb_any(skb);
1685
1686 rem_len -= buf_len;
1687 if (!rxcb->is_continuation)
1688 break;
1689 }
1690
1691 return 0;
1692}
1693
1694static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1695 struct sk_buff *first)
1696{
1697 struct sk_buff *skb;
1698 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1699
1700 if (!rxcb->is_continuation)
1701 return first;
1702
1703 skb_queue_walk(msdu_list, skb) {
1704 rxcb = ATH11K_SKB_RXCB(skb);
1705 if (!rxcb->is_continuation)
1706 return skb;
1707 }
1708
1709 return NULL;
1710}
1711
1712static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
1713{
1714 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1715 bool ip_csum_fail, l4_csum_fail;
1716
1717 ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc);
1718 l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc);
1719
1720 msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1721 CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1722}
1723
1724static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1725 enum hal_encrypt_type enctype)
1726{
1727 switch (enctype) {
1728 case HAL_ENCRYPT_TYPE_OPEN:
1729 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1730 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1731 return 0;
1732 case HAL_ENCRYPT_TYPE_CCMP_128:
1733 return IEEE80211_CCMP_MIC_LEN;
1734 case HAL_ENCRYPT_TYPE_CCMP_256:
1735 return IEEE80211_CCMP_256_MIC_LEN;
1736 case HAL_ENCRYPT_TYPE_GCMP_128:
1737 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1738 return IEEE80211_GCMP_MIC_LEN;
1739 case HAL_ENCRYPT_TYPE_WEP_40:
1740 case HAL_ENCRYPT_TYPE_WEP_104:
1741 case HAL_ENCRYPT_TYPE_WEP_128:
1742 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1743 case HAL_ENCRYPT_TYPE_WAPI:
1744 break;
1745 }
1746
1747 ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1748 return 0;
1749}
1750
1751static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1752 enum hal_encrypt_type enctype)
1753{
1754 switch (enctype) {
1755 case HAL_ENCRYPT_TYPE_OPEN:
1756 return 0;
1757 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1758 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1759 return IEEE80211_TKIP_IV_LEN;
1760 case HAL_ENCRYPT_TYPE_CCMP_128:
1761 return IEEE80211_CCMP_HDR_LEN;
1762 case HAL_ENCRYPT_TYPE_CCMP_256:
1763 return IEEE80211_CCMP_256_HDR_LEN;
1764 case HAL_ENCRYPT_TYPE_GCMP_128:
1765 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1766 return IEEE80211_GCMP_HDR_LEN;
1767 case HAL_ENCRYPT_TYPE_WEP_40:
1768 case HAL_ENCRYPT_TYPE_WEP_104:
1769 case HAL_ENCRYPT_TYPE_WEP_128:
1770 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1771 case HAL_ENCRYPT_TYPE_WAPI:
1772 break;
1773 }
1774
1775 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1776 return 0;
1777}
1778
1779static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1780 enum hal_encrypt_type enctype)
1781{
1782 switch (enctype) {
1783 case HAL_ENCRYPT_TYPE_OPEN:
1784 case HAL_ENCRYPT_TYPE_CCMP_128:
1785 case HAL_ENCRYPT_TYPE_CCMP_256:
1786 case HAL_ENCRYPT_TYPE_GCMP_128:
1787 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1788 return 0;
1789 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1790 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1791 return IEEE80211_TKIP_ICV_LEN;
1792 case HAL_ENCRYPT_TYPE_WEP_40:
1793 case HAL_ENCRYPT_TYPE_WEP_104:
1794 case HAL_ENCRYPT_TYPE_WEP_128:
1795 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1796 case HAL_ENCRYPT_TYPE_WAPI:
1797 break;
1798 }
1799
1800 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1801 return 0;
1802}
1803
static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
					 struct sk_buff *msdu,
					 u8 *first_hdr,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u16 qos_ctl = 0;
	u8 *qos;

	/* copy SA & DA and pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));

	if (rxcb->is_first_msdu) {
		/* original 802.11 header is valid for the first msdu
		 * hence we can reuse the same header
		 */
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);

		/* Each A-MSDU subframe will be reported as a separate MSDU,
		 * so strip the A-MSDU bit from QoS Ctl.
		 */
		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		/* Rebuild qos header if this is a middle/last msdu */
		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

		/* Reset the order bit as the HT_Control header is stripped */
		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));

		qos_ctl = rxcb->tid;

		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc))
			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

		/* TODO: Add other QoS ctl fields when required */

		/* copy the decap header before it is overwritten, for reuse below */
		memcpy(decap_hdr, (u8 *)hdr, hdr_len);
	}

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	if (!rxcb->is_first_msdu) {
		memcpy(skb_push(msdu,
				IEEE80211_QOS_CTL_LEN), &qos_ctl,
		       IEEE80211_QOS_CTL_LEN);
		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
		return;
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

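/* Undecap a raw (already fully formed 802.11) MSDU: trim the FCS and,
 * for decrypted frames, strip whichever crypto trailer (MIC/ICV) and
 * header (IV) the hardware has not already removed, according to the
 * RX_FLAG_*_STRIPPED flags.
 */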
static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status,
				       bool decrypted)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	if (!rxcb->is_first_msdu ||
	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
		WARN_ON_ONCE(1);
		return;
	}

	skb_trim(msdu, msdu->len - FCS_LEN);

	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

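/* Locate the RFC 1042 (LLC/SNAP) header inside the original undecapped
 * frame saved in the rx descriptor, skipping the 802.11 header, any
 * crypto parameters and, for A-MSDUs, the subframe header.
 */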
static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
					 struct sk_buff *msdu,
					 enum hal_encrypt_type enctype)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_amsdu;

	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc);
	rfc1042 = hdr;

	if (rxcb->is_first_msdu) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		rfc1042 += hdr_len + crypto_len;
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);

	return rfc1042;
}

static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
				       struct sk_buff *msdu,
				       u8 *first_hdr,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	void *rfc1042;

	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
	       sizeof(struct ath11k_dp_rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
				   struct hal_rx_desc *rx_desc,
				   enum hal_encrypt_type enctype,
				   struct ieee80211_rx_status *status,
				   bool decrypted)
{
	u8 *first_hdr;
	u8 decap;

	first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
	decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc);

	switch (decap) {
	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
		ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
					     enctype, status);
		break;
	case DP_RX_DECAP_TYPE_RAW:
		ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
					   decrypted);
		break;
	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
		/* TODO: undecap support for middle/last msdus of an A-MSDU */
		ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
					   enctype, status);
		break;
	case DP_RX_DECAP_TYPE_8023:
		/* TODO: Handle undecap for these formats */
		break;
	}
}

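/* Per-MPDU receive handling: look up the transmitting peer to determine
 * the cipher, translate hardware error bits into mac80211 rx flags, apply
 * checksum offload results and undecap the frame. The PN of multicast
 * frames is left for mac80211 to validate, so their crypto header is kept.
 */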
static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
				struct sk_buff *msdu,
				struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	bool fill_crypto_hdr, mcast;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	struct ieee80211_hdr *hdr;
	struct ath11k_peer *peer;
	u32 err_bitmap;

	hdr = (struct ieee80211_hdr *)msdu->data;

	/* PN for multicast packets will be checked in mac80211 */

	mcast = is_multicast_ether_addr(hdr->addr1);
	fill_crypto_hdr = mcast;

	is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
	if (peer) {
		if (mcast)
			enctype = peer->sec_type_grp;
		else
			enctype = peer->sec_type;
	} else {
		enctype = HAL_ENCRYPT_TYPE_OPEN;
	}
	spin_unlock_bh(&ar->ab->base_lock);

	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted) {
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;

		if (fill_crypto_hdr)
			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					   RX_FLAG_ICV_STRIPPED;
		else
			rx_status->flag |= RX_FLAG_IV_STRIPPED |
					   RX_FLAG_PN_VALIDATED;
	}

	ath11k_dp_rx_h_csum_offload(msdu);
	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       enctype, rx_status, is_decrypted);

	if (!is_decrypted || fill_crypto_hdr)
		return;

	hdr = (void *)msdu->data;
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}

static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_supported_band *sband;
	enum rx_msdu_start_pkt_type pkt_type;
	u8 bw;
	u8 rate_mcs, nss;
	u8 sgi;
	bool is_cck;

	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc);
	bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc);
	rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc);
	nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc);
	sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc);

	switch (pkt_type) {
	case RX_MSDU_START_PKT_TYPE_11A:
	case RX_MSDU_START_PKT_TYPE_11B:
		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
		sband = &ar->mac.sbands[rx_status->band];
		rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
								is_cck);
		break;
	case RX_MSDU_START_PKT_TYPE_11N:
		rx_status->encoding = RX_ENC_HT;
		if (rate_mcs > ATH11K_HT_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "received invalid mcs %d in HT mode\n",
				    rate_mcs);
			break;
		}
		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11AC:
		rx_status->encoding = RX_ENC_VHT;
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "received invalid mcs %d in VHT mode\n",
				    rate_mcs);
			break;
		}
		rx_status->nss = nss;
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11AX:
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH11K_HE_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "received invalid mcs %d in HE mode\n",
				    rate_mcs);
			break;
		}
		rx_status->encoding = RX_ENC_HE;
		rx_status->nss = nss;
		rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	}
}

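/* Fill the per-PPDU fields of the rx status: derive the band from the
 * channel number / center frequency reported in the rx descriptor (with a
 * fallback to the pdev's current rx channel) and then decode the rate
 * fields via ath11k_dp_rx_h_rate().
 */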
static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	u8 channel_num;
	u32 center_freq;

	rx_status->freq = 0;
	rx_status->rate_idx = 0;
	rx_status->nss = 0;
	rx_status->encoding = RX_ENC_LEGACY;
	rx_status->bw = RATE_INFO_BW_20;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
	center_freq = ath11k_dp_rx_h_msdu_start_freq(rx_desc) >> 16;

	if (center_freq >= 5935 && center_freq <= 7105) {
		rx_status->band = NL80211_BAND_6GHZ;
	} else if (channel_num >= 1 && channel_num <= 14) {
		rx_status->band = NL80211_BAND_2GHZ;
	} else if (channel_num >= 36 && channel_num <= 173) {
		rx_status->band = NL80211_BAND_5GHZ;
	} else {
		spin_lock_bh(&ar->data_lock);
		rx_status->band = ar->rx_channel->band;
		channel_num =
			ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
		spin_unlock_bh(&ar->data_lock);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
				rx_desc, sizeof(struct hal_rx_desc));
	}

	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
							 rx_status->band);

	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}

static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
				  size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
				      struct sk_buff *msdu)
{
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
	};
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_radiotap_he *he = NULL;
	char tid[32];

	status = IEEE80211_SKB_RXCB(msdu);
	if (status->encoding == RX_ENC_HE) {
		he = skb_push(msdu, sizeof(known));
		memcpy(he, &known, sizeof(known));
		status->flag |= RX_FLAG_RADIOTAP_HE;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   msdu,
		   msdu->len,
		   ieee80211_get_SA(hdr),
		   ath11k_print_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
		   "mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->encoding == RX_ENC_HE) ? "he" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));

	/* TODO: trace rx packet */

	ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
}

static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
				     struct sk_buff *msdu,
				     struct sk_buff_head *msdu_list)
{
	struct hal_rx_desc *rx_desc, *lrx_desc;
	struct ieee80211_rx_status rx_status = {0};
	struct ieee80211_rx_status *status;
	struct ath11k_skb_rxcb *rxcb;
	struct ieee80211_hdr *hdr;
	struct sk_buff *last_buf;
	u8 l3_pad_bytes;
	u8 *hdr_status;
	u16 msdu_len;
	int ret;

	last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
	if (!last_buf) {
		ath11k_warn(ar->ab,
			    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
		ret = -EIO;
		goto free_out;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	lrx_desc = (struct hal_rx_desc *)last_buf->data;
	if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
		ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
		ret = -EIO;
		goto free_out;
	}

	rxcb = ATH11K_SKB_RXCB(msdu);
	rxcb->rx_desc = rx_desc;
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
	l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);

	if (rxcb->is_frag) {
		skb_pull(msdu, HAL_RX_DESC_SIZE);
	} else if (!rxcb->is_continuation) {
		if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
			hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
			ret = -EINVAL;
			ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
			ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
					sizeof(struct ieee80211_hdr));
			ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
					sizeof(struct hal_rx_desc));
			goto free_out;
		}
		skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
		skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
	} else {
		ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
						 msdu, last_buf,
						 l3_pad_bytes, msdu_len);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to coalesce msdu rx buffer %d\n", ret);
			goto free_out;
		}
	}

	hdr = (struct ieee80211_hdr *)msdu->data;

	/* Process only data frames */
	if (!ieee80211_is_data(hdr->frame_control))
		return -EINVAL;

	ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status);
	ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status);

	rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;

	status = IEEE80211_SKB_RXCB(msdu);
	*status = rx_status;
	return 0;

free_out:
	return ret;
}

static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
						  struct napi_struct *napi,
						  struct sk_buff_head *msdu_list,
						  int *quota, int ring_id)
{
	struct ath11k_skb_rxcb *rxcb;
	struct sk_buff *msdu;
	struct ath11k *ar;
	u8 mac_id;
	int ret;

	if (skb_queue_empty(msdu_list))
		return;

	rcu_read_lock();

	while (*quota && (msdu = __skb_dequeue(msdu_list))) {
		rxcb = ATH11K_SKB_RXCB(msdu);
		mac_id = rxcb->mac_id;
		ar = ab->pdevs[mac_id].ar;
		if (!rcu_dereference(ab->pdevs_active[mac_id])) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list);
		if (ret) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "Unable to process msdu %d\n", ret);
			dev_kfree_skb_any(msdu);
			continue;
		}

		ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
		(*quota)--;
	}

	rcu_read_unlock();
}

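/* NAPI handler for a REO destination ring: reap up to 'budget' entries,
 * unmap and dequeue the corresponding rx buffers, replenish each pdev's
 * refill ring for the buffers consumed and then hand the collected MSDU
 * list over for per-MSDU processing and delivery to mac80211.
 */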
int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
			 struct napi_struct *napi, int budget)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	struct sk_buff_head msdu_list;
	struct ath11k_skb_rxcb *rxcb;
	int total_msdu_reaped = 0;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	int quota = budget;
	bool done = false;
	int buf_id, mac_id;
	struct ath11k *ar;
	u32 *rx_desc;
	int i;

	__skb_queue_head_init(&msdu_list);

	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

try_again:
	while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc;
		enum hal_reo_dest_ring_push_reason push_reason;
		u32 cookie;

		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				   desc.buf_addr_info.info1);
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
				   cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;
		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
				    buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;
		total_msdu_reaped++;

		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
					desc.info0);
		if (push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			dev_kfree_skb_any(msdu);
			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
			continue;
		}

		rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 &
					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 &
					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		rxcb->mac_id = mac_id;
		rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
				      desc.info0);

		__skb_queue_tail(&msdu_list, msdu);

		if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
			done = true;
			break;
		}
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with the updated
	 * cached head pointer so that we can reap the complete MPDU in
	 * the current rx processing.
	 */
	if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
		ath11k_hal_srng_access_end(ab, srng);
		goto try_again;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_msdu_reaped)
		goto exit;

	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}

	ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
					      &quota, ring_id);

exit:
	return budget - quota;
}

static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
					   struct hal_rx_mon_ppdu_info *ppdu_info)
{
	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
	u32 num_msdu;

	if (!rx_stats)
		return;

	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;

	rx_stats->num_msdu += num_msdu;
	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
				    ppdu_info->tcp_ack_msdu_count;
	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;

	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
		ppdu_info->nss = 1;
		ppdu_info->mcs = HAL_RX_MAX_MCS;
		ppdu_info->tid = IEEE80211_NUM_TIDS;
	}

	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;

	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;

	if (ppdu_info->gi < HAL_RX_GI_MAX)
		rx_stats->gi_count[ppdu_info->gi] += num_msdu;

	if (ppdu_info->bw < HAL_RX_BW_MAX)
		rx_stats->bw_count[ppdu_info->bw] += num_msdu;

	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;

	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
		rx_stats->tid_count[ppdu_info->tid] += num_msdu;

	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;

	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;

	if (ppdu_info->is_stbc)
		rx_stats->stbc_count += num_msdu;

	if (ppdu_info->beamformed)
		rx_stats->beamformed_count += num_msdu;

	if (ppdu_info->num_mpdu_fcs_ok > 1)
		rx_stats->ampdu_msdu_count += num_msdu;
	else
		rx_stats->non_ampdu_msdu_count += num_msdu;

	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
	rx_stats->dcm_count += ppdu_info->dcm;
	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;

	arsta->rssi_comb = ppdu_info->rssi_comb;
	rx_stats->rx_duration += ppdu_info->rx_duration;
	arsta->rx_duration = rx_stats->rx_duration;
}

static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
							 struct dp_rxdma_ring *rx_ring,
							 int *buf_id, gfp_t gfp)
{
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
			    DP_RX_BUFFER_ALIGN_SIZE);

	if (!skb)
		goto fail_alloc_skb;

	if (!IS_ALIGNED((unsigned long)skb->data,
			DP_RX_BUFFER_ALIGN_SIZE)) {
		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
			 skb->data);
	}

	paddr = dma_map_single(ab->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(ab->dev, paddr)))
		goto fail_free_skb;

	spin_lock_bh(&rx_ring->idr_lock);
	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
			    rx_ring->bufs_max, gfp);
	spin_unlock_bh(&rx_ring->idr_lock);
	if (*buf_id < 0)
		goto fail_dma_unmap;

	ATH11K_SKB_RXCB(skb)->paddr = paddr;
	return skb;

fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_BIDIRECTIONAL);
fail_free_skb:
	dev_kfree_skb_any(skb);
fail_alloc_skb:
	return NULL;
}

int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
					   struct dp_rxdma_ring *rx_ring,
					   int req_entries,
					   enum hal_rx_buf_return_buf_manager mgr,
					   gfp_t gfp)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id, gfp);
		if (!skb)
			break;
		paddr = ATH11K_SKB_RXCB(skb)->paddr;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_desc_get;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_desc_get:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_BIDIRECTIONAL);
	dev_kfree_skb_any(skb);
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

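/* Reap completed entries from the monitor status refill ring: consume a
 * buffer only when its HAL_RX_STATUS_BUFFER_DONE tlv tag is set, then
 * immediately re-arm the same ring entry with a freshly allocated status
 * buffer so the ring never runs dry.
 */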
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
					     int *budget, struct sk_buff_head *skb_list)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring;
	struct hal_srng *srng;
	void *rx_mon_status_desc;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_tlv_hdr *tlv;
	u32 cookie;
	int buf_id;
	dma_addr_t paddr;
	u8 rbm;
	int num_buffs_reaped = 0;

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);
	while (*budget) {
		*budget -= 1;
		rx_mon_status_desc =
			ath11k_hal_srng_src_peek(ab, srng);
		if (!rx_mon_status_desc)
			break;

		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
						&cookie, &rbm);
		if (paddr) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				goto move_next;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);

			dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
						skb->len + skb_tailroom(skb),
						DMA_FROM_DEVICE);

			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_BIDIRECTIONAL);

			tlv = (struct hal_tlv_hdr *)skb->data;
			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
			    HAL_RX_STATUS_BUFFER_DONE) {
				ath11k_warn(ab, "mon status DONE not set %lx\n",
					    FIELD_GET(HAL_TLV_HDR_TAG,
						      tlv->tl));
				dev_kfree_skb_any(skb);
				goto move_next;
			}

			__skb_queue_tail(skb_list, skb);
		}
move_next:
		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id, GFP_ATOMIC);

		if (!skb) {
			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
							HAL_RX_BUF_RBM_SW3_BM);
			num_buffs_reaped++;
			break;
		}
		rxcb = ATH11K_SKB_RXCB(skb);

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
						cookie, HAL_RX_BUF_RBM_SW3_BM);
		ath11k_hal_srng_src_get_next_entry(ab, srng);
		num_buffs_reaped++;
	}
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return num_buffs_reaped;
}

int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	enum hal_rx_mon_status hal_status;
	struct sk_buff *skb;
	struct sk_buff_head skb_list;
	struct hal_rx_mon_ppdu_info ppdu_info;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	int num_buffs_reaped = 0;

	__skb_queue_head_init(&skb_list);

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
							     &skb_list);
	if (!num_buffs_reaped)
		goto exit;

	while ((skb = __skb_dequeue(&skb_list))) {
		memset(&ppdu_info, 0, sizeof(ppdu_info));
		ppdu_info.peer_id = HAL_INVALID_PEERID;

		if (ath11k_debug_is_pktlog_rx_stats_enabled(ar))
			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);

		hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);

		if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
			dev_kfree_skb_any(skb);
			continue;
		}

		rcu_read_lock();
		spin_lock_bh(&ab->base_lock);
		peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);

		if (!peer || !peer->sta) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "failed to find the peer with peer_id %d\n",
				   ppdu_info.peer_id);
			spin_unlock_bh(&ab->base_lock);
			rcu_read_unlock();
			dev_kfree_skb_any(skb);
			continue;
		}

		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
		ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);

		if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr))
			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);

		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();

		dev_kfree_skb_any(skb);
	}
exit:
	return num_buffs_reaped;
}

static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
	struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);

	spin_lock_bh(&rx_tid->ab->base_lock);
	if (rx_tid->last_frag_no &&
	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
		spin_unlock_bh(&rx_tid->ab->base_lock);
		return;
	}
	ath11k_dp_rx_frags_cleanup(rx_tid, true);
	spin_unlock_bh(&rx_tid->ab->base_lock);
}

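/* Prepare a peer for defragmentation: allocate a michael_mic shash for
 * TKIP MIC verification and initialize the per-TID fragment queues and
 * reassembly timers.
 */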
int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
{
	struct ath11k_base *ab = ar->ab;
	struct crypto_shash *tfm;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	int i;

	tfm = crypto_alloc_shash("michael_mic", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];
		rx_tid->ab = ab;
		timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
		skb_queue_head_init(&rx_tid->rx_frags);
	}

	peer->tfm_mmic = tfm;
	spin_unlock_bh(&ab->base_lock);

	return 0;
}

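/* Compute the Michael MIC over the TKIP pseudo header (DA, SA, priority
 * plus three zero bytes) followed by the payload, using the peer's
 * michael_mic transform.
 */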
static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
				      struct ieee80211_hdr *hdr, u8 *data,
				      size_t data_len, u8 *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	u8 mic_hdr[16] = {0};
	u8 tid = 0;
	int ret;

	if (!tfm)
		return -EINVAL;

	desc->tfm = tfm;

	ret = crypto_shash_setkey(tfm, key, 8);
	if (ret)
		goto out;

	ret = crypto_shash_init(desc);
	if (ret)
		goto out;

	/* TKIP MIC header */
	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	mic_hdr[12] = tid;

	ret = crypto_shash_update(desc, mic_hdr, 16);
	if (ret)
		goto out;
	ret = crypto_shash_update(desc, data, data_len);
	if (ret)
		goto out;
	ret = crypto_shash_final(desc, mic);
out:
	shash_desc_zero(desc);
	return ret;
}

static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
					  struct sk_buff *msdu)
{
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
	struct ieee80211_key_conf *key_conf;
	struct ieee80211_hdr *hdr;
	u8 mic[IEEE80211_CCMP_MIC_LEN];
	int head_len, tail_len, ret;
	size_t data_len;
	u32 hdr_len;
	u8 *key, *data;
	u8 key_idx;

	if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
		return 0;

	hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN;
	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;

	if (!is_multicast_ether_addr(hdr->addr1))
		key_idx = peer->ucast_keyidx;
	else
		key_idx = peer->mcast_keyidx;

	key_conf = peer->keys[key_idx];

	data = msdu->data + head_len;
	data_len = msdu->len - head_len - tail_len;
	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];

	ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
		goto mic_fail;

	return 0;

mic_fail:
	(ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
	(ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;

	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
		     RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
	skb_pull(msdu, HAL_RX_DESC_SIZE);

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
	ieee80211_rx(ar->hw, msdu);
	return -EINVAL;
}

static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
					enum hal_encrypt_type enctype, u32 flags)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	if (!flags)
		return;

	hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);

	if (flags & RX_FLAG_MIC_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

	if (flags & RX_FLAG_ICV_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));

	if (flags & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len,
			(void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
				 struct ath11k_peer *peer,
				 struct dp_rx_tid *rx_tid,
				 struct sk_buff **defrag_skb)
{
	struct hal_rx_desc *rx_desc;
	struct sk_buff *skb, *first_frag, *last_frag;
	struct ieee80211_hdr *hdr;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	int msdu_len = 0;
	int extra_space;
	u32 flags;

	first_frag = skb_peek(&rx_tid->rx_frags);
	last_frag = skb_peek_tail(&rx_tid->rx_frags);

	skb_queue_walk(&rx_tid->rx_frags, skb) {
		flags = 0;
		rx_desc = (struct hal_rx_desc *)skb->data;
		hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);

		enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
		if (enctype != HAL_ENCRYPT_TYPE_OPEN)
			is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);

		if (is_decrypted) {
			if (skb != first_frag)
				flags |= RX_FLAG_IV_STRIPPED;
			if (skb != last_frag)
				flags |= RX_FLAG_ICV_STRIPPED |
					 RX_FLAG_MIC_STRIPPED;
		}

		/* RX fragments are always raw packets */
		if (skb != last_frag)
			skb_trim(skb, skb->len - FCS_LEN);
		ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);

		if (skb != first_frag)
			skb_pull(skb, HAL_RX_DESC_SIZE +
				 ieee80211_hdrlen(hdr->frame_control));
		msdu_len += skb->len;
	}

	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
	if (extra_space > 0 &&
	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
		return -ENOMEM;

	__skb_unlink(first_frag, &rx_tid->rx_frags);
	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
		skb_put_data(first_frag, skb->data, skb->len);
		dev_kfree_skb_any(skb);
	}

	hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE);
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
	ATH11K_SKB_RXCB(first_frag)->is_frag = 1;

	if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
		first_frag = NULL;

	*defrag_skb = first_frag;
	return 0;
}

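/* Reinject a defragmented MPDU into hardware: rewrite the first msdu link
 * descriptor to describe the single coalesced buffer, patch the msdu
 * length in the hal rx descriptor, then queue an entry on the REO
 * entrance (reinject) ring so the MPDU takes the regular REO path again
 * before delivery.
 */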
static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
					      struct sk_buff *defrag_skb)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
	struct hal_reo_entrance_ring *reo_ent_ring;
	struct hal_reo_dest_ring *reo_dest_ring;
	struct dp_link_desc_bank *link_desc_banks;
	struct hal_rx_msdu_link *msdu_link;
	struct hal_rx_msdu_details *msdu0;
	struct hal_srng *srng;
	dma_addr_t paddr;
	u32 desc_bank, msdu_info, mpdu_info;
	u32 dst_idx, cookie;
	u32 *msdu_len_offset;
	int ret, buf_id;

	link_desc_banks = ab->dp.link_desc_banks;
	reo_dest_ring = rx_tid->dst_ring_desc;

	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
						(paddr - link_desc_banks[desc_bank].paddr));
	msdu0 = &msdu_link->msdu_link[0];
	dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
	memset(msdu0, 0, sizeof(*msdu0));

	msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
			       defrag_skb->len - HAL_RX_DESC_SIZE) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
	msdu0->rx_msdu_info.info0 = msdu_info;

	/* change msdu len in hal rx desc */
	msdu_len_offset = (u32 *)&rx_desc->msdu_start;
	*msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH);
	*msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE;

	paddr = dma_map_single(ab->dev, defrag_skb->data,
			       defrag_skb->len + skb_tailroom(defrag_skb),
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(ab->dev, paddr))
		return -ENOMEM;

	spin_lock_bh(&rx_refill_ring->idr_lock);
	buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
			   rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
	spin_unlock_bh(&rx_refill_ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOMEM;
		goto err_unmap_dma;
	}

	ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM);

	/* Fill mpdu details into reo entrance ring */
	srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];

	spin_lock_bh(&srng->lock);
	ath11k_hal_srng_access_begin(ab, srng);

	reo_ent_ring = (struct hal_reo_entrance_ring *)
			ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!reo_ent_ring) {
		ath11k_hal_srng_access_end(ab, srng);
		spin_unlock_bh(&srng->lock);
		ret = -ENOSPC;
		goto err_free_idr;
	}
	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));

	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
	ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);

	mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);

	reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
	reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
	reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
	reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
					 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
						   reo_dest_ring->info0)) |
			      FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return 0;

err_free_idr:
	spin_lock_bh(&rx_refill_ring->idr_lock);
	idr_remove(&rx_refill_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_refill_ring->idr_lock);
err_unmap_dma:
	dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
			 DMA_FROM_DEVICE);
	return ret;
}

static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b)
{
	int frag1, frag2;

	frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a);
	frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b);

	return frag1 - frag2;
}

static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list,
				      struct sk_buff *cur_frag)
{
	struct sk_buff *skb;
	int cmp;

	skb_queue_walk(frag_list, skb) {
		cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag);
		if (cmp < 0)
			continue;
		__skb_queue_before(frag_list, skb, cur_frag);
		return;
	}
	__skb_queue_tail(frag_list, cur_frag);
}

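/* Extract the 48-bit CCMP/GCMP packet number from the IV that follows the
 * 802.11 header: PN0/PN1 live in IV bytes 0-1 and PN2-PN5 in bytes 4-7
 * (byte 2 is reserved and byte 3 carries the key ID / ExtIV flag).
 */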
static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	u64 pn = 0;
	u8 *ehdr;

	hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
	ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control);

	pn = ehdr[0];
	pn |= (u64)ehdr[1] << 8;
	pn |= (u64)ehdr[4] << 16;
	pn |= (u64)ehdr[5] << 24;
	pn |= (u64)ehdr[6] << 32;
	pn |= (u64)ehdr[7] << 40;

	return pn;
}

static bool
ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
{
	enum hal_encrypt_type encrypt_type;
	struct sk_buff *first_frag, *skb;
	struct hal_rx_desc *desc;
	u64 last_pn;
	u64 cur_pn;

	first_frag = skb_peek(&rx_tid->rx_frags);
	desc = (struct hal_rx_desc *)first_frag->data;

	encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc);
	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
		return true;

	last_pn = ath11k_dp_rx_h_get_pn(first_frag);
	skb_queue_walk(&rx_tid->rx_frags, skb) {
		if (skb == first_frag)
			continue;

		cur_pn = ath11k_dp_rx_h_get_pn(skb);
		if (cur_pn != last_pn + 1)
			return false;
		last_pn = cur_pn;
	}
	return true;
}

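/* Collect one rx fragment: validate the frame, insert it into the TID's
 * fragment queue sorted by fragment number, and (re)arm the reassembly
 * timer. Once all fragments up to the one with the "more fragments" bit
 * cleared have arrived, verify PN continuity and the TKIP MIC,
 * defragment, and reinject the full MPDU via the REO entrance ring.
 */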
static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
				    struct sk_buff *msdu,
				    u32 *ring_desc)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	struct sk_buff *defrag_skb = NULL;
	u32 peer_id;
	u16 seqno, frag_no;
	u8 tid;
	int ret = 0;
	bool more_frags;

	rx_desc = (struct hal_rx_desc *)msdu->data;
	peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc);
	tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc);
	seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc);
	frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu);
	more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu);

	if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) ||
	    !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) ||
	    tid > IEEE80211_NUM_TIDS)
		return -EINVAL;

	/* An unfragmented packet arrived in the reo exception ring;
	 * this should not happen, as such packets typically come
	 * from the reo2sw srngs.
	 */
	if (WARN_ON_ONCE(!frag_no && !more_frags))
		return -EINVAL;

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, peer_id);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
			    peer_id);
		ret = -ENOENT;
		goto out_unlock;
	}
	rx_tid = &peer->rx_tid[tid];

	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
	    skb_queue_empty(&rx_tid->rx_frags)) {
		/* Flush stored fragments and start a new sequence */
		ath11k_dp_rx_frags_cleanup(rx_tid, true);
		rx_tid->cur_sn = seqno;
	}

	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
		/* Fragment already present */
		ret = -EINVAL;
		goto out_unlock;
	}

	if (frag_no > __fls(rx_tid->rx_frag_bitmap))
		__skb_queue_tail(&rx_tid->rx_frags, msdu);
	else
		ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu);

	rx_tid->rx_frag_bitmap |= BIT(frag_no);
	if (!more_frags)
		rx_tid->last_frag_no = frag_no;

	if (frag_no == 0) {
		rx_tid->dst_ring_desc = kmemdup(ring_desc,
						sizeof(*rx_tid->dst_ring_desc),
						GFP_ATOMIC);
		if (!rx_tid->dst_ring_desc) {
			ret = -ENOMEM;
			goto out_unlock;
		}
	} else {
		ath11k_dp_rx_link_desc_return(ab, ring_desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	if (!rx_tid->last_frag_no ||
	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
		mod_timer(&rx_tid->frag_timer, jiffies +
					       ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
		goto out_unlock;
	}

	spin_unlock_bh(&ab->base_lock);
	del_timer_sync(&rx_tid->frag_timer);
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find_by_id(ab, peer_id);
	if (!peer)
		goto err_frags_cleanup;

	if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
		goto err_frags_cleanup;

	if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
		goto err_frags_cleanup;

	if (!defrag_skb)
		goto err_frags_cleanup;

	if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
		goto err_frags_cleanup;

	ath11k_dp_rx_frags_cleanup(rx_tid, false);
	goto out_unlock;

err_frags_cleanup:
	dev_kfree_skb_any(defrag_skb);
	ath11k_dp_rx_frags_cleanup(rx_tid, true);
out_unlock:
	spin_unlock_bh(&ab->base_lock);
	return ret;
}

static int
ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	struct sk_buff *msdu;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_rx_desc *rx_desc;
	u8 *hdr_status;
	u16 msdu_len;

	spin_lock_bh(&rx_ring->idr_lock);
	msdu = idr_find(&rx_ring->bufs_idr, buf_id);
	if (!msdu) {
		ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
			    buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);
		return -EINVAL;
	}

	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);

	rxcb = ATH11K_SKB_RXCB(msdu);
	dma_unmap_single(ar->ab->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);

	if (drop) {
		dev_kfree_skb_any(msdu);
		return 0;
	}

	rcu_read_lock();
	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
	if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
		hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
		ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
				sizeof(struct ieee80211_hdr));
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
				sizeof(struct hal_rx_desc));
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);

	if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
		dev_kfree_skb_any(msdu);
		ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}
exit:
	rcu_read_unlock();
	return 0;
}

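/* NAPI handler for the REO exception ring: parse each error descriptor,
 * hand valid single-msdu rx fragments to the defrag path and drop
 * everything else, returning link descriptors to the WBM idle list and
 * replenishing the rx buffers that were consumed.
 */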
int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
			     int budget)
{
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	struct dp_link_desc_bank *link_desc_banks;
	enum hal_rx_buf_return_buf_manager rbm;
	int tot_n_bufs_reaped, quota, ret, i;
	int n_bufs_reaped[MAX_RADIOS] = {0};
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	u32 desc_bank, num_msdus;
	struct hal_srng *srng;
	struct ath11k_dp *dp;
	void *link_desc_va;
	int buf_id, mac_id;
	struct ath11k *ar;
	dma_addr_t paddr;
	u32 *desc;
	bool is_frag;
	u8 drop = 0;

	tot_n_bufs_reaped = 0;
	quota = budget;

	dp = &ab->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &ab->hal.srng_list[reo_except->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;

		ab->soc_stats.err_ring_pkts++;
		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
						    &desc_bank);
		if (ret) {
			ath11k_warn(ab, "failed to parse error reo desc %d\n",
				    ret);
			continue;
		}
		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
						 &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
			ab->soc_stats.invalid_rbm++;
			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Below, process only rx fragments with one msdu per link
		 * desc and drop msdus flagged with error reasons.
		 */
		if (!is_frag || num_msdus > 1) {
			drop = 1;
			/* Return the link desc back to wbm idle list */
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);

			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
					   msdu_cookies[i]);

			ar = ab->pdevs[mac_id].ar;

			if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
				n_bufs_reaped[mac_id]++;
				tot_n_bufs_reaped++;
			}
		}

		if (tot_n_bufs_reaped >= quota) {
			tot_n_bufs_reaped = quota;
			goto exit;
		}

		budget = quota - tot_n_bufs_reaped;
	}

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	for (i = 0; i < ab->num_radios; i++) {
		if (!n_bufs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}

	return tot_n_bufs_reaped;
}

static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
					     int msdu_len,
					     struct sk_buff_head *msdu_list)
{
	struct sk_buff *skb, *tmp;
	struct ath11k_skb_rxcb *rxcb;
	int n_buffs;

	n_buffs = DIV_ROUND_UP(msdu_len,
			       (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));

	skb_queue_walk_safe(msdu_list, skb, tmp) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
			if (!n_buffs)
				break;
			__skb_unlink(skb, msdu_list);
			dev_kfree_skb_any(skb);
			n_buffs--;
		}
	}
}

static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
				      struct ieee80211_rx_status *status,
				      struct sk_buff_head *msdu_list)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);

	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);

	if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) {
		/* First buffer will be freed by the caller, so deduct its length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
		ath11k_warn(ar->ab,
			    "msdu_done bit not set in null_q_desc processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out of a missing
	 * REO queue for a given peer or TID. This typically happens if a
	 * packet is received on a QoS-enabled TID before the ADDBA
	 * negotiation for that TID has set up the TID queue. It may also
	 * happen for MC/BC frames if they are not routed to the non-QoS TID
	 * queue, in the absence of any other default TID queue. This error
	 * can show up both in a REO destination and in a WBM release ring.
	 */

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);

	if (rxcb->is_frag) {
		skb_pull(msdu, HAL_RX_DESC_SIZE);
	} else {
		l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);

		if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
		skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
	}
	ath11k_dp_rx_h_ppdu(ar, desc, status);

	ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);

	rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc);

	/* Note that the caller still has access to the msdu and will
	 * complete rx processing with mac80211, so there is no need to
	 * clean up the amsdu_list here.
	 */

	return 0;
}

static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
				   struct ieee80211_rx_status *status,
				   struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
			drop = true;
		break;
	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
		/* TODO: Do not drop PN failed packets in the driver;
		 * instead, it is good to drop such packets in mac80211
		 * after incrementing the replay counters.
		 */
		fallthrough;
	default:
		/* TODO: Review other errors and process them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}

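/* Recover an MSDU that failed the TKIP MIC check.  The frame is still
 * delivered to mac80211, with RX_FLAG_MMIC_ERROR set so that mac80211
 * can run its TKIP countermeasures (MIC failure reporting, and
 * disassociation on repeated failures) instead of the frame being
 * dropped silently.
 */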
static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
					struct ieee80211_rx_status *status)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);

	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);

	ath11k_dp_rx_h_ppdu(ar, desc, status);

	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
			 RX_FLAG_DECRYPTED);

	ath11k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}

static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
				     struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
		break;
	default:
		/* TODO: Review other rxdma error codes to check if anything
		 * is worth reporting to mac80211.
		 */
		drop = true;
		break;
	}

	return drop;
}

static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	struct ieee80211_rx_status *status;
	bool drop = true;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	status = IEEE80211_SKB_RXCB(msdu);
	*status = rxs;

	ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
}

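/* Process the WBM error release ring: reap erroneous MSDUs, sort them
 * per pdev using the DP_RXDMA_BUF_COOKIE_PDEV_ID/BUF_ID fields packed
 * into each descriptor's cookie, replenish the per-pdev refill rings,
 * and then run the per-MSDU error handlers outside the srng lock.
 */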
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct ath11k *ar;
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list[MAX_RADIOS];
	struct ath11k_skb_rxcb *rxcb;
	u32 *rx_desc;
	int buf_id, mac_id;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	int total_num_buffs_reaped = 0;
	int ret, i;

	for (i = 0; i < MAX_RADIOS; i++)
		__skb_queue_head_init(&msdu_list[i]);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath11k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
				    buf_id, mac_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;
		total_num_buffs_reaped++;
		budget--;

		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
		__skb_queue_tail(&msdu_list[mac_id], msdu);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_num_buffs_reaped)
		goto done;

	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}

	rcu_read_lock();
	for (i = 0; i < ab->num_radios; i++) {
		if (!rcu_dereference(ab->pdevs_active[i])) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		ar = ab->pdevs[i].ar;

		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
	}
	rcu_read_unlock();
done:
	return total_num_buffs_reaped;
}

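/* Drain the per-pdev RXDMA error destination ring.  Each entry points at
 * a link descriptor holding up to HAL_NUM_RX_MSDUS_PER_LINK_DESC MSDU
 * cookies; the corresponding rx buffers are unmapped and freed, the link
 * descriptor is returned to the idle list, and the refill ring is
 * replenished with as many buffers as were reclaimed.
 */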
int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
	struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring;
	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
	struct hal_srng *srng;
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	enum hal_rx_buf_return_buf_manager rbm;
	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
	struct ath11k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	struct hal_reo_entrance_ring *entr_ring;
	void *desc;
	int num_buf_freed = 0;
	int quota = budget;
	dma_addr_t paddr;
	u32 desc_bank;
	void *link_desc_va;
	int num_msdus;
	int i;
	int buf_id;

	srng = &ab->hal.srng_list[err_ring->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (quota-- &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);

		entr_ring = (struct hal_reo_entrance_ring *)desc;
		rxdma_err_code =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  entr_ring->info1);
		ab->soc_stats.rxdma_error[rxdma_err_code]++;

		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
						 msdu_cookies, &rbm);

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				continue;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);
			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);

			num_buf_freed++;
		}

		ath11k_dp_rx_link_desc_return(ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (num_buf_freed)
		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);

	return budget - quota;
}

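/* Reap the REO status ring and complete pending REO commands.  Each
 * status TLV carries the cmd_num of the command it acknowledges; the
 * matching entry is removed from dp->reo_cmd_list under reo_cmd_lock and
 * its handler is invoked with the parsed hal_reo_status.
 */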
void ath11k_dp_process_reo_status(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	struct dp_reo_cmd *cmd, *tmp;
	bool found = false;
	u32 *reo_desc;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);

		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
								  &reo_status);
			break;
		default:
			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		found = false;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}

void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;

	ath11k_dp_rx_pdev_srng_free(ar);
	ath11k_dp_rxdma_pdev_buf_free(ar);
}

int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int ret;

	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	ring_id = dp->rxdma_err_dst_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n",
			    ret);
		return ret;
	}

	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
					  mac_id, HAL_RXDMA_MONITOR_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
			    ret);
		return ret;
	}

	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_dst_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DST);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}

	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_desc_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DESC);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n",
			    ret);
		return ret;
	}

	ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id,
					  HAL_RXDMA_MONITOR_STATUS);
	if (ret) {
		ath11k_warn(ab,
			    "failed to configure mon_status_refill_ring %d\n",
			    ret);
		return ret;
	}

	return 0;
}

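/* Split a (possibly buffer-spanning) MSDU length into per-buffer fragment
 * lengths.  Each monitor rx buffer holds at most DP_RX_BUFFER_SIZE minus
 * the hal_rx_desc header.  Worked example with illustrative sizes only
 * (DP_RX_BUFFER_SIZE = 2048, sizeof(struct hal_rx_desc) = 256, so 1792
 * payload bytes per buffer): total_len = 4000 yields frag_len = 1792
 * (total_len 2208), then 1792 (total_len 416), then 416 (total_len 0).
 */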
static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
{
	if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
		*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
		*total_len -= *frag_len;
	} else {
		*frag_len = *total_len;
		*total_len = 0;
	}
}

static
int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
					  void *p_last_buf_addr_info,
					  u8 mac_id)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_srng *dp_srng;
	void *hal_srng;
	void *src_srng_desc;
	int ret = 0;

	dp_srng = &dp->rxdma_mon_desc_ring;
	hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];

	ath11k_hal_srng_access_begin(ar->ab, hal_srng);

	src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);

	if (src_srng_desc) {
		struct ath11k_buffer_addr *src_desc =
				(struct ath11k_buffer_addr *)src_srng_desc;

		*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
	} else {
		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "Monitor Link Desc Ring %d Full\n", mac_id);
		ret = -ENOMEM;
	}

	ath11k_hal_srng_access_end(ar->ab, hal_srng);
	return ret;
}

static
void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
					 dma_addr_t *paddr, u32 *sw_cookie,
					 void **pp_buf_addr_info)
{
	struct hal_rx_msdu_link *msdu_link =
			(struct hal_rx_msdu_link *)rx_msdu_link_desc;
	struct ath11k_buffer_addr *buf_addr_info;
	u8 rbm = 0;

	buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;

	ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm);

	*pp_buf_addr_info = (void *)buf_addr_info;
}

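/* Resize an skb to exactly @len bytes: trim if it is longer, otherwise
 * grow the tail, expanding the skb's data area via pskb_expand_head()
 * when the tailroom is insufficient.  On allocation failure the skb is
 * freed and -ENOMEM is returned, so callers must not touch the skb
 * afterwards.
 */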
static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if ((pskb_expand_head(skb, 0,
					      len - skb->len - skb_tailroom(skb),
					      GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				return -ENOMEM;
			}
		}
		skb_put(skb, (len - skb->len));
	}

	return 0;
}

static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
					void *msdu_link_desc,
					struct hal_rx_msdu_list *msdu_list,
					u16 *num_msdus)
{
	struct hal_rx_msdu_details *msdu_details = NULL;
	struct rx_msdu_desc *msdu_desc_info = NULL;
	struct hal_rx_msdu_link *msdu_link = NULL;
	int i;
	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
	u8 tmp = 0;

	msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
	msdu_details = &msdu_link->msdu_link[0];

	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
			      msdu_details[i].buf_addr_info.info0) == 0) {
			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
			msdu_desc_info->info0 |= last;
			break;
		}
		msdu_desc_info = &msdu_details[i].rx_msdu_info;

		if (!i)
			msdu_desc_info->info0 |= first;
		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
			msdu_desc_info->info0 |= last;
		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
		msdu_list->msdu_info[i].msdu_len =
			HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
		msdu_list->sw_cookie[i] =
			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				  msdu_details[i].buf_addr_info.info1);
		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
				msdu_details[i].buf_addr_info.info1);
		msdu_list->rbm[i] = tmp;
	}
	*num_msdus = i;
}

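/* Compare the PPDU ID of an MSDU from the monitor destination ring with
 * the PPDU ID tracked from the status ring, treating large differences
 * as wraparound per DP_NOT_PPDU_ID_WRAP_AROUND.  Illustrative values
 * only (assuming the threshold is well below the wrapped distance): with
 * *ppdu_id = 0xfff0 and msdu_ppdu_id = 0x0004 the second branch detects
 * a stale destination entry, counts it in *rx_bufs_used so it can be
 * freed, and resynchronizes *ppdu_id.
 */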
static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
					u32 *rx_bufs_used)
{
	u32 ret = 0;

	if ((*ppdu_id < msdu_ppdu_id) &&
	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
		*ppdu_id = msdu_ppdu_id;
		ret = msdu_ppdu_id;
	} else if ((*ppdu_id > msdu_ppdu_id) &&
		   ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
		/* mon_dst is behind mon_status; skip the dst_ring entry
		 * and free it
		 */
		*rx_bufs_used += 1;
		*ppdu_id = msdu_ppdu_id;
		ret = msdu_ppdu_id;
	}

	return ret;
}

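/* Derive the length of the current rx buffer for one MSDU.  An MSDU with
 * RX_MSDU_DESC_INFO0_MSDU_CONTINUATION spans several buffers: the first
 * continuation descriptor carries the total length, and each buffer then
 * consumes a full-buffer fragment until the remainder fits.  A descriptor
 * without the continuation flag either ends such a chain or is a
 * self-contained MSDU, and only then is *msdu_cnt decremented.
 */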
static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
				      bool *is_frag, u32 *total_len,
				      u32 *frag_len, u32 *msdu_cnt)
{
	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
		if (!*is_frag) {
			*total_len = info->msdu_len;
			*is_frag = true;
		}
		ath11k_dp_mon_set_frag_len(total_len, frag_len);
	} else {
		if (*is_frag)
			ath11k_dp_mon_set_frag_len(total_len, frag_len);
		else
			*frag_len = info->msdu_len;

		*is_frag = false;
		*msdu_cnt -= 1;
	}
}

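/* Pop one MPDU's worth of MSDU buffers from a monitor destination ring
 * entry and chain them into a head/tail skb list.  The routine walks the
 * MSDU link descriptors hanging off the ring entry, drops duplicates and
 * MPDUs flagged with flush/length/overflow RXDMA errors, resynchronizes
 * on PPDU ID mismatches, and returns the number of rx buffers consumed
 * so the caller can replenish the monitor buffer ring.
 */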
static u32
ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
			  void *ring_entry, struct sk_buff **head_msdu,
			  struct sk_buff **tail_msdu, u32 *npackets,
			  u32 *ppdu_id)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
	struct sk_buff *msdu = NULL, *last = NULL;
	struct hal_rx_msdu_list msdu_list;
	void *p_buf_addr_info, *p_last_buf_addr_info;
	struct hal_rx_desc *rx_desc;
	void *rx_msdu_link_desc;
	dma_addr_t paddr;
	u16 num_msdus = 0;
	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
	u32 rx_bufs_used = 0, i = 0;
	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
	u32 total_len = 0, frag_len = 0;
	bool is_frag, is_first_msdu;
	bool drop_mpdu = false;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_reo_entrance_ring *ent_desc =
			(struct hal_reo_entrance_ring *)ring_entry;
	int buf_id;

	ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
					    &sw_cookie, &p_last_buf_addr_info,
					    &msdu_cnt);

	if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
		      ent_desc->info1) ==
	    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
		u8 rxdma_err =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  ent_desc->info1);
		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
			drop_mpdu = true;
			pmon->rx_mon_stats.dest_mpdu_drop++;
		}
	}

	is_frag = false;
	is_first_msdu = true;

	do {
		if (pmon->mon_last_linkdesc_paddr == paddr) {
			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
			return rx_bufs_used;
		}

		rx_msdu_link_desc =
			(void *)pmon->link_desc_banks[sw_cookie].vaddr +
			(paddr - pmon->link_desc_banks[sw_cookie].paddr);

		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
					    &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			u32 l2_hdr_offset;

			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "i %d last_cookie %d is same\n",
					   i, pmon->mon_last_buf_cookie);
				drop_mpdu = true;
				pmon->rx_mon_stats.dup_mon_buf_cnt++;
				continue;
			}
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_list.sw_cookie[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			if (!msdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "msdu_pop: invalid buf_id %d\n", buf_id);
				break;
			}
			rxcb = ATH11K_SKB_RXCB(msdu);
			if (!rxcb->unmapped) {
				dma_unmap_single(ar->ab->dev, rxcb->paddr,
						 msdu->len +
						 skb_tailroom(msdu),
						 DMA_FROM_DEVICE);
				rxcb->unmapped = 1;
			}
			if (drop_mpdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "i %d drop msdu %p *ppdu_id %x\n",
					   i, msdu, *ppdu_id);
				dev_kfree_skb_any(msdu);
				msdu = NULL;
				goto next_msdu;
			}

			rx_desc = (struct hal_rx_desc *)msdu->data;

			rx_pkt_offset = sizeof(struct hal_rx_desc);
			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc);

			if (is_first_msdu) {
				if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) {
					drop_mpdu = true;
					dev_kfree_skb_any(msdu);
					msdu = NULL;
					pmon->mon_last_linkdesc_paddr = paddr;
					goto next_msdu;
				}

				msdu_ppdu_id =
					ath11k_dp_rxdesc_get_ppduid(rx_desc);

				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
								 ppdu_id,
								 &rx_bufs_used)) {
					if (rx_bufs_used) {
						drop_mpdu = true;
						dev_kfree_skb_any(msdu);
						msdu = NULL;
						goto next_msdu;
					}
					return rx_bufs_used;
				}
				pmon->mon_last_linkdesc_paddr = paddr;
				is_first_msdu = false;
			}
			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
						  &is_frag, &total_len,
						  &frag_len, &msdu_cnt);
			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;

			/* On failure the skb has already been freed, so it
			 * must not be linked into the MPDU chain.
			 */
			if (ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
				msdu = NULL;
				goto next_msdu;
			}

			if (!(*head_msdu))
				*head_msdu = msdu;
			else if (last)
				last->next = msdu;

			last = msdu;
next_msdu:
			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
			rx_bufs_used++;
			spin_lock_bh(&rx_ring->idr_lock);
			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
		}

		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
						    &sw_cookie,
						    &p_buf_addr_info);

		if (ath11k_dp_rx_monitor_link_desc_return(ar,
							  p_last_buf_addr_info,
							  dp->mac_id))
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "dp_rx_monitor_link_desc_return failed\n");

		p_last_buf_addr_info = p_buf_addr_info;

	} while (paddr && msdu_cnt);

	if (last)
		last->next = NULL;

	*tail_msdu = msdu;

	if (msdu_cnt == 0)
		*npackets = 1;

	return rx_bufs_used;
}

static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu)
{
	u32 rx_pkt_offset, l2_hdr_offset;

	rx_pkt_offset = sizeof(struct hal_rx_desc);
	l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data);
	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
}

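/* Stitch the popped MSDU buffers back into a single frame for delivery on
 * the monitor interface.  Raw decap frames only need their payloads
 * exposed and the FCS trimmed; native-wifi decap frames additionally get
 * the 802.11 header (plus QoS control, where present) restored from the
 * rx descriptor.  Other decap formats are not supported here.
 */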
static struct sk_buff *
ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
			    u32 mac_id, struct sk_buff *head_msdu,
			    struct sk_buff *last_msdu,
			    struct ieee80211_rx_status *rxs)
{
	struct sk_buff *msdu, *mpdu_buf, *prev_buf;
	u32 decap_format, wifi_hdr_len;
	struct hal_rx_desc *rx_desc;
	char *hdr_desc;
	u8 *dest;
	struct ieee80211_hdr_3addr *wh;

	mpdu_buf = NULL;

	if (!head_msdu)
		goto err_merge_fail;

	rx_desc = (struct hal_rx_desc *)head_msdu->data;

	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc))
		return NULL;

	decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc);

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);

	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
		ath11k_dp_rx_msdus_set_payload(head_msdu);

		prev_buf = head_msdu;
		msdu = head_msdu->next;

		while (msdu) {
			ath11k_dp_rx_msdus_set_payload(msdu);

			prev_buf = msdu;
			msdu = msdu->next;
		}

		prev_buf->next = NULL;

		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
		__le16 qos_field;
		u8 qos_pkt = 0;

		rx_desc = (struct hal_rx_desc *)head_msdu->data;
		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);

		/* Base size */
		wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
		wh = (struct ieee80211_hdr_3addr *)hdr_desc;

		if (ieee80211_is_data_qos(wh->frame_control)) {
			struct ieee80211_qos_hdr *qwh =
					(struct ieee80211_qos_hdr *)hdr_desc;

			qos_field = qwh->qos_ctrl;
			qos_pkt = 1;
		}
		msdu = head_msdu;

		while (msdu) {
			rx_desc = (struct hal_rx_desc *)msdu->data;
			hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);

			if (qos_pkt) {
				dest = skb_push(msdu, sizeof(__le16));
				if (!dest)
					goto err_merge_fail;
				memcpy(dest, hdr_desc, wifi_hdr_len);
				memcpy(dest + wifi_hdr_len,
				       (u8 *)&qos_field, sizeof(__le16));
			}
			ath11k_dp_rx_msdus_set_payload(msdu);
			prev_buf = msdu;
			msdu = msdu->next;
		}
		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
		if (!dest)
			goto err_merge_fail;

		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "mpdu_buf %pK mpdu_buf->len %u\n",
			   prev_buf, prev_buf->len);
	} else {
		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "decap format %d is not supported!\n",
			   decap_format);
		goto err_merge_fail;
	}

	return head_msdu;

err_merge_fail:
	if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
			   "err_merge_fail mpdu_buf %pK\n", mpdu_buf);
		/* Free the head buffer */
		dev_kfree_skb_any(mpdu_buf);
	}
	return NULL;
}

static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
				    struct sk_buff *head_msdu,
				    struct sk_buff *tail_msdu,
				    struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *mon_skb, *skb_next, *header;
	struct ieee80211_rx_status *rxs = &dp->rx_status, *status;

	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
					      tail_msdu, rxs);

	if (!mon_skb)
		goto mon_deliver_fail;

	header = mon_skb;

	rxs->flag = 0;
	do {
		skb_next = mon_skb->next;
		if (!skb_next)
			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			rxs->flag |= RX_FLAG_AMSDU_MORE;

		if (mon_skb == header) {
			header = NULL;
			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
		}
		rxs->flag |= RX_FLAG_ONLY_MONITOR;

		status = IEEE80211_SKB_RXCB(mon_skb);
		*status = *rxs;

		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
		mon_skb = skb_next;
	} while (mon_skb);
	rxs->flag = 0;

	return 0;

mon_deliver_fail:
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = mon_skb->next;
		dev_kfree_skb_any(mon_skb);
		mon_skb = skb_next;
	}
	return -EINVAL;
}

static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
					  struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	void *ring_entry;
	void *mon_dst_srng;
	u32 ppdu_id;
	u32 rx_bufs_used;
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	u32 npackets = 0;

	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];

	if (!mon_dst_srng) {
		ath11k_warn(ar->ab,
			    "HAL Monitor Destination Ring Init Failed -- %pK\n",
			    mon_dst_srng);
		return;
	}

	spin_lock_bh(&pmon->mon_lock);

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);

	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
	rx_bufs_used = 0;
	rx_mon_stats = &pmon->rx_mon_stats;

	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		struct sk_buff *head_msdu, *tail_msdu;

		head_msdu = NULL;
		tail_msdu = NULL;

		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
							  &head_msdu,
							  &tail_msdu,
							  &npackets, &ppdu_id);

		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "dest_rx: new ppdu_id %x != status ppdu_id %x\n",
				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
			break;
		}
		if (head_msdu && tail_msdu) {
			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
						 tail_msdu, napi);
			rx_mon_stats->dest_mpdu_done++;
		}

		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
	}
	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);

	spin_unlock_bh(&pmon->mon_lock);

	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
					   &dp->rxdma_mon_buf_ring,
					   rx_bufs_used,
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}
}

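/* Pump buffered status-ring skbs through the monitor PPDU TLV parser.
 * Once a complete PPDU status (HAL_TLV_STATUS_PPDU_DONE) has been parsed,
 * the matching payload MPDUs are reaped from the monitor destination ring
 * via ath11k_dp_rx_mon_dest_process() and the state machine is re-armed
 * for the next PPDU.
 */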
static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
						u32 quota,
						struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct hal_rx_mon_ppdu_info *ppdu_info;
	struct sk_buff *status_skb;
	u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
	struct ath11k_pdev_mon_stats *rx_mon_stats;

	ppdu_info = &pmon->mon_ppdu_info;
	rx_mon_stats = &pmon->rx_mon_stats;

	if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	while (!skb_queue_empty(&pmon->rx_status_q)) {
		status_skb = skb_dequeue(&pmon->rx_status_q);

		tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
							    status_skb);
		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath11k_dp_rx_mon_dest_process(ar, quota, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
		dev_kfree_skb_any(status_skb);
	}
}

static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	int num_buffs_reaped = 0;

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
							     &pmon->rx_status_q);
	if (num_buffs_reaped)
		ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);

	return num_buffs_reaped;
}

int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
				   struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	int ret = 0;

	if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
		ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
	else
		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);

	return ret;
}

static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_srng *mon_desc_srng = NULL;
	struct dp_srng *dp_srng;
	int ret = 0;
	u32 n_link_desc = 0;

	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	dp_srng = &dp->rxdma_mon_desc_ring;
	n_link_desc = dp_srng->size /
		ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
	mon_desc_srng =
		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
					n_link_desc);
	if (ret) {
		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
		return ret;
	}
	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);
	return 0;
}

static int ath11k_dp_mon_link_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;

	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
				    HAL_RXDMA_MONITOR_DESC,
				    &dp->rxdma_mon_desc_ring);
	return 0;
}

int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);
	return 0;
}