// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

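/* The helpers below wrap ab->hw_params.hw_ops so callers can read rx
 * descriptor fields without knowing the chip-specific descriptor layout.
 */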
static inline
u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}

static inline
enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
							struct hal_rx_desc *desc)
{
	if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}

static inline
bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
					    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
}

static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}

static inline
bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
							struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
						    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
						   struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}

static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(attn->info2));
}

static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(attn->info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
{
	u32 info = __le32_to_cpu(attn->info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}

static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	struct rx_attention *rx_attention;
	u32 errmap;

	rx_attention = ath11k_dp_rx_get_attention(ab, desc);
	errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);

	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
}

static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
						     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}

static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}

static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}

static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
}

static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}

static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
			 __le32_to_cpu(attn->info1));
}

static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
						struct hal_rx_desc *rx_desc)
{
	u8 *rx_pkt_hdr;

	rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);

	return rx_pkt_hdr;
}

static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
					       struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);

	return tlv_tag == HAL_RX_MPDU_START;
}

static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
					      struct hal_rx_desc *rx_desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}

static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
						 struct hal_rx_desc *desc,
						 u16 len)
{
	ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}

static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
					struct hal_rx_desc *desc)
{
	struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);

	return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
			     __le32_to_cpu(attn->info1)));
}

static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
}

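/* Periodic reaper for the monitor rings, used when rxdma1 is not available
 * (e.g. QCA6390, see ath11k_dp_rx_pdev_srng_alloc()); re-arms itself every
 * ATH11K_MON_TIMER_INTERVAL.
 */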
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
	int i;

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
		ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);

	mod_timer(&ab->mon_reap_timer, jiffies +
		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
}

static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
{
	int i, reaped = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

	do {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
								 NULL,
								 DP_MON_SERVICE_BUDGET);

		/* nothing more to reap */
		if (reaped < DP_MON_SERVICE_BUDGET)
			return 0;

	} while (time_before(jiffies, timeout));
	ath11k_warn(ab, "dp mon ring purge timeout\n");

	return -ETIMEDOUT;
}

/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

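	/* For each buffer: allocate an skb, align skb->data, DMA-map it,
	 * stash it in the IDR (the buf_id becomes part of the HW cookie)
	 * and publish the physical address + cookie to the refill ring.
	 */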
	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
				   (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id <= 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
	}

	return 0;
}

static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		      ath11k_hal_srng_get_entrysize(ar->ab, ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   ar->ab->hw_params.hal_params->rx_buf_rbm);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	if (ar->ab->hw_params.rxdma1_enable) {
		rx_ring = &dp->rxdma_mon_buf_ring;
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
	}

	return 0;
}

static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	int i;

	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		if (ab->hw_params.rx_mac_buf_ring)
			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);

		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
		ath11k_dp_srng_cleanup(ab,
				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
	}

	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}

static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_srng *srng = NULL;
	int i;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* If rxdma1_enable is false, there is no need to set up
	 * rxdma_mon_buf_ring, rxdma_mon_dst_ring or rxdma_mon_desc_ring;
	 * init the reap timer instead (e.g. for QCA6390).
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		/* init mon status buffer reap timer */
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		rx_tid = &cmd->data;
		if (rx_tid->vaddr) {
			dma_unmap_single(ab->dev, rx_tid->paddr,
					 rx_tid->size, DMA_BIDIRECTIONAL);
			kfree(rx_tid->vaddr);
			rx_tid->vaddr = NULL;
		}
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		rx_tid = &cmd_cache->data;
		if (rx_tid->vaddr) {
			dma_unmap_single(ab->dev, rx_tid->paddr,
					 rx_tid->size, DMA_BIDIRECTIONAL);
			kfree(rx_tid->vaddr);
			rx_tid->vaddr = NULL;
		}
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);
	if (rx_tid->vaddr) {
		dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}

static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

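	/* The TID queue descriptor may span multiple flush units; flush it
	 * chunk by chunk from the tail, then send a final flush (with status
	 * requested) for the base address below.
	 */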
	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}

static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

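	/* HW may still reference the queue; defer freeing by queueing the
	 * descriptor on the cache flush list and age it out below.
	 */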
	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;
}

void ath11k_peer_rx_tid_delete(struct ath11k *ar,
			       struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	rx_tid->active = false;

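	/* Mark the REO queue invalid (UPD0_VLD); the descriptor itself is
	 * unmapped and freed from ath11k_dp_rx_tid_del_func() once the
	 * command status arrives.
	 */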
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		if (ret != -ESHUTDOWN)
			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
				   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}

	rx_tid->paddr = 0;
	rx_tid->size = 0;
}

static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
	struct ath11k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);

		ath11k_dp_rx_frags_cleanup(rx_tid, true);
	}
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
			    peer_mac);
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d: %d\n",
				    peer_mac, tid, ret);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
				    peer_mac, tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

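	/* Over-allocate by HAL_LINK_DESC_ALIGN - 1 so the queue descriptor
	 * can be aligned with PTR_ALIGN() before handing it to HW.
	 */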
	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
			    peer_mac, tid, ret);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
			    peer_mac, tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;

	return ret;
}

int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn, arsta->pn_type);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid: %d\n", ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid: %d\n",
			    ret);

	return ret;
}

int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath11k *ar = arvif->ar;
	struct ath11k_base *ab = ar->ab;
	struct ath11k_hal_reo_cmd cmd = {0};
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

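	/* Only ciphers with a 48-bit packet number get the HW PN check
	 * enabled; WEP/WAPI fall through to the default and are left alone.
	 */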
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_UPDATE_RX_QUEUE,
						&cmd, NULL);
		if (ret) {
			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
				    tid, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
					     u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

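	/* The payload is a stream of TLVs: a 4-byte header carrying the tag
	 * and value length, followed by tlv_len bytes of value.
	 */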
	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

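	/* Decode the rate fields packed into rate_flags. Note: the HTT
	 * bandwidth value appears to be offset by 2 from the 0-based
	 * bandwidth index expected below, hence the subtraction.
	 */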
	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If the host configured fixed rates, or in some other special
	 * cases, broadcast/management frames are sent at different rates.
	 * Should firmware rate control be skipped for those?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = ath11k_sta_to_arsta(sta);

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
						((user_rate->ru_end -
						  user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;

	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);

	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}

static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto out;
	}

	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	spin_lock_bh(&ar->data_lock);
	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto out_unlock_data;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto out_unlock_data;
	}

out_unlock_data:
	spin_unlock_bh(&ar->data_lock);

out:
	rcu_read_unlock();

	return ret;
}

static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
	struct ath11k *ar;
	u8 pdev_id;

	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);

	rcu_read_lock();

	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
		goto out;
	}

	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
				ar->ab->pktlog_defs_checksum);

out:
	rcu_read_unlock();
}

static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
						  struct sk_buff *skb)
{
	u32 *data = (u32 *)skb->data;
	u8 pdev_id, ring_type, ring_id, pdev_idx;
	u16 hp, tp;
	u32 backpressure_time;
	struct ath11k_bp_stats *bp_stats;

	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
	++data;

	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
	++data;

	backpressure_time = *data;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d, ring id %d, hp %d tp %d, backpressure time %d\n",
		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);

	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
			return;

		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
		pdev_idx = DP_HW2SW_MACID(pdev_id);

		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
			return;

		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
	} else {
		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
			    ring_type);
		return;
	}

	spin_lock_bh(&ab->base_lock);
	bp_stats->hp = hp;
	bp_stats->tp = tp;
	bp_stats->count++;
	bp_stats->jiffies = jiffies;
	spin_unlock_bh(&ab->base_lock);
}

void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	struct ath11k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash;
	u16 hw_peer_id;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type: 0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
						  resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
						  resp->version_msg.version);
		complete(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
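	/* PEER_MAP2 carries the same fields as PEER_MAP plus the AST hash
	 * and the hardware peer id.
	 */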
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
				     resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
				    resp->peer_unmap_ev.info);
		ath11k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
	default:
		ath11k_warn(ab, "htt event %d not handled\n", type);
		break;
	}

	dev_kfree_skb_any(skb);
}

static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU is spread over multiple buffers, the attention,
	 * MSDU_END and MPDU_END TLVs are valid only in the last buffer.
	 * Copy those TLVs.
	 */
	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH11K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	rem_len = msdu_len - buf_first_len;
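	/* Copy the payload of each continuation buffer into the (now
	 * expanded) first skb and release the source buffers as we go.
	 */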
1821 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1822 rxcb = ATH11K_SKB_RXCB(skb);
1823 if (rxcb->is_continuation)
1824 buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1825 else
1826 buf_len = rem_len;
1827
1828 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1829 WARN_ON_ONCE(1);
1830 dev_kfree_skb_any(skb);
1831 return -EINVAL;
1832 }
1833
1834 skb_put(skb, buf_len + hal_rx_desc_sz);
1835 skb_pull(skb, hal_rx_desc_sz);
1836 skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1837 buf_len);
1838 dev_kfree_skb_any(skb);
1839
1840 rem_len -= buf_len;
1841 if (!rxcb->is_continuation)
1842 break;
1843 }
1844
1845 return 0;
1846}
1847
1848static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1849 struct sk_buff *first)
1850{
1851 struct sk_buff *skb;
1852 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1853
1854 if (!rxcb->is_continuation)
1855 return first;
1856
1857 skb_queue_walk(msdu_list, skb) {
1858 rxcb = ATH11K_SKB_RXCB(skb);
1859 if (!rxcb->is_continuation)
1860 return skb;
1861 }
1862
1863 return NULL;
1864}
1865
1866static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
1867{
1868 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1869 struct rx_attention *rx_attention;
1870 bool ip_csum_fail, l4_csum_fail;
1871
1872 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
1873 ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
1874 l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
1875
1876 msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1877 CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1878}
1879
1880static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1881 enum hal_encrypt_type enctype)
1882{
1883 switch (enctype) {
1884 case HAL_ENCRYPT_TYPE_OPEN:
1885 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1886 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1887 return 0;
1888 case HAL_ENCRYPT_TYPE_CCMP_128:
1889 return IEEE80211_CCMP_MIC_LEN;
1890 case HAL_ENCRYPT_TYPE_CCMP_256:
1891 return IEEE80211_CCMP_256_MIC_LEN;
1892 case HAL_ENCRYPT_TYPE_GCMP_128:
1893 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1894 return IEEE80211_GCMP_MIC_LEN;
1895 case HAL_ENCRYPT_TYPE_WEP_40:
1896 case HAL_ENCRYPT_TYPE_WEP_104:
1897 case HAL_ENCRYPT_TYPE_WEP_128:
1898 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1899 case HAL_ENCRYPT_TYPE_WAPI:
1900 break;
1901 }
1902
1903 ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1904 return 0;
1905}
1906
1907static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1908 enum hal_encrypt_type enctype)
1909{
1910 switch (enctype) {
1911 case HAL_ENCRYPT_TYPE_OPEN:
1912 return 0;
1913 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1914 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1915 return IEEE80211_TKIP_IV_LEN;
1916 case HAL_ENCRYPT_TYPE_CCMP_128:
1917 return IEEE80211_CCMP_HDR_LEN;
1918 case HAL_ENCRYPT_TYPE_CCMP_256:
1919 return IEEE80211_CCMP_256_HDR_LEN;
1920 case HAL_ENCRYPT_TYPE_GCMP_128:
1921 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1922 return IEEE80211_GCMP_HDR_LEN;
1923 case HAL_ENCRYPT_TYPE_WEP_40:
1924 case HAL_ENCRYPT_TYPE_WEP_104:
1925 case HAL_ENCRYPT_TYPE_WEP_128:
1926 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1927 case HAL_ENCRYPT_TYPE_WAPI:
1928 break;
1929 }
1930
1931 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1932 return 0;
1933}
1934
1935static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1936 enum hal_encrypt_type enctype)
1937{
1938 switch (enctype) {
1939 case HAL_ENCRYPT_TYPE_OPEN:
1940 case HAL_ENCRYPT_TYPE_CCMP_128:
1941 case HAL_ENCRYPT_TYPE_CCMP_256:
1942 case HAL_ENCRYPT_TYPE_GCMP_128:
1943 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1944 return 0;
1945 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1946 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1947 return IEEE80211_TKIP_ICV_LEN;
1948 case HAL_ENCRYPT_TYPE_WEP_40:
1949 case HAL_ENCRYPT_TYPE_WEP_104:
1950 case HAL_ENCRYPT_TYPE_WEP_128:
1951 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1952 case HAL_ENCRYPT_TYPE_WAPI:
1953 break;
1954 }
1955
1956 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1957 return 0;
1958}
1959
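/* Convert a native-wifi decapped MSDU back to an 802.11 frame. The first
 * MSDU reuses the original header from the rx descriptor (with the A-MSDU
 * bit cleared), while middle/last MSDUs get a QoS header rebuilt from the
 * cached decap header and the TID in the rx control block.
 */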
1960static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
1961 struct sk_buff *msdu,
1962 u8 *first_hdr,
1963 enum hal_encrypt_type enctype,
1964 struct ieee80211_rx_status *status)
1965{
1966 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1967 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1968 struct ieee80211_hdr *hdr;
1969 size_t hdr_len;
1970 u8 da[ETH_ALEN];
1971 u8 sa[ETH_ALEN];
1972 u16 qos_ctl = 0;
1973 u8 *qos;
1974
1975 /* copy SA & DA and pull decapped header */
1976 hdr = (struct ieee80211_hdr *)msdu->data;
1977 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1978 ether_addr_copy(da, ieee80211_get_DA(hdr));
1979 ether_addr_copy(sa, ieee80211_get_SA(hdr));
1980 skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
1981
1982 if (rxcb->is_first_msdu) {
		/* The original 802.11 header is valid for the first msdu,
		 * hence we can reuse the same header
		 */
1986 hdr = (struct ieee80211_hdr *)first_hdr;
1987 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1988
1989 /* Each A-MSDU subframe will be reported as a separate MSDU,
1990 * so strip the A-MSDU bit from QoS Ctl.
1991 */
1992 if (ieee80211_is_data_qos(hdr->frame_control)) {
1993 qos = ieee80211_get_qos_ctl(hdr);
1994 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1995 }
1996 } else {
1997 /* Rebuild qos header if this is a middle/last msdu */
1998 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1999
2000 /* Reset the order bit as the HT_Control header is stripped */
2001 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
2002
2003 qos_ctl = rxcb->tid;
2004
2005 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
2006 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2007
2008 /* TODO Add other QoS ctl fields when required */
2009
2010 /* copy decap header before overwriting for reuse below */
		memcpy(decap_hdr, (u8 *)hdr, hdr_len);
2012 }
2013
2014 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2015 memcpy(skb_push(msdu,
2016 ath11k_dp_rx_crypto_param_len(ar, enctype)),
2017 (void *)hdr + hdr_len,
2018 ath11k_dp_rx_crypto_param_len(ar, enctype));
2019 }
2020
2021 if (!rxcb->is_first_msdu) {
2022 memcpy(skb_push(msdu,
2023 IEEE80211_QOS_CTL_LEN), &qos_ctl,
2024 IEEE80211_QOS_CTL_LEN);
2025 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2026 return;
2027 }
2028
2029 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2030
2031 /* original 802.11 header has a different DA and in
2032 * case of 4addr it may also have different SA
2033 */
2034 hdr = (struct ieee80211_hdr *)msdu->data;
2035 ether_addr_copy(ieee80211_get_DA(hdr), da);
2036 ether_addr_copy(ieee80211_get_SA(hdr), sa);
2037}
2038
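/* Undecap a raw MSDU: trim the FCS and, if the frame was decrypted in
 * hardware, strip the MIC/ICV trailers and the IV in accordance with the
 * RX_FLAG_*_STRIPPED flags reported in the rx status.
 */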
2039static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
2040 enum hal_encrypt_type enctype,
2041 struct ieee80211_rx_status *status,
2042 bool decrypted)
2043{
2044 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2045 struct ieee80211_hdr *hdr;
2046 size_t hdr_len;
2047 size_t crypto_len;
2048
	if (!rxcb->is_first_msdu || !rxcb->is_last_msdu) {
2051 WARN_ON_ONCE(1);
2052 return;
2053 }
2054
2055 skb_trim(msdu, msdu->len - FCS_LEN);
2056
2057 if (!decrypted)
2058 return;
2059
2060 hdr = (void *)msdu->data;
2061
2062 /* Tail */
2063 if (status->flag & RX_FLAG_IV_STRIPPED) {
2064 skb_trim(msdu, msdu->len -
2065 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2066
2067 skb_trim(msdu, msdu->len -
2068 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2069 } else {
2070 /* MIC */
2071 if (status->flag & RX_FLAG_MIC_STRIPPED)
2072 skb_trim(msdu, msdu->len -
2073 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2074
2075 /* ICV */
2076 if (status->flag & RX_FLAG_ICV_STRIPPED)
2077 skb_trim(msdu, msdu->len -
2078 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2079 }
2080
2081 /* MMIC */
2082 if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2083 !ieee80211_has_morefrags(hdr->frame_control) &&
2084 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2085 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2086
2087 /* Head */
2088 if (status->flag & RX_FLAG_IV_STRIPPED) {
2089 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2090 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2091
2092 memmove((void *)msdu->data + crypto_len,
2093 (void *)msdu->data, hdr_len);
2094 skb_pull(msdu, crypto_len);
2095 }
2096}
2097
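/* Locate the RFC 1042 (LLC/SNAP) header within the original frame kept in
 * the rx descriptor's hdr status, skipping the 802.11 header and crypto
 * params on the first MSDU and the subframe header for A-MSDUs.
 */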
2098static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
2099 struct sk_buff *msdu,
2100 enum hal_encrypt_type enctype)
2101{
2102 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2103 struct ieee80211_hdr *hdr;
2104 size_t hdr_len, crypto_len;
2105 void *rfc1042;
2106 bool is_amsdu;
2107
2108 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
2109 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
2110 rfc1042 = hdr;
2111
2112 if (rxcb->is_first_msdu) {
2113 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2114 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2115
2116 rfc1042 += hdr_len + crypto_len;
2117 }
2118
2119 if (is_amsdu)
2120 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
2121
2122 return rfc1042;
2123}
2124
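/* Undecap an Ethernet II MSDU: replace the Ethernet header with the
 * LLC/SNAP header and the original 802.11 header from the rx descriptor,
 * then restore DA/SA from the Ethernet header.
 */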
2125static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
2126 struct sk_buff *msdu,
2127 u8 *first_hdr,
2128 enum hal_encrypt_type enctype,
2129 struct ieee80211_rx_status *status)
2130{
2131 struct ieee80211_hdr *hdr;
2132 struct ethhdr *eth;
2133 size_t hdr_len;
2134 u8 da[ETH_ALEN];
2135 u8 sa[ETH_ALEN];
2136 void *rfc1042;
2137
2138 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
2139 if (WARN_ON_ONCE(!rfc1042))
2140 return;
2141
2142 /* pull decapped header and copy SA & DA */
2143 eth = (struct ethhdr *)msdu->data;
2144 ether_addr_copy(da, eth->h_dest);
2145 ether_addr_copy(sa, eth->h_source);
2146 skb_pull(msdu, sizeof(struct ethhdr));
2147
2148 /* push rfc1042/llc/snap */
2149 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
2150 sizeof(struct ath11k_dp_rfc1042_hdr));
2151
2152 /* push original 802.11 header */
2153 hdr = (struct ieee80211_hdr *)first_hdr;
2154 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2155
2156 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2157 memcpy(skb_push(msdu,
2158 ath11k_dp_rx_crypto_param_len(ar, enctype)),
2159 (void *)hdr + hdr_len,
2160 ath11k_dp_rx_crypto_param_len(ar, enctype));
2161 }
2162
2163 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2164
2165 /* original 802.11 header has a different DA and in
2166 * case of 4addr it may also have different SA
2167 */
2168 hdr = (struct ieee80211_hdr *)msdu->data;
2169 ether_addr_copy(ieee80211_get_DA(hdr), da);
2170 ether_addr_copy(ieee80211_get_SA(hdr), sa);
2171}
2172
2173static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
2174 struct hal_rx_desc *rx_desc,
2175 enum hal_encrypt_type enctype,
2176 struct ieee80211_rx_status *status,
2177 bool decrypted)
2178{
2179 u8 *first_hdr;
2180 u8 decap;
2181 struct ethhdr *ehdr;
2182
2183 first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
2184 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
2185
2186 switch (decap) {
2187 case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2188 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
2189 enctype, status);
2190 break;
2191 case DP_RX_DECAP_TYPE_RAW:
2192 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2193 decrypted);
2194 break;
2195 case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2196 ehdr = (struct ethhdr *)msdu->data;
2197
2198 /* mac80211 allows fast path only for authorized STA */
2199 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2200 ATH11K_SKB_RXCB(msdu)->is_eapol = true;
2201 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2202 enctype, status);
2203 break;
2204 }
2205
2206 /* PN for mcast packets will be validated in mac80211;
2207 * remove eth header and add 802.11 header.
2208 */
2209 if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2210 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2211 enctype, status);
2212 break;
2213 case DP_RX_DECAP_TYPE_8023:
2214 /* TODO: Handle undecap for these formats */
2215 break;
2216 }
2217}
2218
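/* Find the peer for this MSDU, first by the peer id recorded in the rx
 * control block and, failing that, by the MPDU transmitter address (addr2)
 * from the rx descriptor. Must be called with ab->base_lock held.
 */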
2219static struct ath11k_peer *
2220ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
2221{
2222 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2223 struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2224 struct ath11k_peer *peer = NULL;
2225
2226 lockdep_assert_held(&ab->base_lock);
2227
2228 if (rxcb->peer_id)
2229 peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
2230
2231 if (peer)
2232 return peer;
2233
2234 if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2235 return NULL;
2236
2237 peer = ath11k_peer_find_by_addr(ab,
2238 ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
2239 return peer;
2240}
2241
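/* Per-MPDU processing: resolve the encryption type from the peer (or the rx
 * descriptor), translate hardware error and decryption status into mac80211
 * rx flags, perform checksum offload and undecap the frame. The protected
 * bit is cleared for fully decrypted non-multicast frames with non-Ethernet
 * decap.
 */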
2242static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
2243 struct sk_buff *msdu,
2244 struct hal_rx_desc *rx_desc,
2245 struct ieee80211_rx_status *rx_status)
2246{
2247 bool fill_crypto_hdr;
2248 enum hal_encrypt_type enctype;
2249 bool is_decrypted = false;
2250 struct ath11k_skb_rxcb *rxcb;
2251 struct ieee80211_hdr *hdr;
2252 struct ath11k_peer *peer;
2253 struct rx_attention *rx_attention;
2254 u32 err_bitmap;
2255
2256 /* PN for multicast packets will be checked in mac80211 */
2257 rxcb = ATH11K_SKB_RXCB(msdu);
2258 fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
2259 rxcb->is_mcbc = fill_crypto_hdr;
2260
2261 if (rxcb->is_mcbc) {
2262 rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
2263 rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
2264 }
2265
2266 spin_lock_bh(&ar->ab->base_lock);
2267 peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2268 if (peer) {
2269 if (rxcb->is_mcbc)
2270 enctype = peer->sec_type_grp;
2271 else
2272 enctype = peer->sec_type;
2273 } else {
2274 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
2275 }
2276 spin_unlock_bh(&ar->ab->base_lock);
2277
2278 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
2279 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
2280 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2281 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
2282
2283 /* Clear per-MPDU flags while leaving per-PPDU flags intact */
2284 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2285 RX_FLAG_MMIC_ERROR |
2286 RX_FLAG_DECRYPTED |
2287 RX_FLAG_IV_STRIPPED |
2288 RX_FLAG_MMIC_STRIPPED);
2289
2290 if (err_bitmap & DP_RX_MPDU_ERR_FCS)
2291 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2292 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
2293 rx_status->flag |= RX_FLAG_MMIC_ERROR;
2294
2295 if (is_decrypted) {
2296 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2297
2298 if (fill_crypto_hdr)
2299 rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2300 RX_FLAG_ICV_STRIPPED;
2301 else
2302 rx_status->flag |= RX_FLAG_IV_STRIPPED |
2303 RX_FLAG_PN_VALIDATED;
2304 }
2305
2306 ath11k_dp_rx_h_csum_offload(ar, msdu);
2307 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
2308 enctype, rx_status, is_decrypted);
2309
2310 if (!is_decrypted || fill_crypto_hdr)
2311 return;
2312
2313 if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
2314 DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2315 hdr = (void *)msdu->data;
2316 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2317 }
2318}
2319
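/* Fill rate info in the rx status (legacy rate index or HT/VHT/HE MCS, NSS,
 * guard interval and bandwidth) from the MSDU start fields.
 */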
2320static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2321 struct ieee80211_rx_status *rx_status)
2322{
2323 struct ieee80211_supported_band *sband;
2324 enum rx_msdu_start_pkt_type pkt_type;
2325 u8 bw;
2326 u8 rate_mcs, nss;
2327 u8 sgi;
2328 bool is_cck, is_ldpc;
2329
2330 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
2331 bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
2332 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
2333 nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
2334 sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
2335
2336 switch (pkt_type) {
2337 case RX_MSDU_START_PKT_TYPE_11A:
2338 case RX_MSDU_START_PKT_TYPE_11B:
2339 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2340 sband = &ar->mac.sbands[rx_status->band];
2341 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2342 is_cck);
2343 break;
2344 case RX_MSDU_START_PKT_TYPE_11N:
2345 rx_status->encoding = RX_ENC_HT;
2346 if (rate_mcs > ATH11K_HT_MCS_MAX) {
2347 ath11k_warn(ar->ab,
				    "received invalid mcs %d in HT mode\n",
2349 rate_mcs);
2350 break;
2351 }
2352 rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2353 if (sgi)
2354 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2355 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2356 break;
2357 case RX_MSDU_START_PKT_TYPE_11AC:
2358 rx_status->encoding = RX_ENC_VHT;
2359 rx_status->rate_idx = rate_mcs;
2360 if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2361 ath11k_warn(ar->ab,
				    "received invalid mcs %d in VHT mode\n",
2363 rate_mcs);
2364 break;
2365 }
2366 rx_status->nss = nss;
2367 if (sgi)
2368 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2369 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2370 is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
2371 if (is_ldpc)
2372 rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
2373 break;
2374 case RX_MSDU_START_PKT_TYPE_11AX:
2375 rx_status->rate_idx = rate_mcs;
2376 if (rate_mcs > ATH11K_HE_MCS_MAX) {
2377 ath11k_warn(ar->ab,
				    "received invalid mcs %d in HE mode\n",
2379 rate_mcs);
2380 break;
2381 }
2382 rx_status->encoding = RX_ENC_HE;
2383 rx_status->nss = nss;
2384 rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
2385 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2386 break;
2387 }
2388}
2389
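/* Derive band and frequency for the rx status. The MSDU start freq field
 * carries the channel number in its lower 16 bits and the center frequency
 * in the upper 16 bits; if neither maps to a known band, fall back to the
 * pdev's current rx channel.
 */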
2390static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2391 struct ieee80211_rx_status *rx_status)
2392{
2393 u8 channel_num;
2394 u32 center_freq, meta_data;
2395 struct ieee80211_channel *channel;
2396
2397 rx_status->freq = 0;
2398 rx_status->rate_idx = 0;
2399 rx_status->nss = 0;
2400 rx_status->encoding = RX_ENC_LEGACY;
2401 rx_status->bw = RATE_INFO_BW_20;
2402
2403 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2404
2405 meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
2406 channel_num = meta_data;
2407 center_freq = meta_data >> 16;
2408
2409 if (center_freq >= ATH11K_MIN_6G_FREQ &&
2410 center_freq <= ATH11K_MAX_6G_FREQ) {
2411 rx_status->band = NL80211_BAND_6GHZ;
2412 rx_status->freq = center_freq;
2413 } else if (channel_num >= 1 && channel_num <= 14) {
2414 rx_status->band = NL80211_BAND_2GHZ;
2415 } else if (channel_num >= 36 && channel_num <= 177) {
2416 rx_status->band = NL80211_BAND_5GHZ;
2417 } else {
2418 spin_lock_bh(&ar->data_lock);
2419 channel = ar->rx_channel;
2420 if (channel) {
2421 rx_status->band = channel->band;
2422 channel_num =
2423 ieee80211_frequency_to_channel(channel->center_freq);
2424 }
2425 spin_unlock_bh(&ar->data_lock);
2426 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
2427 rx_desc, sizeof(struct hal_rx_desc));
2428 }
2429
2430 if (rx_status->band != NL80211_BAND_6GHZ)
2431 rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2432 rx_status->band);
2433
2434 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2435}
2436
2437static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
2438 struct sk_buff *msdu,
2439 struct ieee80211_rx_status *status)
2440{
2441 static const struct ieee80211_radiotap_he known = {
2442 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2443 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2444 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2445 };
2446 struct ieee80211_rx_status *rx_status;
2447 struct ieee80211_radiotap_he *he = NULL;
2448 struct ieee80211_sta *pubsta = NULL;
2449 struct ath11k_peer *peer;
2450 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2451 u8 decap = DP_RX_DECAP_TYPE_RAW;
2452 bool is_mcbc = rxcb->is_mcbc;
2453 bool is_eapol = rxcb->is_eapol;
2454
2455 if (status->encoding == RX_ENC_HE &&
2456 !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2457 !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2458 he = skb_push(msdu, sizeof(known));
2459 memcpy(he, &known, sizeof(known));
2460 status->flag |= RX_FLAG_RADIOTAP_HE;
2461 }
2462
2463 if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2464 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
2465
2466 spin_lock_bh(&ar->ab->base_lock);
2467 peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2468 if (peer && peer->sta)
2469 pubsta = peer->sta;
2470 spin_unlock_bh(&ar->ab->base_lock);
2471
2472 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
2473 "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2474 msdu,
2475 msdu->len,
2476 peer ? peer->addr : NULL,
2477 rxcb->tid,
2478 is_mcbc ? "mcast" : "ucast",
2479 rxcb->seq_no,
2480 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2481 (status->encoding == RX_ENC_HT) ? "ht" : "",
2482 (status->encoding == RX_ENC_VHT) ? "vht" : "",
2483 (status->encoding == RX_ENC_HE) ? "he" : "",
2484 (status->bw == RATE_INFO_BW_40) ? "40" : "",
2485 (status->bw == RATE_INFO_BW_80) ? "80" : "",
2486 (status->bw == RATE_INFO_BW_160) ? "160" : "",
2487 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2488 status->rate_idx,
2489 status->nss,
2490 status->freq,
2491 status->band, status->flag,
2492 !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2493 !!(status->flag & RX_FLAG_MMIC_ERROR),
2494 !!(status->flag & RX_FLAG_AMSDU_MORE));
2495
2496 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
2497 msdu->data, msdu->len);
2498
2499 rx_status = IEEE80211_SKB_RXCB(msdu);
2500 *rx_status = *status;
2501
2502 /* TODO: trace rx packet */
2503
	/* PN for multicast packets is not validated in HW,
	 * so skip the 802.3 rx path for them.
	 * Also, fast_rx expects the STA to be authorized, hence
	 * EAPOL packets are sent via the slow path.
	 */
2509 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2510 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2511 rx_status->flag |= RX_FLAG_8023;
2512
2513 ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
2514}
2515
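/* Validate one reaped MSDU and prepare it for delivery: check the msdu_done
 * bit in the last buffer's attention TLV, strip the rx descriptor and L3
 * padding (coalescing multi-buffer MSDUs as needed), then fill in the PPDU
 * and MPDU level rx status.
 */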
2516static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
2517 struct sk_buff *msdu,
2518 struct sk_buff_head *msdu_list,
2519 struct ieee80211_rx_status *rx_status)
2520{
2521 struct ath11k_base *ab = ar->ab;
2522 struct hal_rx_desc *rx_desc, *lrx_desc;
2523 struct rx_attention *rx_attention;
2524 struct ath11k_skb_rxcb *rxcb;
2525 struct sk_buff *last_buf;
2526 u8 l3_pad_bytes;
2527 u8 *hdr_status;
2528 u16 msdu_len;
2529 int ret;
2530 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
2531
2532 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2533 if (!last_buf) {
2534 ath11k_warn(ab,
2535 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
2536 ret = -EIO;
2537 goto free_out;
2538 }
2539
2540 rx_desc = (struct hal_rx_desc *)msdu->data;
2541 if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
2542 ath11k_warn(ar->ab, "msdu len not valid\n");
2543 ret = -EIO;
2544 goto free_out;
2545 }
2546
2547 lrx_desc = (struct hal_rx_desc *)last_buf->data;
2548 rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
2549 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
2550 ath11k_warn(ab, "msdu_done bit in attention is not set\n");
2551 ret = -EIO;
2552 goto free_out;
2553 }
2554
2555 rxcb = ATH11K_SKB_RXCB(msdu);
2556 rxcb->rx_desc = rx_desc;
2557 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
2558 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);
2559
2560 if (rxcb->is_frag) {
2561 skb_pull(msdu, hal_rx_desc_sz);
2562 } else if (!rxcb->is_continuation) {
2563 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2564 hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
2565 ret = -EINVAL;
2566 ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
2567 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
2568 sizeof(struct ieee80211_hdr));
2569 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
2570 sizeof(struct hal_rx_desc));
2571 goto free_out;
2572 }
2573 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2574 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2575 } else {
2576 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
2577 msdu, last_buf,
2578 l3_pad_bytes, msdu_len);
2579 if (ret) {
2580 ath11k_warn(ab,
				    "failed to coalesce msdu rx buffer %d\n", ret);
2582 goto free_out;
2583 }
2584 }
2585
2586 ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2587 ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2588
2589 rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2590
2591 return 0;
2592
2593free_out:
2594 return ret;
2595}
2596
2597static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2598 struct napi_struct *napi,
2599 struct sk_buff_head *msdu_list,
2600 int mac_id)
2601{
2602 struct sk_buff *msdu;
2603 struct ath11k *ar;
2604 struct ieee80211_rx_status rx_status = {0};
2605 int ret;
2606
2607 if (skb_queue_empty(msdu_list))
2608 return;
2609
2610 if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
2611 __skb_queue_purge(msdu_list);
2612 return;
2613 }
2614
2615 ar = ab->pdevs[mac_id].ar;
2616 if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
2617 __skb_queue_purge(msdu_list);
2618 return;
2619 }
2620
2621 while ((msdu = __skb_dequeue(msdu_list))) {
2622 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2623 if (unlikely(ret)) {
2624 ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "failed to process msdu %d\n", ret);
2626 dev_kfree_skb_any(msdu);
2627 continue;
2628 }
2629
2630 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2631 }
2632}
2633
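/* NAPI poll handler for a reo2sw destination ring: reap up to budget MSDUs,
 * unmap their buffers, record per-MSDU metadata from the REO descriptor in
 * the skb control block, process the MSDUs per pdev and finally replenish
 * the rx rings with as many buffers as were consumed.
 */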
2634int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
2635 struct napi_struct *napi, int budget)
2636{
2637 struct ath11k_dp *dp = &ab->dp;
2638 struct dp_rxdma_ring *rx_ring;
2639 int num_buffs_reaped[MAX_RADIOS] = {0};
2640 struct sk_buff_head msdu_list[MAX_RADIOS];
2641 struct ath11k_skb_rxcb *rxcb;
2642 int total_msdu_reaped = 0;
2643 struct hal_srng *srng;
2644 struct sk_buff *msdu;
2645 bool done = false;
2646 int buf_id, mac_id;
2647 struct ath11k *ar;
2648 struct hal_reo_dest_ring *desc;
2649 enum hal_reo_dest_ring_push_reason push_reason;
2650 u32 cookie;
2651 int i;
2652
2653 for (i = 0; i < MAX_RADIOS; i++)
2654 __skb_queue_head_init(&msdu_list[i]);
2655
2656 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2657
2658 spin_lock_bh(&srng->lock);
2659
2660try_again:
2661 ath11k_hal_srng_access_begin(ab, srng);
2662
2663 while (likely(desc =
2664 (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
2665 srng))) {
2666 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
2667 desc->buf_addr_info.info1);
2668 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2669 cookie);
2670 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
2671
2672 if (unlikely(buf_id == 0))
2673 continue;
2674
2675 ar = ab->pdevs[mac_id].ar;
2676 rx_ring = &ar->dp.rx_refill_buf_ring;
2677 spin_lock_bh(&rx_ring->idr_lock);
2678 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2679 if (unlikely(!msdu)) {
2680 ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
2681 buf_id);
2682 spin_unlock_bh(&rx_ring->idr_lock);
2683 continue;
2684 }
2685
2686 idr_remove(&rx_ring->bufs_idr, buf_id);
2687 spin_unlock_bh(&rx_ring->idr_lock);
2688
2689 rxcb = ATH11K_SKB_RXCB(msdu);
2690 dma_unmap_single(ab->dev, rxcb->paddr,
2691 msdu->len + skb_tailroom(msdu),
2692 DMA_FROM_DEVICE);
2693
2694 num_buffs_reaped[mac_id]++;
2695
2696 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
2697 desc->info0);
2698 if (unlikely(push_reason !=
2699 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
2700 dev_kfree_skb_any(msdu);
2701 ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
2702 continue;
2703 }
2704
2705 rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
2706 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2707 rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
2708 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2709 rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
2710 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2711 rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
2712 desc->rx_mpdu_info.meta_data);
2713 rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
2714 desc->rx_mpdu_info.info0);
2715 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
2716 desc->info0);
2717
2718 rxcb->mac_id = mac_id;
2719 __skb_queue_tail(&msdu_list[mac_id], msdu);
2720
2721 if (rxcb->is_continuation) {
2722 done = false;
2723 } else {
2724 total_msdu_reaped++;
2725 done = true;
2726 }
2727
2728 if (total_msdu_reaped >= budget)
2729 break;
2730 }
2731
	/* HW might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap the complete MPDU in the current
	 * rx processing.
	 */
2738 if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
2739 ath11k_hal_srng_access_end(ab, srng);
2740 goto try_again;
2741 }
2742
2743 ath11k_hal_srng_access_end(ab, srng);
2744
2745 spin_unlock_bh(&srng->lock);
2746
2747 if (unlikely(!total_msdu_reaped))
2748 goto exit;
2749
2750 for (i = 0; i < ab->num_radios; i++) {
2751 if (!num_buffs_reaped[i])
2752 continue;
2753
2754 ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
2755
2756 ar = ab->pdevs[i].ar;
2757 rx_ring = &ar->dp.rx_refill_buf_ring;
2758
2759 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
2760 ab->hw_params.hal_params->rx_buf_rbm);
2761 }
2762exit:
2763 return total_msdu_reaped;
2764}
2765
2766static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2767 struct hal_rx_mon_ppdu_info *ppdu_info)
2768{
2769 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2770 u32 num_msdu;
2771 int i;
2772
2773 if (!rx_stats)
2774 return;
2775
2776 arsta->rssi_comb = ppdu_info->rssi_comb;
2777 ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
2778
2779 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2780 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2781
2782 rx_stats->num_msdu += num_msdu;
2783 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2784 ppdu_info->tcp_ack_msdu_count;
2785 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2786 rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2787
2788 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2789 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
2790 ppdu_info->nss = 1;
2791 ppdu_info->mcs = HAL_RX_MAX_MCS;
2792 ppdu_info->tid = IEEE80211_NUM_TIDS;
2793 }
2794
2795 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2796 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2797
2798 if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2799 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2800
2801 if (ppdu_info->gi < HAL_RX_GI_MAX)
2802 rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2803
2804 if (ppdu_info->bw < HAL_RX_BW_MAX)
2805 rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2806
2807 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2808 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2809
2810 if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2811 rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2812
2813 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2814 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2815
2816 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2817 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2818
2819 if (ppdu_info->is_stbc)
2820 rx_stats->stbc_count += num_msdu;
2821
2822 if (ppdu_info->beamformed)
2823 rx_stats->beamformed_count += num_msdu;
2824
2825 if (ppdu_info->num_mpdu_fcs_ok > 1)
2826 rx_stats->ampdu_msdu_count += num_msdu;
2827 else
2828 rx_stats->non_ampdu_msdu_count += num_msdu;
2829
2830 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2831 rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
2832 rx_stats->dcm_count += ppdu_info->dcm;
2833 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
2834
2835 arsta->rssi_comb = ppdu_info->rssi_comb;
2836
2837 BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
2838 ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
2839
2840 for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
2841 arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
2842
2843 rx_stats->rx_duration += ppdu_info->rx_duration;
2844 arsta->rx_duration = rx_stats->rx_duration;
2845}
2846
2847static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2848 struct dp_rxdma_ring *rx_ring,
2849 int *buf_id)
2850{
2851 struct sk_buff *skb;
2852 dma_addr_t paddr;
2853
2854 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2855 DP_RX_BUFFER_ALIGN_SIZE);
2856
2857 if (!skb)
2858 goto fail_alloc_skb;
2859
2860 if (!IS_ALIGNED((unsigned long)skb->data,
2861 DP_RX_BUFFER_ALIGN_SIZE)) {
2862 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2863 skb->data);
2864 }
2865
2866 paddr = dma_map_single(ab->dev, skb->data,
2867 skb->len + skb_tailroom(skb),
2868 DMA_FROM_DEVICE);
2869 if (unlikely(dma_mapping_error(ab->dev, paddr)))
2870 goto fail_free_skb;
2871
2872 spin_lock_bh(&rx_ring->idr_lock);
2873 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2874 rx_ring->bufs_max, GFP_ATOMIC);
2875 spin_unlock_bh(&rx_ring->idr_lock);
2876 if (*buf_id < 0)
2877 goto fail_dma_unmap;
2878
2879 ATH11K_SKB_RXCB(skb)->paddr = paddr;
2880 return skb;
2881
2882fail_dma_unmap:
2883 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2884 DMA_FROM_DEVICE);
2885fail_free_skb:
2886 dev_kfree_skb_any(skb);
2887fail_alloc_skb:
2888 return NULL;
2889}
2890
2891int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2892 struct dp_rxdma_ring *rx_ring,
2893 int req_entries,
2894 enum hal_rx_buf_return_buf_manager mgr)
2895{
2896 struct hal_srng *srng;
2897 u32 *desc;
2898 struct sk_buff *skb;
2899 int num_free;
2900 int num_remain;
2901 int buf_id;
2902 u32 cookie;
2903 dma_addr_t paddr;
2904
2905 req_entries = min(req_entries, rx_ring->bufs_max);
2906
2907 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2908
2909 spin_lock_bh(&srng->lock);
2910
2911 ath11k_hal_srng_access_begin(ab, srng);
2912
2913 num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2914
2915 req_entries = min(num_free, req_entries);
2916 num_remain = req_entries;
2917
2918 while (num_remain > 0) {
2919 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2920 &buf_id);
2921 if (!skb)
2922 break;
2923 paddr = ATH11K_SKB_RXCB(skb)->paddr;
2924
2925 desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2926 if (!desc)
2927 goto fail_desc_get;
2928
2929 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2930 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2931
2932 num_remain--;
2933
2934 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2935 }
2936
2937 ath11k_hal_srng_access_end(ab, srng);
2938
2939 spin_unlock_bh(&srng->lock);
2940
2941 return req_entries - num_remain;
2942
2943fail_desc_get:
2944 spin_lock_bh(&rx_ring->idr_lock);
2945 idr_remove(&rx_ring->bufs_idr, buf_id);
2946 spin_unlock_bh(&rx_ring->idr_lock);
2947 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2948 DMA_FROM_DEVICE);
2949 dev_kfree_skb_any(skb);
2950 ath11k_hal_srng_access_end(ab, srng);
2951 spin_unlock_bh(&srng->lock);
2952
2953 return req_entries - num_remain;
2954}
2955
2956#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
2957
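/* Decide whether the status ring leads or lags the destination ring by
 * comparing PPDU IDs; a difference larger than
 * ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP is taken as a wrap-around of the PPDU
 * ID counter and inverts the verdict.
 */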
2958static void
2959ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
2960 struct hal_tlv_hdr *tlv)
2961{
2962 struct hal_rx_ppdu_start *ppdu_start;
2963 u16 ppdu_id_diff, ppdu_id, tlv_len;
2964 u8 *ptr;
2965
	/* The PPDU ID is part of the second TLV; move ptr to the second TLV */
2967 tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
2968 ptr = (u8 *)tlv;
2969 ptr += sizeof(*tlv) + tlv_len;
2970 tlv = (struct hal_tlv_hdr *)ptr;
2971
2972 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
2973 return;
2974
2975 ptr += sizeof(*tlv);
2976 ppdu_start = (struct hal_rx_ppdu_start *)ptr;
2977 ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
2978 __le32_to_cpu(ppdu_start->info0));
2979
2980 if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
2981 pmon->buf_state = DP_MON_STATUS_LEAD;
2982 ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
2983 if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
2984 pmon->buf_state = DP_MON_STATUS_LAG;
2985 } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
2986 pmon->buf_state = DP_MON_STATUS_LAG;
2987 ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
2988 if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
2989 pmon->buf_state = DP_MON_STATUS_LEAD;
2990 }
2991}
2992
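/* Reap completed buffers from the rx monitor status ring: queue buffers
 * whose HAL_RX_STATUS_BUFFER_DONE TLV is set onto skb_list and replace each
 * reaped ring entry with a freshly allocated and DMA-mapped buffer.
 */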
2993static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
2994 int *budget, struct sk_buff_head *skb_list)
2995{
2996 struct ath11k *ar;
2997 const struct ath11k_hw_hal_params *hal_params;
2998 struct ath11k_pdev_dp *dp;
2999 struct dp_rxdma_ring *rx_ring;
3000 struct ath11k_mon_data *pmon;
3001 struct hal_srng *srng;
3002 void *rx_mon_status_desc;
3003 struct sk_buff *skb;
3004 struct ath11k_skb_rxcb *rxcb;
3005 struct hal_tlv_hdr *tlv;
3006 u32 cookie;
3007 int buf_id, srng_id;
3008 dma_addr_t paddr;
3009 u8 rbm;
3010 int num_buffs_reaped = 0;
3011
3012 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
3013 dp = &ar->dp;
3014 pmon = &dp->mon_data;
3015 srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
3016 rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
3017
3018 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
3019
3020 spin_lock_bh(&srng->lock);
3021
3022 ath11k_hal_srng_access_begin(ab, srng);
3023 while (*budget) {
3024 *budget -= 1;
3025 rx_mon_status_desc =
3026 ath11k_hal_srng_src_peek(ab, srng);
3027 if (!rx_mon_status_desc) {
3028 pmon->buf_state = DP_MON_STATUS_REPLINISH;
3029 break;
3030 }
3031
3032 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
3033 &cookie, &rbm);
3034 if (paddr) {
3035 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
3036
3037 spin_lock_bh(&rx_ring->idr_lock);
3038 skb = idr_find(&rx_ring->bufs_idr, buf_id);
3039 spin_unlock_bh(&rx_ring->idr_lock);
3040
3041 if (!skb) {
3042 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
3043 buf_id);
3044 pmon->buf_state = DP_MON_STATUS_REPLINISH;
3045 goto move_next;
3046 }
3047
3048 rxcb = ATH11K_SKB_RXCB(skb);
3049
3050 dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
3051 skb->len + skb_tailroom(skb),
3052 DMA_FROM_DEVICE);
3053
3054 tlv = (struct hal_tlv_hdr *)skb->data;
3055 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
3056 HAL_RX_STATUS_BUFFER_DONE) {
3057 ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
3058 FIELD_GET(HAL_TLV_HDR_TAG,
3059 tlv->tl), buf_id);
				/* If the DONE status is missing, hold on to
				 * this status ring entry: keep the HP of
				 * mon_status_ring unchanged, break out here
				 * and check the same buffer again on the
				 * next pass.
				 */
3067 pmon->buf_state = DP_MON_STATUS_NO_DMA;
3068 break;
3069 }
3070
3071 spin_lock_bh(&rx_ring->idr_lock);
3072 idr_remove(&rx_ring->bufs_idr, buf_id);
3073 spin_unlock_bh(&rx_ring->idr_lock);
3074 if (ab->hw_params.full_monitor_mode) {
3075 ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
3076 if (paddr == pmon->mon_status_paddr)
3077 pmon->buf_state = DP_MON_STATUS_MATCH;
3078 }
3079
3080 dma_unmap_single(ab->dev, rxcb->paddr,
3081 skb->len + skb_tailroom(skb),
3082 DMA_FROM_DEVICE);
3083
3084 __skb_queue_tail(skb_list, skb);
3085 } else {
3086 pmon->buf_state = DP_MON_STATUS_REPLINISH;
3087 }
3088move_next:
3089 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
3090 &buf_id);
3091
3092 if (!skb) {
3093 hal_params = ab->hw_params.hal_params;
3094 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
3095 hal_params->rx_buf_rbm);
3096 num_buffs_reaped++;
3097 break;
3098 }
3099 rxcb = ATH11K_SKB_RXCB(skb);
3100
3101 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
3102 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3103
3104 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
3105 cookie,
3106 ab->hw_params.hal_params->rx_buf_rbm);
3107 ath11k_hal_srng_src_get_next_entry(ab, srng);
3108 num_buffs_reaped++;
3109 }
3110 ath11k_hal_srng_access_end(ab, srng);
3111 spin_unlock_bh(&srng->lock);
3112
3113 return num_buffs_reaped;
3114}
3115
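/* Fragment reassembly timeout: if the sequence is still incomplete when the
 * timer fires, drop all fragments gathered so far.
 */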
3116static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
3117{
3118 struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
3119
3120 spin_lock_bh(&rx_tid->ab->base_lock);
3121 if (rx_tid->last_frag_no &&
3122 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
3123 spin_unlock_bh(&rx_tid->ab->base_lock);
3124 return;
3125 }
3126 ath11k_dp_rx_frags_cleanup(rx_tid, true);
3127 spin_unlock_bh(&rx_tid->ab->base_lock);
3128}
3129
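/* Set up rx fragment handling for a peer: allocate the michael_mic
 * transform and initialize the per-TID fragment queues and timers.
 */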
3130int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
3131{
3132 struct ath11k_base *ab = ar->ab;
3133 struct crypto_shash *tfm;
3134 struct ath11k_peer *peer;
3135 struct dp_rx_tid *rx_tid;
3136 int i;
3137
3138 tfm = crypto_alloc_shash("michael_mic", 0, 0);
3139 if (IS_ERR(tfm)) {
3140 ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
3141 PTR_ERR(tfm));
3142 return PTR_ERR(tfm);
3143 }
3144
3145 spin_lock_bh(&ab->base_lock);
3146
3147 peer = ath11k_peer_find(ab, vdev_id, peer_mac);
3148 if (!peer) {
3149 ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
3150 spin_unlock_bh(&ab->base_lock);
3151 crypto_free_shash(tfm);
3152 return -ENOENT;
3153 }
3154
3155 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3156 rx_tid = &peer->rx_tid[i];
3157 rx_tid->ab = ab;
3158 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
3159 skb_queue_head_init(&rx_tid->rx_frags);
3160 }
3161
3162 peer->tfm_mmic = tfm;
3163 peer->dp_setup_done = true;
3164 spin_unlock_bh(&ab->base_lock);
3165
3166 return 0;
3167}
3168
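/* Compute the TKIP Michael MIC over the MSDU payload using the 8-byte rx
 * MIC key. The MIC covers a 16-byte pseudo header of DA, SA and the QoS TID
 * (zero padded) followed by the data.
 */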
3169static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
3170 struct ieee80211_hdr *hdr, u8 *data,
3171 size_t data_len, u8 *mic)
3172{
3173 SHASH_DESC_ON_STACK(desc, tfm);
3174 u8 mic_hdr[16] = {0};
3175 u8 tid = 0;
3176 int ret;
3177
3178 if (!tfm)
3179 return -EINVAL;
3180
3181 desc->tfm = tfm;
3182
3183 ret = crypto_shash_setkey(tfm, key, 8);
3184 if (ret)
3185 goto out;
3186
3187 ret = crypto_shash_init(desc);
3188 if (ret)
3189 goto out;
3190
3191 /* TKIP MIC header */
3192 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3193 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3194 if (ieee80211_is_data_qos(hdr->frame_control))
3195 tid = ieee80211_get_tid(hdr);
3196 mic_hdr[12] = tid;
3197
3198 ret = crypto_shash_update(desc, mic_hdr, 16);
3199 if (ret)
3200 goto out;
3201 ret = crypto_shash_update(desc, data, data_len);
3202 if (ret)
3203 goto out;
3204 ret = crypto_shash_final(desc, mic);
3205out:
3206 shash_desc_zero(desc);
3207 return ret;
3208}
3209
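/* Verify the Michael MIC of a reassembled TKIP MSDU. On mismatch the frame
 * is still delivered to mac80211 with RX_FLAG_MMIC_ERROR set so that TKIP
 * countermeasures can kick in.
 */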
3210static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
3211 struct sk_buff *msdu)
3212{
3213 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3214 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3215 struct ieee80211_key_conf *key_conf;
3216 struct ieee80211_hdr *hdr;
3217 u8 mic[IEEE80211_CCMP_MIC_LEN];
3218 int head_len, tail_len, ret;
3219 size_t data_len;
3220 u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3221 u8 *key, *data;
3222 u8 key_idx;
3223
3224 if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
3225 HAL_ENCRYPT_TYPE_TKIP_MIC)
3226 return 0;
3227
3228 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3229 hdr_len = ieee80211_hdrlen(hdr->frame_control);
3230 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
3231 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
3232
3233 if (!is_multicast_ether_addr(hdr->addr1))
3234 key_idx = peer->ucast_keyidx;
3235 else
3236 key_idx = peer->mcast_keyidx;
3237
3238 key_conf = peer->keys[key_idx];
3239
3240 data = msdu->data + head_len;
3241 data_len = msdu->len - head_len - tail_len;
3242 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
3243
3244 ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
3245 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
3246 goto mic_fail;
3247
3248 return 0;
3249
3250mic_fail:
	ATH11K_SKB_RXCB(msdu)->is_first_msdu = true;
	ATH11K_SKB_RXCB(msdu)->is_last_msdu = true;
3253
3254 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
3255 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
3256 skb_pull(msdu, hal_rx_desc_sz);
3257
3258 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
3259 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
3260 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
3261 ieee80211_rx(ar->hw, msdu);
3262 return -EINVAL;
3263}
3264
3265static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
3266 enum hal_encrypt_type enctype, u32 flags)
3267{
3268 struct ieee80211_hdr *hdr;
3269 size_t hdr_len;
3270 size_t crypto_len;
3271 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3272
3273 if (!flags)
3274 return;
3275
3276 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3277
3278 if (flags & RX_FLAG_MIC_STRIPPED)
3279 skb_trim(msdu, msdu->len -
3280 ath11k_dp_rx_crypto_mic_len(ar, enctype));
3281
3282 if (flags & RX_FLAG_ICV_STRIPPED)
3283 skb_trim(msdu, msdu->len -
3284 ath11k_dp_rx_crypto_icv_len(ar, enctype));
3285
3286 if (flags & RX_FLAG_IV_STRIPPED) {
3287 hdr_len = ieee80211_hdrlen(hdr->frame_control);
3288 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
3289
3290 memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
3291 (void *)msdu->data + hal_rx_desc_sz, hdr_len);
3292 skb_pull(msdu, crypto_len);
3293 }
3294}
3295
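/* Reassemble the fragments queued on this rx_tid into one MSDU: strip
 * per-fragment crypto material and headers, linearize everything into the
 * first fragment (expanding its head if required), clear the morefrags bit
 * and, for TKIP, verify the Michael MIC of the result.
 */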
3296static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
3297 struct ath11k_peer *peer,
3298 struct dp_rx_tid *rx_tid,
3299 struct sk_buff **defrag_skb)
3300{
3301 struct hal_rx_desc *rx_desc;
3302 struct sk_buff *skb, *first_frag, *last_frag;
3303 struct ieee80211_hdr *hdr;
3304 struct rx_attention *rx_attention;
3305 enum hal_encrypt_type enctype;
3306 bool is_decrypted = false;
3307 int msdu_len = 0;
3308 int extra_space;
3309 u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3310
3311 first_frag = skb_peek(&rx_tid->rx_frags);
3312 last_frag = skb_peek_tail(&rx_tid->rx_frags);
3313
3314 skb_queue_walk(&rx_tid->rx_frags, skb) {
3315 flags = 0;
3316 rx_desc = (struct hal_rx_desc *)skb->data;
3317 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3318
3319 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
3320 if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
3321 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
3322 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
3323 }
3324
3325 if (is_decrypted) {
3326 if (skb != first_frag)
3327 flags |= RX_FLAG_IV_STRIPPED;
3328 if (skb != last_frag)
3329 flags |= RX_FLAG_ICV_STRIPPED |
3330 RX_FLAG_MIC_STRIPPED;
3331 }
3332
3333 /* RX fragments are always raw packets */
3334 if (skb != last_frag)
3335 skb_trim(skb, skb->len - FCS_LEN);
3336 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3337
3338 if (skb != first_frag)
3339 skb_pull(skb, hal_rx_desc_sz +
3340 ieee80211_hdrlen(hdr->frame_control));
3341 msdu_len += skb->len;
3342 }
3343
3344 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3345 if (extra_space > 0 &&
3346 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3347 return -ENOMEM;
3348
3349 __skb_unlink(first_frag, &rx_tid->rx_frags);
3350 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3351 skb_put_data(first_frag, skb->data, skb->len);
3352 dev_kfree_skb_any(skb);
3353 }
3354
3355 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
3356 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3357 ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
3358
3359 if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3360 first_frag = NULL;
3361
3362 *defrag_skb = first_frag;
3363 return 0;
3364}
3365
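/* Hand the defragmented MSDU back to the hardware: rewrite the first MSDU
 * link descriptor and remap the skb, then queue an entry on the REO
 * reinject (entrance) ring so the frame goes through regular REO
 * processing, including the PN check, once more.
 */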
3366static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
3367 struct sk_buff *defrag_skb)
3368{
3369 struct ath11k_base *ab = ar->ab;
3370 struct ath11k_pdev_dp *dp = &ar->dp;
3371 struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
3372 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3373 struct hal_reo_entrance_ring *reo_ent_ring;
3374 struct hal_reo_dest_ring *reo_dest_ring;
3375 struct dp_link_desc_bank *link_desc_banks;
3376 struct hal_rx_msdu_link *msdu_link;
3377 struct hal_rx_msdu_details *msdu0;
3378 struct hal_srng *srng;
3379 dma_addr_t paddr;
3380 u32 desc_bank, msdu_info, mpdu_info;
3381 u32 dst_idx, cookie, hal_rx_desc_sz;
3382 int ret, buf_id;
3383
3384 hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
3385 link_desc_banks = ab->dp.link_desc_banks;
3386 reo_dest_ring = rx_tid->dst_ring_desc;
3387
3388 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3389 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3390 (paddr - link_desc_banks[desc_bank].paddr));
3391 msdu0 = &msdu_link->msdu_link[0];
3392 dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
3393 memset(msdu0, 0, sizeof(*msdu0));
3394
3395 msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
3396 FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
3397 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
3398 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
3399 defrag_skb->len - hal_rx_desc_sz) |
3400 FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
3401 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
3402 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
3403 msdu0->rx_msdu_info.info0 = msdu_info;
3404
3405 /* change msdu len in hal rx desc */
3406 ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3407
3408 paddr = dma_map_single(ab->dev, defrag_skb->data,
3409 defrag_skb->len + skb_tailroom(defrag_skb),
3410 DMA_TO_DEVICE);
3411 if (dma_mapping_error(ab->dev, paddr))
3412 return -ENOMEM;
3413
3414 spin_lock_bh(&rx_refill_ring->idr_lock);
3415 buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
3416 rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
3417 spin_unlock_bh(&rx_refill_ring->idr_lock);
3418 if (buf_id < 0) {
3419 ret = -ENOMEM;
3420 goto err_unmap_dma;
3421 }
3422
3423 ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
3424 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
3425 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3426
3427 ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
3428 ab->hw_params.hal_params->rx_buf_rbm);
3429
3430 /* Fill mpdu details into reo entrance ring */
3431 srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
3432
3433 spin_lock_bh(&srng->lock);
3434 ath11k_hal_srng_access_begin(ab, srng);
3435
3436 reo_ent_ring = (struct hal_reo_entrance_ring *)
3437 ath11k_hal_srng_src_get_next_entry(ab, srng);
3438 if (!reo_ent_ring) {
3439 ath11k_hal_srng_access_end(ab, srng);
3440 spin_unlock_bh(&srng->lock);
3441 ret = -ENOSPC;
3442 goto err_free_idr;
3443 }
3444 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3445
3446 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3447 ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
3448 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
3449
3450 mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
3451 FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
3452 FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
3453 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
3454 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
3455 FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
3456 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
3457
3458 reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
3459 reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
3460 reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
3461 reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
3462 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
3463 reo_dest_ring->info0)) |
3464 FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
3465 ath11k_hal_srng_access_end(ab, srng);
3466 spin_unlock_bh(&srng->lock);
3467
3468 return 0;
3469
3470err_free_idr:
3471 spin_lock_bh(&rx_refill_ring->idr_lock);
3472 idr_remove(&rx_refill_ring->bufs_idr, buf_id);
3473 spin_unlock_bh(&rx_refill_ring->idr_lock);
3474err_unmap_dma:
3475 dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3476 DMA_TO_DEVICE);
3477 return ret;
3478}
3479
3480static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
3481 struct sk_buff *a, struct sk_buff *b)
3482{
3483 int frag1, frag2;
3484
3485 frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
3486 frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
3487
3488 return frag1 - frag2;
3489}
3490
3491static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
3492 struct sk_buff_head *frag_list,
3493 struct sk_buff *cur_frag)
3494{
3495 struct sk_buff *skb;
3496 int cmp;
3497
3498 skb_queue_walk(frag_list, skb) {
3499 cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
3500 if (cmp < 0)
3501 continue;
3502 __skb_queue_before(frag_list, skb, cur_frag);
3503 return;
3504 }
3505 __skb_queue_tail(frag_list, cur_frag);
3506}
3507
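/* Extract the 48-bit CCMP/GCMP packet number from the IV following the
 * 802.11 header (PN0/PN1 in bytes 0-1, PN2-PN5 in bytes 4-7).
 */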
3508static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
3509{
3510 struct ieee80211_hdr *hdr;
3511 u64 pn = 0;
3512 u8 *ehdr;
3513 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3514
3515 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3516 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3517
3518 pn = ehdr[0];
3519 pn |= (u64)ehdr[1] << 8;
3520 pn |= (u64)ehdr[4] << 16;
3521 pn |= (u64)ehdr[5] << 24;
3522 pn |= (u64)ehdr[6] << 32;
3523 pn |= (u64)ehdr[7] << 40;
3524
3525 return pn;
3526}
3527
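/* For CCMP/GCMP ciphers, require the packet number to increase by exactly
 * one from fragment to fragment; other ciphers are accepted as-is.
 */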
3528static bool
3529ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3530{
3531 enum hal_encrypt_type encrypt_type;
3532 struct sk_buff *first_frag, *skb;
3533 struct hal_rx_desc *desc;
3534 u64 last_pn;
3535 u64 cur_pn;
3536
3537 first_frag = skb_peek(&rx_tid->rx_frags);
3538 desc = (struct hal_rx_desc *)first_frag->data;
3539
3540 encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
3541 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3542 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3543 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3544 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3545 return true;
3546
3547 last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
3548 skb_queue_walk(&rx_tid->rx_frags, skb) {
3549 if (skb == first_frag)
3550 continue;
3551
3552 cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
3553 if (cur_pn != last_pn + 1)
3554 return false;
3555 last_pn = cur_pn;
3556 }
3557 return true;
3558}
3559
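/* Handle one received fragment: sanity check it, sort it into the per-TID
 * fragment list and update the fragment bitmap. Once all fragments of the
 * sequence have arrived, validate the PN sequence, reassemble the MSDU and
 * reinject it via the REO ring; otherwise (re)arm the fragment timer.
 */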
3560static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
3561 struct sk_buff *msdu,
3562 u32 *ring_desc)
3563{
3564 struct ath11k_base *ab = ar->ab;
3565 struct hal_rx_desc *rx_desc;
3566 struct ath11k_peer *peer;
3567 struct dp_rx_tid *rx_tid;
3568 struct sk_buff *defrag_skb = NULL;
3569 u32 peer_id;
3570 u16 seqno, frag_no;
3571 u8 tid;
3572 int ret = 0;
3573 bool more_frags;
3574 bool is_mcbc;
3575
3576 rx_desc = (struct hal_rx_desc *)msdu->data;
3577 peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
3578 tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
3579 seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
3580 frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
3581 more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
3582 is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
3583
3584 /* Multicast/Broadcast fragments are not expected */
3585 if (is_mcbc)
3586 return -EINVAL;
3587
3588 if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
3589 !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
3590 tid > IEEE80211_NUM_TIDS)
3591 return -EINVAL;
3592
	/* An unfragmented packet arriving in the reo
	 * exception ring shouldn't happen, as such
	 * packets typically come from the reo2sw srngs.
	 */
3598 if (WARN_ON_ONCE(!frag_no && !more_frags))
3599 return -EINVAL;
3600
3601 spin_lock_bh(&ab->base_lock);
3602 peer = ath11k_peer_find_by_id(ab, peer_id);
3603 if (!peer) {
3604 ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3605 peer_id);
3606 ret = -ENOENT;
3607 goto out_unlock;
3608 }
3609 if (!peer->dp_setup_done) {
3610 ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
3611 peer->addr, peer_id);
3612 ret = -ENOENT;
3613 goto out_unlock;
3614 }
3615
3616 rx_tid = &peer->rx_tid[tid];
3617
3618 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3619 skb_queue_empty(&rx_tid->rx_frags)) {
3620 /* Flush stored fragments and start a new sequence */
3621 ath11k_dp_rx_frags_cleanup(rx_tid, true);
3622 rx_tid->cur_sn = seqno;
3623 }
3624
3625 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3626 /* Fragment already present */
3627 ret = -EINVAL;
3628 goto out_unlock;
3629 }
3630
3631 if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
3632 __skb_queue_tail(&rx_tid->rx_frags, msdu);
3633 else
3634 ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
3635
3636 rx_tid->rx_frag_bitmap |= BIT(frag_no);
3637 if (!more_frags)
3638 rx_tid->last_frag_no = frag_no;
3639
3640 if (frag_no == 0) {
3641 rx_tid->dst_ring_desc = kmemdup(ring_desc,
3642 sizeof(*rx_tid->dst_ring_desc),
3643 GFP_ATOMIC);
3644 if (!rx_tid->dst_ring_desc) {
3645 ret = -ENOMEM;
3646 goto out_unlock;
3647 }
3648 } else {
3649 ath11k_dp_rx_link_desc_return(ab, ring_desc,
3650 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3651 }
3652
3653 if (!rx_tid->last_frag_no ||
3654 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3655 mod_timer(&rx_tid->frag_timer, jiffies +
3656 ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
3657 goto out_unlock;
3658 }
3659
3660 spin_unlock_bh(&ab->base_lock);
3661 del_timer_sync(&rx_tid->frag_timer);
3662 spin_lock_bh(&ab->base_lock);
3663
3664 peer = ath11k_peer_find_by_id(ab, peer_id);
3665 if (!peer)
3666 goto err_frags_cleanup;
3667
3668 if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3669 goto err_frags_cleanup;
3670
3671 if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3672 goto err_frags_cleanup;
3673
3674 if (!defrag_skb)
3675 goto err_frags_cleanup;
3676
3677 if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3678 goto err_frags_cleanup;
3679
3680 ath11k_dp_rx_frags_cleanup(rx_tid, false);
3681 goto out_unlock;
3682
3683err_frags_cleanup:
3684 dev_kfree_skb_any(defrag_skb);
3685 ath11k_dp_rx_frags_cleanup(rx_tid, true);
3686out_unlock:
3687 spin_unlock_bh(&ab->base_lock);
3688 return ret;
3689}
3690
3691static int
3692ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
3693{
3694 struct ath11k_pdev_dp *dp = &ar->dp;
3695 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
3696 struct sk_buff *msdu;
3697 struct ath11k_skb_rxcb *rxcb;
3698 struct hal_rx_desc *rx_desc;
3699 u8 *hdr_status;
3700 u16 msdu_len;
3701 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3702
3703 spin_lock_bh(&rx_ring->idr_lock);
3704 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3705 if (!msdu) {
3706 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
3707 buf_id);
3708 spin_unlock_bh(&rx_ring->idr_lock);
3709 return -EINVAL;
3710 }
3711
3712 idr_remove(&rx_ring->bufs_idr, buf_id);
3713 spin_unlock_bh(&rx_ring->idr_lock);
3714
3715 rxcb = ATH11K_SKB_RXCB(msdu);
3716 dma_unmap_single(ar->ab->dev, rxcb->paddr,
3717 msdu->len + skb_tailroom(msdu),
3718 DMA_FROM_DEVICE);
3719
3720 if (drop) {
3721 dev_kfree_skb_any(msdu);
3722 return 0;
3723 }
3724
3725 rcu_read_lock();
3726 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3727 dev_kfree_skb_any(msdu);
3728 goto exit;
3729 }
3730
3731 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3732 dev_kfree_skb_any(msdu);
3733 goto exit;
3734 }
3735
3736 rx_desc = (struct hal_rx_desc *)msdu->data;
3737 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
3738 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3739 hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
3740 ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
3741 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
3742 sizeof(struct ieee80211_hdr));
3743 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
3744 sizeof(struct hal_rx_desc));
3745 dev_kfree_skb_any(msdu);
3746 goto exit;
3747 }
3748
3749 skb_put(msdu, hal_rx_desc_sz + msdu_len);
3750
3751 if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
3752 dev_kfree_skb_any(msdu);
3753 ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
3754 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3755 }
3756exit:
3757 rcu_read_unlock();
3758 return 0;
3759}
3760
3761int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
3762 int budget)
3763{
3764 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3765 struct dp_link_desc_bank *link_desc_banks;
3766 enum hal_rx_buf_return_buf_manager rbm;
3767 int tot_n_bufs_reaped, quota, ret, i;
3768 int n_bufs_reaped[MAX_RADIOS] = {0};
3769 struct dp_rxdma_ring *rx_ring;
3770 struct dp_srng *reo_except;
3771 u32 desc_bank, num_msdus;
3772 struct hal_srng *srng;
3773 struct ath11k_dp *dp;
3774 void *link_desc_va;
3775 int buf_id, mac_id;
3776 struct ath11k *ar;
3777 dma_addr_t paddr;
3778 u32 *desc;
3779 bool is_frag;
3780 u8 drop = 0;
3781
3782 tot_n_bufs_reaped = 0;
3783 quota = budget;
3784
3785 dp = &ab->dp;
3786 reo_except = &dp->reo_except_ring;
3787 link_desc_banks = dp->link_desc_banks;
3788
3789 srng = &ab->hal.srng_list[reo_except->ring_id];
3790
3791 spin_lock_bh(&srng->lock);
3792
3793 ath11k_hal_srng_access_begin(ab, srng);
3794
3795 while (budget &&
3796 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3797 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
3798
3799 ab->soc_stats.err_ring_pkts++;
3800 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
3801 &desc_bank);
3802 if (ret) {
3803 ath11k_warn(ab, "failed to parse error reo desc %d\n",
3804 ret);
3805 continue;
3806 }
3807 link_desc_va = link_desc_banks[desc_bank].vaddr +
3808 (paddr - link_desc_banks[desc_bank].paddr);
3809 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3810 &rbm);
3811 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
3812 rbm != HAL_RX_BUF_RBM_SW3_BM) {
3813 ab->soc_stats.invalid_rbm++;
3814 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
3815 ath11k_dp_rx_link_desc_return(ab, desc,
3816 HAL_WBM_REL_BM_ACT_REL_MSDU);
3817 continue;
3818 }
3819
3820 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
3821
3822 /* Process only rx fragments with one msdu per link desc below, and drop
3823 * msdus indicated due to error reasons.
3824 */
3825 if (!is_frag || num_msdus > 1) {
3826 drop = 1;
3827 /* Return the link desc back to wbm idle list */
3828 ath11k_dp_rx_link_desc_return(ab, desc,
3829 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3830 }
3831
3832 for (i = 0; i < num_msdus; i++) {
3833 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3834 msdu_cookies[i]);
3835
3836 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
3837 msdu_cookies[i]);
3838
3839 ar = ab->pdevs[mac_id].ar;
3840
3841 if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
3842 n_bufs_reaped[mac_id]++;
3843 tot_n_bufs_reaped++;
3844 }
3845 }
3846
3847 if (tot_n_bufs_reaped >= quota) {
3848 tot_n_bufs_reaped = quota;
3849 goto exit;
3850 }
3851
3852 budget = quota - tot_n_bufs_reaped;
3853 }
3854
3855exit:
3856 ath11k_hal_srng_access_end(ab, srng);
3857
3858 spin_unlock_bh(&srng->lock);
3859
3860 for (i = 0; i < ab->num_radios; i++) {
3861 if (!n_bufs_reaped[i])
3862 continue;
3863
3864 ar = ab->pdevs[i].ar;
3865 rx_ring = &ar->dp.rx_refill_buf_ring;
3866
3867 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
3868 ab->hw_params.hal_params->rx_buf_rbm);
3869 }
3870
3871 return tot_n_bufs_reaped;
3872}
3873
3874static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3875 int msdu_len,
3876 struct sk_buff_head *msdu_list)
3877{
3878 struct sk_buff *skb, *tmp;
3879 struct ath11k_skb_rxcb *rxcb;
3880 int n_buffs;
3881
3882 n_buffs = DIV_ROUND_UP(msdu_len,
3883 (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
3884
3885 skb_queue_walk_safe(msdu_list, skb, tmp) {
3886 rxcb = ATH11K_SKB_RXCB(skb);
3887 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3888 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3889 if (!n_buffs)
3890 break;
3891 __skb_unlink(skb, msdu_list);
3892 dev_kfree_skb_any(skb);
3893 n_buffs--;
3894 }
3895 }
3896}
3897
3898static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
3899 struct ieee80211_rx_status *status,
3900 struct sk_buff_head *msdu_list)
3901{
3902 u16 msdu_len;
3903 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3904 struct rx_attention *rx_attention;
3905 u8 l3pad_bytes;
3906 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3907 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3908
3909 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
3910
3911 if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3912 /* First buffer will be freed by the caller, so deduct its length */
3913 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3914 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3915 return -EINVAL;
3916 }
3917
3918 rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
3919 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
3920 ath11k_warn(ar->ab,
3921 "msdu_done bit not set in null_q_des processing\n");
3922 __skb_queue_purge(msdu_list);
3923 return -EIO;
3924 }
3925
3926 /* Handle NULL queue descriptor violations arising out of a missing
3927 * REO queue for a given peer or a given TID. This typically
3928 * happens if a packet is received on a QoS enabled TID before the
3929 * ADDBA negotiation for that TID has set up the TID queue. It
3930 * may also happen for MC/BC frames if they are not routed to the
3931 * non-QoS TID queue, in the absence of any other default TID queue.
3932 * This error can show up both in a REO destination and a WBM release ring.
3933 */
3934
3935 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
3936 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
3937
3938 if (rxcb->is_frag) {
3939 skb_pull(msdu, hal_rx_desc_sz);
3940 } else {
3941 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
3942
3943 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3944 return -EINVAL;
3945
3946 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3947 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3948 }
3949 ath11k_dp_rx_h_ppdu(ar, desc, status);
3950
3951 ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
3952
3953 rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);
3954
3955 /* Please note that the caller still has access to the msdu and will
3956 * complete rx with mac80211, so there is no need to clean up amsdu_list.
3957 */
3958
3959 return 0;
3960}
3961
3962static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
3963 struct ieee80211_rx_status *status,
3964 struct sk_buff_head *msdu_list)
3965{
3966 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3967 bool drop = false;
3968
3969 ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3970
3971 switch (rxcb->err_code) {
3972 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3973 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3974 drop = true;
3975 break;
3976 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3977 /* TODO: Do not drop PN failed packets in the driver;
3978 * instead, it is good to drop such packets in mac80211
3979 * after incrementing the replay counters.
3980 */
3981 fallthrough;
3982 default:
3983 /* TODO: Review other errors and process them to mac80211
3984 * as appropriate.
3985 */
3986 drop = true;
3987 break;
3988 }
3989
3990 return drop;
3991}
3992
3993static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
3994 struct ieee80211_rx_status *status)
3995{
3996 u16 msdu_len;
3997 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3998 u8 l3pad_bytes;
3999 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4000 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
4001
4002 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
4003 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
4004
4005 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
4006 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
4007 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
4008 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
4009
4010 ath11k_dp_rx_h_ppdu(ar, desc, status);
4011
4012 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
4013 RX_FLAG_DECRYPTED);
4014
4015 ath11k_dp_rx_h_undecap(ar, msdu, desc,
4016 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
4017}
4018
4019static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
4020 struct ieee80211_rx_status *status)
4021{
4022 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4023 bool drop = false;
4024
4025 ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
4026
4027 switch (rxcb->err_code) {
4028 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
4029 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
4030 break;
4031 default:
4032 /* TODO: Review other rxdma error code to check if anything is
4033 * worth reporting to mac80211
4034 */
4035 drop = true;
4036 break;
4037 }
4038
4039 return drop;
4040}
4041
4042static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
4043 struct napi_struct *napi,
4044 struct sk_buff *msdu,
4045 struct sk_buff_head *msdu_list)
4046{
4047 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4048 struct ieee80211_rx_status rxs = {0};
4049 bool drop = true;
4050
4051 switch (rxcb->err_rel_src) {
4052 case HAL_WBM_REL_SRC_MODULE_REO:
4053 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
4054 break;
4055 case HAL_WBM_REL_SRC_MODULE_RXDMA:
4056 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
4057 break;
4058 default:
4059 /* msdu will get freed */
4060 break;
4061 }
4062
4063 if (drop) {
4064 dev_kfree_skb_any(msdu);
4065 return;
4066 }
4067
4068 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
4069}
4070
4071int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
4072 struct napi_struct *napi, int budget)
4073{
4074 struct ath11k *ar;
4075 struct ath11k_dp *dp = &ab->dp;
4076 struct dp_rxdma_ring *rx_ring;
4077 struct hal_rx_wbm_rel_info err_info;
4078 struct hal_srng *srng;
4079 struct sk_buff *msdu;
4080 struct sk_buff_head msdu_list[MAX_RADIOS];
4081 struct ath11k_skb_rxcb *rxcb;
4082 u32 *rx_desc;
4083 int buf_id, mac_id;
4084 int num_buffs_reaped[MAX_RADIOS] = {0};
4085 int total_num_buffs_reaped = 0;
4086 int ret, i;
4087
4088 for (i = 0; i < ab->num_radios; i++)
4089 __skb_queue_head_init(&msdu_list[i]);
4090
4091 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
4092
4093 spin_lock_bh(&srng->lock);
4094
4095 ath11k_hal_srng_access_begin(ab, srng);
4096
4097 while (budget) {
4098 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
4099 if (!rx_desc)
4100 break;
4101
4102 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
4103 if (ret) {
4104 ath11k_warn(ab,
4105 "failed to parse rx error in wbm_rel ring desc %d\n",
4106 ret);
4107 continue;
4108 }
4109
4110 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
4111 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
4112
4113 ar = ab->pdevs[mac_id].ar;
4114 rx_ring = &ar->dp.rx_refill_buf_ring;
4115
4116 spin_lock_bh(&rx_ring->idr_lock);
4117 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4118 if (!msdu) {
4119 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
4120 buf_id, mac_id);
4121 spin_unlock_bh(&rx_ring->idr_lock);
4122 continue;
4123 }
4124
4125 idr_remove(&rx_ring->bufs_idr, buf_id);
4126 spin_unlock_bh(&rx_ring->idr_lock);
4127
4128 rxcb = ATH11K_SKB_RXCB(msdu);
4129 dma_unmap_single(ab->dev, rxcb->paddr,
4130 msdu->len + skb_tailroom(msdu),
4131 DMA_FROM_DEVICE);
4132
4133 num_buffs_reaped[mac_id]++;
4134 total_num_buffs_reaped++;
4135 budget--;
4136
4137 if (err_info.push_reason !=
4138 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4139 dev_kfree_skb_any(msdu);
4140 continue;
4141 }
4142
4143 rxcb->err_rel_src = err_info.err_rel_src;
4144 rxcb->err_code = err_info.err_code;
4145 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
4146 __skb_queue_tail(&msdu_list[mac_id], msdu);
4147 }
4148
4149 ath11k_hal_srng_access_end(ab, srng);
4150
4151 spin_unlock_bh(&srng->lock);
4152
4153 if (!total_num_buffs_reaped)
4154 goto done;
4155
4156 for (i = 0; i < ab->num_radios; i++) {
4157 if (!num_buffs_reaped[i])
4158 continue;
4159
4160 ar = ab->pdevs[i].ar;
4161 rx_ring = &ar->dp.rx_refill_buf_ring;
4162
4163 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
4164 ab->hw_params.hal_params->rx_buf_rbm);
4165 }
4166
4167 rcu_read_lock();
4168 for (i = 0; i < ab->num_radios; i++) {
4169 if (!rcu_dereference(ab->pdevs_active[i])) {
4170 __skb_queue_purge(&msdu_list[i]);
4171 continue;
4172 }
4173
4174 ar = ab->pdevs[i].ar;
4175
4176 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
4177 __skb_queue_purge(&msdu_list[i]);
4178 continue;
4179 }
4180
4181 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
4182 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
4183 }
4184 rcu_read_unlock();
4185done:
4186 return total_num_buffs_reaped;
4187}
4188
4189int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
4190{
4191 struct ath11k *ar;
4192 struct dp_srng *err_ring;
4193 struct dp_rxdma_ring *rx_ring;
4194 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
4195 struct hal_srng *srng;
4196 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
4197 enum hal_rx_buf_return_buf_manager rbm;
4198 enum hal_reo_entr_rxdma_ecode rxdma_err_code;
4199 struct ath11k_skb_rxcb *rxcb;
4200 struct sk_buff *skb;
4201 struct hal_reo_entrance_ring *entr_ring;
4202 void *desc;
4203 int num_buf_freed = 0;
4204 int quota = budget;
4205 dma_addr_t paddr;
4206 u32 desc_bank;
4207 void *link_desc_va;
4208 int num_msdus;
4209 int i;
4210 int buf_id;
4211
4212 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
4213 err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
4214 mac_id)];
4215 rx_ring = &ar->dp.rx_refill_buf_ring;
4216
4217 srng = &ab->hal.srng_list[err_ring->ring_id];
4218
4219 spin_lock_bh(&srng->lock);
4220
4221 ath11k_hal_srng_access_begin(ab, srng);
4222
4223 while (quota-- &&
4224 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4225 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
4226
4227 entr_ring = (struct hal_reo_entrance_ring *)desc;
4228 rxdma_err_code =
4229 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4230 entr_ring->info1);
4231 ab->soc_stats.rxdma_error[rxdma_err_code]++;
4232
4233 link_desc_va = link_desc_banks[desc_bank].vaddr +
4234 (paddr - link_desc_banks[desc_bank].paddr);
4235 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
4236 msdu_cookies, &rbm);
4237
4238 for (i = 0; i < num_msdus; i++) {
4239 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4240 msdu_cookies[i]);
4241
4242 spin_lock_bh(&rx_ring->idr_lock);
4243 skb = idr_find(&rx_ring->bufs_idr, buf_id);
4244 if (!skb) {
4245 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
4246 buf_id);
4247 spin_unlock_bh(&rx_ring->idr_lock);
4248 continue;
4249 }
4250
4251 idr_remove(&rx_ring->bufs_idr, buf_id);
4252 spin_unlock_bh(&rx_ring->idr_lock);
4253
4254 rxcb = ATH11K_SKB_RXCB(skb);
4255 dma_unmap_single(ab->dev, rxcb->paddr,
4256 skb->len + skb_tailroom(skb),
4257 DMA_FROM_DEVICE);
4258 dev_kfree_skb_any(skb);
4259
4260 num_buf_freed++;
4261 }
4262
4263 ath11k_dp_rx_link_desc_return(ab, desc,
4264 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4265 }
4266
4267 ath11k_hal_srng_access_end(ab, srng);
4268
4269 spin_unlock_bh(&srng->lock);
4270
4271 if (num_buf_freed)
4272 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
4273 ab->hw_params.hal_params->rx_buf_rbm);
4274
4275 return budget - quota;
4276}
4277
4278void ath11k_dp_process_reo_status(struct ath11k_base *ab)
4279{
4280 struct ath11k_dp *dp = &ab->dp;
4281 struct hal_srng *srng;
4282 struct dp_reo_cmd *cmd, *tmp;
4283 bool found = false;
4284 u32 *reo_desc;
4285 u16 tag;
4286 struct hal_reo_status reo_status;
4287
4288 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
4289
4290 memset(&reo_status, 0, sizeof(reo_status));
4291
4292 spin_lock_bh(&srng->lock);
4293
4294 ath11k_hal_srng_access_begin(ab, srng);
4295
4296 while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4297 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
4298
4299 switch (tag) {
4300 case HAL_REO_GET_QUEUE_STATS_STATUS:
4301 ath11k_hal_reo_status_queue_stats(ab, reo_desc,
4302 &reo_status);
4303 break;
4304 case HAL_REO_FLUSH_QUEUE_STATUS:
4305 ath11k_hal_reo_flush_queue_status(ab, reo_desc,
4306 &reo_status);
4307 break;
4308 case HAL_REO_FLUSH_CACHE_STATUS:
4309 ath11k_hal_reo_flush_cache_status(ab, reo_desc,
4310 &reo_status);
4311 break;
4312 case HAL_REO_UNBLOCK_CACHE_STATUS:
4313 ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
4314 &reo_status);
4315 break;
4316 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
4317 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
4318 &reo_status);
4319 break;
4320 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
4321 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
4322 &reo_status);
4323 break;
4324 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
4325 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
4326 &reo_status);
4327 break;
4328 default:
4329 ath11k_warn(ab, "Unknown reo status type %d\n", tag);
4330 continue;
4331 }
4332
4333 spin_lock_bh(&dp->reo_cmd_lock);
4334 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
4335 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
4336 found = true;
4337 list_del(&cmd->list);
4338 break;
4339 }
4340 }
4341 spin_unlock_bh(&dp->reo_cmd_lock);
4342
4343 if (found) {
4344 cmd->handler(dp, (void *)&cmd->data,
4345 reo_status.uniform_hdr.cmd_status);
4346 kfree(cmd);
4347 }
4348
4349 found = false;
4350 }
4351
4352 ath11k_hal_srng_access_end(ab, srng);
4353
4354 spin_unlock_bh(&srng->lock);
4355}
4356
4357void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
4358{
4359 struct ath11k *ar = ab->pdevs[mac_id].ar;
4360
4361 ath11k_dp_rx_pdev_srng_free(ar);
4362 ath11k_dp_rxdma_pdev_buf_free(ar);
4363}
4364
4365int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
4366{
4367 struct ath11k *ar = ab->pdevs[mac_id].ar;
4368 struct ath11k_pdev_dp *dp = &ar->dp;
4369 u32 ring_id;
4370 int i;
4371 int ret;
4372
4373 ret = ath11k_dp_rx_pdev_srng_alloc(ar);
4374 if (ret) {
4375 ath11k_warn(ab, "failed to setup rx srngs\n");
4376 return ret;
4377 }
4378
4379 ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
4380 if (ret) {
4381 ath11k_warn(ab, "failed to setup rxdma ring\n");
4382 return ret;
4383 }
4384
4385 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4386 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
4387 if (ret) {
4388 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4389 ret);
4390 return ret;
4391 }
4392
4393 if (ab->hw_params.rx_mac_buf_ring) {
4394 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4395 ring_id = dp->rx_mac_buf_ring[i].ring_id;
4396 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4397 mac_id + i, HAL_RXDMA_BUF);
4398 if (ret) {
4399 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4400 i, ret);
4401 return ret;
4402 }
4403 }
4404 }
4405
4406 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4407 ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4408 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4409 mac_id + i, HAL_RXDMA_DST);
4410 if (ret) {
4411 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
4412 i, ret);
4413 return ret;
4414 }
4415 }
4416
4417 if (!ab->hw_params.rxdma1_enable)
4418 goto config_refill_ring;
4419
4420 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4421 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4422 mac_id, HAL_RXDMA_MONITOR_BUF);
4423 if (ret) {
4424 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4425 ret);
4426 return ret;
4427 }
4428 ret = ath11k_dp_tx_htt_srng_setup(ab,
4429 dp->rxdma_mon_dst_ring.ring_id,
4430 mac_id, HAL_RXDMA_MONITOR_DST);
4431 if (ret) {
4432 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4433 ret);
4434 return ret;
4435 }
4436 ret = ath11k_dp_tx_htt_srng_setup(ab,
4437 dp->rxdma_mon_desc_ring.ring_id,
4438 mac_id, HAL_RXDMA_MONITOR_DESC);
4439 if (ret) {
4440 ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n",
4441 ret);
4442 return ret;
4443 }
4444
4445config_refill_ring:
4446 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4447 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
4448 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
4449 HAL_RXDMA_MONITOR_STATUS);
4450 if (ret) {
4451 ath11k_warn(ab,
4452 "failed to configure mon_status_refill_ring%d %d\n",
4453 i, ret);
4454 return ret;
4455 }
4456 }
4457
4458 return 0;
4459}
4460
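/* Compute how many bytes of a (possibly multi-buffer) MSDU live in the
 * current rx buffer: a full buffer payload while more data remains, or
 * the leftover bytes for the final buffer.
 */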
4461static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
4462{
4463 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
4464 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
4465 *total_len -= *frag_len;
4466 } else {
4467 *frag_len = *total_len;
4468 *total_len = 0;
4469 }
4470}
4471
4472static
4473int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4474 void *p_last_buf_addr_info,
4475 u8 mac_id)
4476{
4477 struct ath11k_pdev_dp *dp = &ar->dp;
4478 struct dp_srng *dp_srng;
4479 void *hal_srng;
4480 void *src_srng_desc;
4481 int ret = 0;
4482
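/* With rxdma1 the monitor owns a dedicated descriptor ring; otherwise
 * link descriptors are recycled through the common WBM descriptor
 * release ring.
 */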
4483 if (ar->ab->hw_params.rxdma1_enable) {
4484 dp_srng = &dp->rxdma_mon_desc_ring;
4485 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4486 } else {
4487 dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
4488 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4489 }
4490
4491 ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4492
4493 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4494
4495 if (src_srng_desc) {
4496 struct ath11k_buffer_addr *src_desc = src_srng_desc;
4497
4498 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4499 } else {
4500 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4501 "Monitor Link Desc Ring %d Full", mac_id);
4502 ret = -ENOMEM;
4503 }
4504
4505 ath11k_hal_srng_access_end(ar->ab, hal_srng);
4506 return ret;
4507}
4508
4509static
4510void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4511 dma_addr_t *paddr, u32 *sw_cookie,
4512 u8 *rbm,
4513 void **pp_buf_addr_info)
4514{
4515 struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc;
4516 struct ath11k_buffer_addr *buf_addr_info;
4517
4518 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4519
4520 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
4521
4522 *pp_buf_addr_info = (void *)buf_addr_info;
4523}
4524
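/* Grow or trim the skb to exactly len bytes. On allocation failure the
 * skb is freed and -ENOMEM is returned, so callers must not touch the
 * skb after a non-zero return.
 */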
4525static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
4526{
4527 if (skb->len > len) {
4528 skb_trim(skb, len);
4529 } else {
4530 if (skb_tailroom(skb) < len - skb->len) {
4531 if ((pskb_expand_head(skb, 0,
4532 len - skb->len - skb_tailroom(skb),
4533 GFP_ATOMIC))) {
4534 dev_kfree_skb_any(skb);
4535 return -ENOMEM;
4536 }
4537 }
4538 skb_put(skb, (len - skb->len));
4539 }
4540 return 0;
4541}
4542
4543static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
4544 void *msdu_link_desc,
4545 struct hal_rx_msdu_list *msdu_list,
4546 u16 *num_msdus)
4547{
4548 struct hal_rx_msdu_details *msdu_details = NULL;
4549 struct rx_msdu_desc *msdu_desc_info = NULL;
4550 struct hal_rx_msdu_link *msdu_link = NULL;
4551 int i;
4552 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
4553 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
4554 u8 tmp = 0;
4555
4556 msdu_link = msdu_link_desc;
4557 msdu_details = &msdu_link->msdu_link[0];
4558
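/* A zero buffer address marks the first unused descriptor: tag the
 * previous entry as the last MSDU of the MPDU and stop scanning.
 */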
4559 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
4560 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
4561 msdu_details[i].buf_addr_info.info0) == 0) {
4562 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
4563 msdu_desc_info->info0 |= last;
4565 break;
4566 }
4567 msdu_desc_info = &msdu_details[i].rx_msdu_info;
4568
4569 if (!i)
4570 msdu_desc_info->info0 |= first;
4571 else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
4572 msdu_desc_info->info0 |= last;
4573 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
4574 msdu_list->msdu_info[i].msdu_len =
4575 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
4576 msdu_list->sw_cookie[i] =
4577 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
4578 msdu_details[i].buf_addr_info.info1);
4579 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
4580 msdu_details[i].buf_addr_info.info1);
4581 msdu_list->rbm[i] = tmp;
4582 }
4583 *num_msdus = i;
4584}
4585
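/* Compare the PPDU id seen in the destination ring against the status
 * ring PPDU id, tolerating counter wraparound within the
 * DP_NOT_PPDU_ID_WRAP_AROUND window. A non-zero return means the rings
 * are out of sync and the caller must resync on msdu_ppdu_id.
 */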
4586static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
4587 u32 *rx_bufs_used)
4588{
4589 u32 ret = 0;
4590
4591 if ((*ppdu_id < msdu_ppdu_id) &&
4592 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
4593 *ppdu_id = msdu_ppdu_id;
4594 ret = msdu_ppdu_id;
4595 } else if ((*ppdu_id > msdu_ppdu_id) &&
4596 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
4597 /* mon_dst is lagging behind mon_status;
4598 * skip this dst_ring entry and free it
4599 */
4600 *rx_bufs_used += 1;
4601 *ppdu_id = msdu_ppdu_id;
4602 ret = msdu_ppdu_id;
4603 }
4604 return ret;
4605}
4606
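/* Derive this buffer's byte count from the MSDU descriptor: buffers with
 * the CONTINUATION flag are chunks of an MSDU spanning multiple buffers;
 * the first buffer without it ends the MSDU and completes one of msdu_cnt.
 */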
4607static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
4608 bool *is_frag, u32 *total_len,
4609 u32 *frag_len, u32 *msdu_cnt)
4610{
4611 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
4612 if (!*is_frag) {
4613 *total_len = info->msdu_len;
4614 *is_frag = true;
4615 }
4616 ath11k_dp_mon_set_frag_len(total_len,
4617 frag_len);
4618 } else {
4619 if (*is_frag) {
4620 ath11k_dp_mon_set_frag_len(total_len,
4621 frag_len);
4622 } else {
4623 *frag_len = info->msdu_len;
4624 }
4625 *is_frag = false;
4626 *msdu_cnt -= 1;
4627 }
4628}
4629
4630static u32
4631ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
4632 void *ring_entry, struct sk_buff **head_msdu,
4633 struct sk_buff **tail_msdu, u32 *npackets,
4634 u32 *ppdu_id)
4635{
4636 struct ath11k_pdev_dp *dp = &ar->dp;
4637 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4638 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
4639 struct sk_buff *msdu = NULL, *last = NULL;
4640 struct hal_rx_msdu_list msdu_list;
4641 void *p_buf_addr_info, *p_last_buf_addr_info;
4642 struct hal_rx_desc *rx_desc;
4643 void *rx_msdu_link_desc;
4644 dma_addr_t paddr;
4645 u16 num_msdus = 0;
4646 u32 rx_buf_size, rx_pkt_offset, sw_cookie;
4647 u32 rx_bufs_used = 0, i = 0;
4648 u32 msdu_ppdu_id = 0, msdu_cnt = 0;
4649 u32 total_len = 0, frag_len = 0;
4650 bool is_frag, is_first_msdu;
4651 bool drop_mpdu = false;
4652 struct ath11k_skb_rxcb *rxcb;
4653 struct hal_reo_entrance_ring *ent_desc = ring_entry;
4654 int buf_id;
4655 u32 rx_link_buf_info[2];
4656 u8 rbm;
4657
4658 if (!ar->ab->hw_params.rxdma1_enable)
4659 rx_ring = &dp->rx_refill_buf_ring;
4660
4661 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
4662 &sw_cookie,
4663 &p_last_buf_addr_info, &rbm,
4664 &msdu_cnt);
4665
4666 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
4667 ent_desc->info1) ==
4668 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4669 u8 rxdma_err =
4670 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4671 ent_desc->info1);
4672 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
4673 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
4674 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
4675 drop_mpdu = true;
4676 pmon->rx_mon_stats.dest_mpdu_drop++;
4677 }
4678 }
4679
4680 is_frag = false;
4681 is_first_msdu = true;
4682
4683 do {
4684 if (pmon->mon_last_linkdesc_paddr == paddr) {
4685 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
4686 return rx_bufs_used;
4687 }
4688
4689 if (ar->ab->hw_params.rxdma1_enable)
4690 rx_msdu_link_desc =
4691 (void *)pmon->link_desc_banks[sw_cookie].vaddr +
4692 (paddr - pmon->link_desc_banks[sw_cookie].paddr);
4693 else
4694 rx_msdu_link_desc =
4695 (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
4696 (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
4697
4698 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
4699 &num_msdus);
4700
4701 for (i = 0; i < num_msdus; i++) {
4702 u32 l2_hdr_offset;
4703
4704 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
4705 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4706 "i %d last_cookie %d is same\n",
4707 i, pmon->mon_last_buf_cookie);
4708 drop_mpdu = true;
4709 pmon->rx_mon_stats.dup_mon_buf_cnt++;
4710 continue;
4711 }
4712 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4713 msdu_list.sw_cookie[i]);
4714
4715 spin_lock_bh(&rx_ring->idr_lock);
4716 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4717 spin_unlock_bh(&rx_ring->idr_lock);
4718 if (!msdu) {
4719 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4720 "msdu_pop: invalid buf_id %d\n", buf_id);
4721 break;
4722 }
4723 rxcb = ATH11K_SKB_RXCB(msdu);
4724 if (!rxcb->unmapped) {
4725 dma_unmap_single(ar->ab->dev, rxcb->paddr,
4726 msdu->len +
4727 skb_tailroom(msdu),
4728 DMA_FROM_DEVICE);
4729 rxcb->unmapped = 1;
4730 }
4731 if (drop_mpdu) {
4732 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4733 "i %d drop msdu %p *ppdu_id %x\n",
4734 i, msdu, *ppdu_id);
4735 dev_kfree_skb_any(msdu);
4736 msdu = NULL;
4737 goto next_msdu;
4738 }
4739
4740 rx_desc = (struct hal_rx_desc *)msdu->data;
4741
4742 rx_pkt_offset = sizeof(struct hal_rx_desc);
4743 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
4744
4745 if (is_first_msdu) {
4746 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
4747 drop_mpdu = true;
4748 dev_kfree_skb_any(msdu);
4749 msdu = NULL;
4750 pmon->mon_last_linkdesc_paddr = paddr;
4751 goto next_msdu;
4752 }
4753
4754 msdu_ppdu_id =
4755 ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
4756
4757 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
4758 ppdu_id,
4759 &rx_bufs_used)) {
4760 if (rx_bufs_used) {
4761 drop_mpdu = true;
4762 dev_kfree_skb_any(msdu);
4763 msdu = NULL;
4764 goto next_msdu;
4765 }
4766 return rx_bufs_used;
4767 }
4768 pmon->mon_last_linkdesc_paddr = paddr;
4769 is_first_msdu = false;
4770 }
4771 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
4772 &is_frag, &total_len,
4773 &frag_len, &msdu_cnt);
4774 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
4775
/* ath11k_dp_pkt_set_pktlen() frees the skb on failure */
if (ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
msdu = NULL;
goto next_msdu;
}
4777
4778 if (!(*head_msdu))
4779 *head_msdu = msdu;
4780 else if (last)
4781 last->next = msdu;
4782
4783 last = msdu;
4784next_msdu:
4785 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
4786 rx_bufs_used++;
4787 spin_lock_bh(&rx_ring->idr_lock);
4788 idr_remove(&rx_ring->bufs_idr, buf_id);
4789 spin_unlock_bh(&rx_ring->idr_lock);
4790 }
4791
4792 ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
4793
4794 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
4795 &sw_cookie, &rbm,
4796 &p_buf_addr_info);
4797
4798 if (ar->ab->hw_params.rxdma1_enable) {
4799 if (ath11k_dp_rx_monitor_link_desc_return(ar,
4800 p_last_buf_addr_info,
4801 dp->mac_id))
4802 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4803 "dp_rx_monitor_link_desc_return failed");
4804 } else {
4805 ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
4806 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4807 }
4808
4809 p_last_buf_addr_info = p_buf_addr_info;
4810
4811 } while (paddr && msdu_cnt);
4812
4813 if (last)
4814 last->next = NULL;
4815
4816 *tail_msdu = msdu;
4817
4818 if (msdu_cnt == 0)
4819 *npackets = 1;
4820
4821 return rx_bufs_used;
4822}
4823
4824static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
4825{
4826 u32 rx_pkt_offset, l2_hdr_offset;
4827
4828 rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
4829 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
4830 (struct hal_rx_desc *)msdu->data);
4831 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4832}
4833
4834static struct sk_buff *
4835ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
4836 u32 mac_id, struct sk_buff *head_msdu,
4837 struct sk_buff *last_msdu,
4838 struct ieee80211_rx_status *rxs, bool *fcs_err)
4839{
4840 struct ath11k_base *ab = ar->ab;
4841 struct sk_buff *msdu, *prev_buf;
4842 struct hal_rx_desc *rx_desc;
4843 char *hdr_desc;
4844 u8 *dest, decap_format;
4845 struct ieee80211_hdr_3addr *wh;
4846 struct rx_attention *rx_attention;
4847 u32 err_bitmap;
4848
4849 if (!head_msdu)
4850 goto err_merge_fail;
4851
4852 rx_desc = (struct hal_rx_desc *)head_msdu->data;
4853 rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
4854 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
4855
4856 if (err_bitmap & DP_RX_MPDU_ERR_FCS)
4857 *fcs_err = true;
4858
4859 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
4860 return NULL;
4861
4862 decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);
4863
4864 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
4865
4866 if (decap_format == DP_RX_DECAP_TYPE_RAW) {
4867 ath11k_dp_rx_msdus_set_payload(ar, head_msdu);
4868
4869 prev_buf = head_msdu;
4870 msdu = head_msdu->next;
4871
4872 while (msdu) {
4873 ath11k_dp_rx_msdus_set_payload(ar, msdu);
4874
4875 prev_buf = msdu;
4876 msdu = msdu->next;
4877 }
4878
4879 prev_buf->next = NULL;
4880
4881 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
4882 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
4883 u8 qos_pkt = 0;
4884
4885 rx_desc = (struct hal_rx_desc *)head_msdu->data;
4886 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
4887
4888 /* Base size */
4889 wh = (struct ieee80211_hdr_3addr *)hdr_desc;
4890
4891 if (ieee80211_is_data_qos(wh->frame_control))
4892 qos_pkt = 1;
4893
4894 msdu = head_msdu;
4895
4896 while (msdu) {
4897 ath11k_dp_rx_msdus_set_payload(ar, msdu);
4898 if (qos_pkt) {
4899 dest = skb_push(msdu, sizeof(__le16));
4900 if (!dest)
4901 goto err_merge_fail;
4902 memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
4903 }
4904 prev_buf = msdu;
4905 msdu = msdu->next;
4906 }
4907 dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
4908 if (!dest)
4909 goto err_merge_fail;
4910
4911 ath11k_dbg(ab, ATH11K_DBG_DATA,
4912 "mpdu_buf %p mpdu_buf->len %u",
4913 prev_buf, prev_buf->len);
4914 } else {
4915 ath11k_dbg(ab, ATH11K_DBG_DATA,
4916 "decap format %d is not supported!\n",
4917 decap_format);
4918 goto err_merge_fail;
4919 }
4920
4921 return head_msdu;
4922
4923err_merge_fail:
4924 return NULL;
4925}
4926
4927static void
4928ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
4929 u8 *rtap_buf)
4930{
4931 u32 rtap_len = 0;
4932
4933 put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4934 rtap_len += 2;
4935
4936 put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
4937 rtap_len += 2;
4938
4939 put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
4940 rtap_len += 2;
4941
4942 put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
4943 rtap_len += 2;
4944
4945 put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
4946 rtap_len += 2;
4947
4948 put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
4949}
4950
4951static void
4952ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
4953 u8 *rtap_buf)
4954{
4955 u32 rtap_len = 0;
4956
4957 put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
4958 rtap_len += 2;
4959
4960 put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
4961 rtap_len += 2;
4962
4963 rtap_buf[rtap_len] = rx_status->he_RU[0];
4964 rtap_len += 1;
4965
4966 rtap_buf[rtap_len] = rx_status->he_RU[1];
4967 rtap_len += 1;
4968
4969 rtap_buf[rtap_len] = rx_status->he_RU[2];
4970 rtap_len += 1;
4971
4972 rtap_buf[rtap_len] = rx_status->he_RU[3];
4973}
4974
4975static void ath11k_update_radiotap(struct ath11k *ar,
4976 struct hal_rx_mon_ppdu_info *ppduinfo,
4977 struct sk_buff *mon_skb,
4978 struct ieee80211_rx_status *rxs)
4979{
4980 struct ieee80211_supported_band *sband;
4981 u8 *ptr = NULL;
4982
4983 rxs->flag |= RX_FLAG_MACTIME_START;
4984 rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
4985
4986 if (ppduinfo->nss)
4987 rxs->nss = ppduinfo->nss;
4988
4989 if (ppduinfo->he_mu_flags) {
4990 rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
4991 rxs->encoding = RX_ENC_HE;
4992 ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
4993 ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
4994 } else if (ppduinfo->he_flags) {
4995 rxs->flag |= RX_FLAG_RADIOTAP_HE;
4996 rxs->encoding = RX_ENC_HE;
4997 ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
4998 ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
4999 rxs->rate_idx = ppduinfo->rate;
5000 } else if (ppduinfo->vht_flags) {
5001 rxs->encoding = RX_ENC_VHT;
5002 rxs->rate_idx = ppduinfo->rate;
5003 } else if (ppduinfo->ht_flags) {
5004 rxs->encoding = RX_ENC_HT;
5005 rxs->rate_idx = ppduinfo->rate;
5006 } else {
5007 rxs->encoding = RX_ENC_LEGACY;
5008 sband = &ar->mac.sbands[rxs->band];
5009 rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
5010 ppduinfo->cck_flag);
5011 }
5012
5013 rxs->mactime = ppduinfo->tsft;
5014}
5015
5016static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
5017 struct sk_buff *head_msdu,
5018 struct hal_rx_mon_ppdu_info *ppduinfo,
5019 struct sk_buff *tail_msdu,
5020 struct napi_struct *napi)
5021{
5022 struct ath11k_pdev_dp *dp = &ar->dp;
5023 struct sk_buff *mon_skb, *skb_next, *header;
5024 struct ieee80211_rx_status *rxs = &dp->rx_status;
5025 bool fcs_err = false;
5026
5027 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
5028 tail_msdu, rxs, &fcs_err);
5029
5030 if (!mon_skb)
5031 goto mon_deliver_fail;
5032
5033 header = mon_skb;
5034
5035 rxs->flag = 0;
5036
5037 if (fcs_err)
5038 rxs->flag = RX_FLAG_FAILED_FCS_CRC;
5039
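/* One rx_status is shared by all buffers of the PPDU: every buffer but
 * the last is flagged AMSDU_MORE, and every buffer after the first is
 * allowed to carry the same PN.
 */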
5040 do {
5041 skb_next = mon_skb->next;
5042 if (!skb_next)
5043 rxs->flag &= ~RX_FLAG_AMSDU_MORE;
5044 else
5045 rxs->flag |= RX_FLAG_AMSDU_MORE;
5046
5047 if (mon_skb == header) {
5048 header = NULL;
5049 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
5050 } else {
5051 rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
5052 }
5053 rxs->flag |= RX_FLAG_ONLY_MONITOR;
5054 ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
5055
5056 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
5057 mon_skb = skb_next;
5058 } while (mon_skb);
5059 rxs->flag = 0;
5060
5061 return 0;
5062
5063mon_deliver_fail:
5064 mon_skb = head_msdu;
5065 while (mon_skb) {
5066 skb_next = mon_skb->next;
5067 dev_kfree_skb_any(mon_skb);
5068 mon_skb = skb_next;
5069 }
5070 return -EINVAL;
5071}
5072
5073 /* The destination ring processing is considered stuck if the destination
5074 * ring does not move while the status ring moves 16 PPDUs. As a
5075 * workaround, the destination ring processing skips such a PPDU.
5076 */
5077#define MON_DEST_RING_STUCK_MAX_CNT 16
5078
5079static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
5080 u32 quota, struct napi_struct *napi)
5081{
5082 struct ath11k_pdev_dp *dp = &ar->dp;
5083 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5084 const struct ath11k_hw_hal_params *hal_params;
5085 void *ring_entry;
5086 void *mon_dst_srng;
5087 u32 ppdu_id;
5088 u32 rx_bufs_used;
5089 u32 ring_id;
5090 struct ath11k_pdev_mon_stats *rx_mon_stats;
5091 u32 npackets = 0;
5092 u32 mpdu_rx_bufs_used;
5093
5094 if (ar->ab->hw_params.rxdma1_enable)
5095 ring_id = dp->rxdma_mon_dst_ring.ring_id;
5096 else
5097 ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
5098
5099 mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
5100
5101 spin_lock_bh(&pmon->mon_lock);
5102
5103 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5104
5105 ppdu_id = pmon->mon_ppdu_info.ppdu_id;
5106 rx_bufs_used = 0;
5107 rx_mon_stats = &pmon->rx_mon_stats;
5108
5109 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5110 struct sk_buff *head_msdu, *tail_msdu;
5111
5112 head_msdu = NULL;
5113 tail_msdu = NULL;
5114
5115 mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
5116 &head_msdu,
5117 &tail_msdu,
5118 &npackets, &ppdu_id);
5119
5120 rx_bufs_used += mpdu_rx_bufs_used;
5121
5122 if (mpdu_rx_bufs_used) {
5123 dp->mon_dest_ring_stuck_cnt = 0;
5124 } else {
5125 dp->mon_dest_ring_stuck_cnt++;
5126 rx_mon_stats->dest_mon_not_reaped++;
5127 }
5128
5129 if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
5130 rx_mon_stats->dest_mon_stuck++;
5131 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5132 "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
5133 pmon->mon_ppdu_info.ppdu_id, ppdu_id,
5134 dp->mon_dest_ring_stuck_cnt,
5135 rx_mon_stats->dest_mon_not_reaped,
5136 rx_mon_stats->dest_mon_stuck);
5137 pmon->mon_ppdu_info.ppdu_id = ppdu_id;
5138 continue;
5139 }
5140
5141 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
5142 pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5143 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5144 "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
5145 ppdu_id, pmon->mon_ppdu_info.ppdu_id,
5146 rx_mon_stats->dest_mon_not_reaped,
5147 rx_mon_stats->dest_mon_stuck);
5148 break;
5149 }
5150 if (head_msdu && tail_msdu) {
5151 ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
5152 &pmon->mon_ppdu_info,
5153 tail_msdu, napi);
5154 rx_mon_stats->dest_mpdu_done++;
5155 }
5156
5157 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5158 mon_dst_srng);
5159 }
5160 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5161
5162 spin_unlock_bh(&pmon->mon_lock);
5163
5164 if (rx_bufs_used) {
5165 rx_mon_stats->dest_ppdu_done++;
5166 hal_params = ar->ab->hw_params.hal_params;
5167
5168 if (ar->ab->hw_params.rxdma1_enable)
5169 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5170 &dp->rxdma_mon_buf_ring,
5171 rx_bufs_used,
5172 hal_params->rx_buf_rbm);
5173 else
5174 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5175 &dp->rx_refill_buf_ring,
5176 rx_bufs_used,
5177 hal_params->rx_buf_rbm);
5178 }
5179}
5180
5181int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
5182 struct napi_struct *napi, int budget)
5183{
5184 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5185 enum hal_rx_mon_status hal_status;
5186 struct sk_buff *skb;
5187 struct sk_buff_head skb_list;
5188 struct ath11k_peer *peer;
5189 struct ath11k_sta *arsta;
5190 int num_buffs_reaped = 0;
5191 u32 rx_buf_sz;
5192 u16 log_type;
5193 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
5194 struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
5195 struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
5196
5197 __skb_queue_head_init(&skb_list);
5198
5199 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
5200 &skb_list);
5201 if (!num_buffs_reaped)
5202 goto exit;
5203
5204 memset(ppdu_info, 0, sizeof(*ppdu_info));
5205 ppdu_info->peer_id = HAL_INVALID_PEERID;
5206
5207 while ((skb = __skb_dequeue(&skb_list))) {
5208 if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
5209 log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
5210 rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
5211 } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
5212 log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
5213 rx_buf_sz = DP_RX_BUFFER_SIZE;
5214 } else {
5215 log_type = ATH11K_PKTLOG_TYPE_INVALID;
5216 rx_buf_sz = 0;
5217 }
5218
5219 if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
5220 trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5221
5222 memset(ppdu_info, 0, sizeof(*ppdu_info));
5223 ppdu_info->peer_id = HAL_INVALID_PEERID;
5224 hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
5225
5226 if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5227 pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
5228 hal_status == HAL_TLV_STATUS_PPDU_DONE) {
5229 rx_mon_stats->status_ppdu_done++;
5230 pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
5231 ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
5232 pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5233 }
5234
5235 if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
5236 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
5237 dev_kfree_skb_any(skb);
5238 continue;
5239 }
5240
5241 rcu_read_lock();
5242 spin_lock_bh(&ab->base_lock);
5243 peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
5244
5245 if (!peer || !peer->sta) {
5246 ath11k_dbg(ab, ATH11K_DBG_DATA,
5247 "failed to find the peer with peer_id %d\n",
5248 ppdu_info->peer_id);
5249 goto next_skb;
5250 }
5251
5252 arsta = ath11k_sta_to_arsta(peer->sta);
5253 ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
5254
5255 if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
5256 trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5257
5258next_skb:
5259 spin_unlock_bh(&ab->base_lock);
5260 rcu_read_unlock();
5261
5262 dev_kfree_skb_any(skb);
5263 memset(ppdu_info, 0, sizeof(*ppdu_info));
5264 ppdu_info->peer_id = HAL_INVALID_PEERID;
5265 }
5266exit:
5267 return num_buffs_reaped;
5268}
5269
5270static u32
5271ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
5272 void *ring_entry, struct sk_buff **head_msdu,
5273 struct sk_buff **tail_msdu,
5274 struct hal_sw_mon_ring_entries *sw_mon_entries)
5275{
5276 struct ath11k_pdev_dp *dp = &ar->dp;
5277 struct ath11k_mon_data *pmon = &dp->mon_data;
5278 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
5279 struct sk_buff *msdu = NULL, *last = NULL;
5280 struct hal_sw_monitor_ring *sw_desc = ring_entry;
5281 struct hal_rx_msdu_list msdu_list;
5282 struct hal_rx_desc *rx_desc;
5283 struct ath11k_skb_rxcb *rxcb;
5284 void *rx_msdu_link_desc;
5285 void *p_buf_addr_info, *p_last_buf_addr_info;
5286 int buf_id, i = 0;
5287 u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
5288 u32 rx_bufs_used = 0, msdu_cnt = 0;
5289 u32 total_len = 0, frag_len = 0, sw_cookie;
5290 u16 num_msdus = 0;
5291 u8 rxdma_err, rbm;
5292 bool is_frag, is_first_msdu;
5293 bool drop_mpdu = false;
5294
5295 ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
5296
5297 sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
5298 sw_mon_entries->end_of_ppdu = false;
5299 sw_mon_entries->drop_ppdu = false;
5300 p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
5301 msdu_cnt = sw_mon_entries->msdu_cnt;
5302
5303 sw_mon_entries->end_of_ppdu =
5304 FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
5305 if (sw_mon_entries->end_of_ppdu)
5306 return rx_bufs_used;
5307
5308 if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
5309 sw_desc->info0) ==
5310 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
5311 rxdma_err =
5312 FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
5313 sw_desc->info0);
5314 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
5315 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
5316 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
5317 pmon->rx_mon_stats.dest_mpdu_drop++;
5318 drop_mpdu = true;
5319 }
5320 }
5321
5322 is_frag = false;
5323 is_first_msdu = true;
5324
5325 do {
5326 rx_msdu_link_desc =
5327 (u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
5328 (sw_mon_entries->mon_dst_paddr -
5329 pmon->link_desc_banks[sw_cookie].paddr);
5330
5331 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
5332 &num_msdus);
5333
5334 for (i = 0; i < num_msdus; i++) {
5335 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
5336 msdu_list.sw_cookie[i]);
5337
5338 spin_lock_bh(&rx_ring->idr_lock);
5339 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
5340 if (!msdu) {
5341 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5342 "full mon msdu_pop: invalid buf_id %d\n",
5343 buf_id);
5344 spin_unlock_bh(&rx_ring->idr_lock);
5345 break;
5346 }
5347 idr_remove(&rx_ring->bufs_idr, buf_id);
5348 spin_unlock_bh(&rx_ring->idr_lock);
5349
5350 rxcb = ATH11K_SKB_RXCB(msdu);
5351 if (!rxcb->unmapped) {
5352 dma_unmap_single(ar->ab->dev, rxcb->paddr,
5353 msdu->len +
5354 skb_tailroom(msdu),
5355 DMA_FROM_DEVICE);
5356 rxcb->unmapped = 1;
5357 }
5358 if (drop_mpdu) {
5359 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5360 "full mon: i %d drop msdu %p *ppdu_id %x\n",
5361 i, msdu, sw_mon_entries->ppdu_id);
5362 dev_kfree_skb_any(msdu);
5363 msdu_cnt--;
5364 goto next_msdu;
5365 }
5366
5367 rx_desc = (struct hal_rx_desc *)msdu->data;
5368
5369 rx_pkt_offset = sizeof(struct hal_rx_desc);
5370 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
5371
5372 if (is_first_msdu) {
5373 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
5374 drop_mpdu = true;
5375 dev_kfree_skb_any(msdu);
5376 msdu = NULL;
5377 goto next_msdu;
5378 }
5379 is_first_msdu = false;
5380 }
5381
5382 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
5383 &is_frag, &total_len,
5384 &frag_len, &msdu_cnt);
5385
5386 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
5387
/* ath11k_dp_pkt_set_pktlen() frees the skb on failure */
if (ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
msdu = NULL;
goto next_msdu;
}
5389
5390 if (!(*head_msdu))
5391 *head_msdu = msdu;
5392 else if (last)
5393 last->next = msdu;
5394
5395 last = msdu;
5396next_msdu:
5397 rx_bufs_used++;
5398 }
5399
5400 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
5401 &sw_mon_entries->mon_dst_paddr,
5402 &sw_mon_entries->mon_dst_sw_cookie,
5403 &rbm,
5404 &p_buf_addr_info);
5405
5406 if (ath11k_dp_rx_monitor_link_desc_return(ar,
5407 p_last_buf_addr_info,
5408 dp->mac_id))
5409 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5410 "full mon: dp_rx_monitor_link_desc_return failed\n");
5411
5412 p_last_buf_addr_info = p_buf_addr_info;
5413
5414 } while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
5415
5416 if (last)
5417 last->next = NULL;
5418
5419 *tail_msdu = msdu;
5420
5421 return rx_bufs_used;
5422}
5423
5424static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
5425 struct dp_full_mon_mpdu *mon_mpdu,
5426 struct sk_buff *head,
5427 struct sk_buff *tail)
5428{
5429 mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
5430 if (!mon_mpdu)
5431 return -ENOMEM;
5432
5433 list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
5434 mon_mpdu->head = head;
5435 mon_mpdu->tail = tail;
5436
5437 return 0;
5438}
5439
5440static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
5441 struct dp_full_mon_mpdu *mon_mpdu)
5442{
5443 struct dp_full_mon_mpdu *tmp;
5444 struct sk_buff *tmp_msdu, *skb_next;
5445
5446 if (list_empty(&dp->dp_full_mon_mpdu_list))
5447 return;
5448
5449 list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5450 list_del(&mon_mpdu->list);
5451
5452 tmp_msdu = mon_mpdu->head;
5453 while (tmp_msdu) {
5454 skb_next = tmp_msdu->next;
5455 dev_kfree_skb_any(tmp_msdu);
5456 tmp_msdu = skb_next;
5457 }
5458
5459 kfree(mon_mpdu);
5460 }
5461}
5462
5463static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
5464 int mac_id,
5465 struct ath11k_mon_data *pmon,
5466 struct napi_struct *napi)
5467{
5468 struct ath11k_pdev_mon_stats *rx_mon_stats;
5469 struct dp_full_mon_mpdu *tmp;
5470 struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
5471 struct sk_buff *head_msdu, *tail_msdu;
5472 struct ath11k_base *ab = ar->ab;
5473 struct ath11k_dp *dp = &ab->dp;
5474 int ret = 0;
5475
5476 rx_mon_stats = &pmon->rx_mon_stats;
5477
5478 list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5479 list_del(&mon_mpdu->list);
5480 head_msdu = mon_mpdu->head;
5481 tail_msdu = mon_mpdu->tail;
5482 if (head_msdu && tail_msdu) {
5483 ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
5484 &pmon->mon_ppdu_info,
5485 tail_msdu, napi);
5486 rx_mon_stats->dest_mpdu_done++;
5487 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
5488 }
5489 kfree(mon_mpdu);
5490 }
5491
5492 return ret;
5493}
5494
5495static int
5496ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
5497 struct napi_struct *napi, int budget)
5498{
5499 struct ath11k *ar = ab->pdevs[mac_id].ar;
5500 struct ath11k_pdev_dp *dp = &ar->dp;
5501 struct ath11k_mon_data *pmon = &dp->mon_data;
5502 struct hal_sw_mon_ring_entries *sw_mon_entries;
5503 int quota = 0, work = 0, count;
5504
5505 sw_mon_entries = &pmon->sw_mon_entries;
5506
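/* While the destination ring is held, reap status buffers one PPDU at a
 * time until the entry matching the held destination PPDU shows up, then
 * deliver the buffered MPDUs; a missing or leading status buffer means
 * the PPDU is dropped instead.
 */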
5507 while (pmon->hold_mon_dst_ring) {
5508 quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
5509 napi, 1);
5510 if (pmon->buf_state == DP_MON_STATUS_MATCH) {
5511 count = sw_mon_entries->status_buf_count;
5512 if (count > 1) {
5513 quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
5514 napi, count);
5515 }
5516
5517 ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
5518 pmon, napi);
5519 pmon->hold_mon_dst_ring = false;
5520 } else if (!pmon->mon_status_paddr ||
5521 pmon->buf_state == DP_MON_STATUS_LEAD) {
5522 sw_mon_entries->drop_ppdu = true;
5523 pmon->hold_mon_dst_ring = false;
5524 }
5525
5526 if (!quota)
5527 break;
5528
5529 work += quota;
5530 }
5531
5532 if (sw_mon_entries->drop_ppdu)
5533 ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);
5534
5535 return work;
5536}
5537
5538static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
5539 struct napi_struct *napi, int budget)
5540{
5541 struct ath11k *ar = ab->pdevs[mac_id].ar;
5542 struct ath11k_pdev_dp *dp = &ar->dp;
5543 struct ath11k_mon_data *pmon = &dp->mon_data;
5544 struct hal_sw_mon_ring_entries *sw_mon_entries;
5545 struct ath11k_pdev_mon_stats *rx_mon_stats;
5546 struct sk_buff *head_msdu, *tail_msdu;
5547 void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
5548 void *ring_entry;
5549 u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
5550 int quota = 0, ret;
5551 bool break_dst_ring = false;
5552
5553 spin_lock_bh(&pmon->mon_lock);
5554
5555 sw_mon_entries = &pmon->sw_mon_entries;
5556 rx_mon_stats = &pmon->rx_mon_stats;
5557
5558 if (pmon->hold_mon_dst_ring) {
5559 spin_unlock_bh(&pmon->mon_lock);
5560 goto reap_status_ring;
5561 }
5562
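/* Reap the destination ring up to an end-of-ppdu entry; popped MPDUs are
 * buffered on dp_full_mon_mpdu_list and delivered once the matching
 * status buffers have been reaped via the status ring below.
 */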
5563 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5564 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5565 head_msdu = NULL;
5566 tail_msdu = NULL;
5567
5568 mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
5569 &head_msdu,
5570 &tail_msdu,
5571 sw_mon_entries);
5572 rx_bufs_used += mpdu_rx_bufs_used;
5573
5574 if (!sw_mon_entries->end_of_ppdu) {
5575 if (head_msdu) {
5576 ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
5577 pmon->mon_mpdu,
5578 head_msdu,
5579 tail_msdu);
5580 if (ret)
5581 break_dst_ring = true;
5582 }
5583
5584 goto next_entry;
5585 } else {
5586 if (!sw_mon_entries->ppdu_id &&
5587 !sw_mon_entries->mon_status_paddr) {
5588 break_dst_ring = true;
5589 goto next_entry;
5590 }
5591 }
5592
5593 rx_mon_stats->dest_ppdu_done++;
5594 pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5595 pmon->buf_state = DP_MON_STATUS_LAG;
5596 pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
5597 pmon->hold_mon_dst_ring = true;
5598next_entry:
5599 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5600 mon_dst_srng);
5601 if (break_dst_ring)
5602 break;
5603 }
5604
5605 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5606 spin_unlock_bh(&pmon->mon_lock);
5607
5608 if (rx_bufs_used) {
5609 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5610 &dp->rxdma_mon_buf_ring,
5611 rx_bufs_used,
5612 HAL_RX_BUF_RBM_SW3_BM);
5613 }
5614
5615reap_status_ring:
5616 quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
5617 napi, budget);
5618
5619 return quota;
5620}
5621
5622int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
5623 struct napi_struct *napi, int budget)
5624{
5625 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5626 int ret = 0;
5627
5628 if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5629 ab->hw_params.full_monitor_mode)
5630 ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
5631 else
5632 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
5633
5634 return ret;
5635}
5636
5637static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
5638{
5639 struct ath11k_pdev_dp *dp = &ar->dp;
5640 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5641
5642 skb_queue_head_init(&pmon->rx_status_q);
5643
5644 pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5645
5646 memset(&pmon->rx_mon_stats, 0,
5647 sizeof(pmon->rx_mon_stats));
5648 return 0;
5649}
5650
5651int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
5652{
5653 struct ath11k_pdev_dp *dp = &ar->dp;
5654 struct ath11k_mon_data *pmon = &dp->mon_data;
5655 struct hal_srng *mon_desc_srng = NULL;
5656 struct dp_srng *dp_srng;
5657 int ret = 0;
5658 u32 n_link_desc = 0;
5659
5660 ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
5661 if (ret) {
5662		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
5663 return ret;
5664 }
5665
5666	/* if rxdma1_enable is false, there is no need to set up
5667	 * rxdma_mon_desc_ring.
5668	 */
5669 if (!ar->ab->hw_params.rxdma1_enable)
5670 return 0;
5671
5672 dp_srng = &dp->rxdma_mon_desc_ring;
5673 n_link_desc = dp_srng->size /
5674 ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
5675 mon_desc_srng =
5676 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
5677
5678 ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
5679 HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
5680 n_link_desc);
5681 if (ret) {
5682		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed\n");
5683 return ret;
5684 }
5685 pmon->mon_last_linkdesc_paddr = 0;
5686 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
5687 spin_lock_init(&pmon->mon_lock);
5688
5689 return 0;
5690}
5691
5692static int ath11k_dp_mon_link_free(struct ath11k *ar)
5693{
5694 struct ath11k_pdev_dp *dp = &ar->dp;
5695 struct ath11k_mon_data *pmon = &dp->mon_data;
5696
5697 ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
5698 HAL_RXDMA_MONITOR_DESC,
5699 &dp->rxdma_mon_desc_ring);
5700 return 0;
5701}
5702
5703int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
5704{
5705 ath11k_dp_mon_link_free(ar);
5706 return 0;
5707}
5708
5709int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
5710{
5711 /* start reap timer */
5712 mod_timer(&ab->mon_reap_timer,
5713 jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
5714
5715 return 0;
5716}
5717
5718int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
5719{
5720 int ret;
5721
5722 if (stop_timer)
5723 del_timer_sync(&ab->mon_reap_timer);
5724
5725 /* reap all the monitor related rings */
5726 ret = ath11k_dp_purge_mon_ring(ab);
5727 if (ret) {
5728 ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
5729 return ret;
5730 }
5731
5732 return 0;
5733}
1// SPDX-License-Identifier: BSD-3-Clause-Clear
2/*
3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
5 */
6
7#include <linux/ieee80211.h>
8#include <linux/kernel.h>
9#include <linux/skbuff.h>
10#include <crypto/hash.h>
11#include "core.h"
12#include "debug.h"
13#include "debugfs_htt_stats.h"
14#include "debugfs_sta.h"
15#include "hal_desc.h"
16#include "hw.h"
17#include "dp_rx.h"
18#include "hal_rx.h"
19#include "dp_tx.h"
20#include "peer.h"
21
22#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
23
24static inline
25u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
26{
27 return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
28}
29
30static inline
31enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
32 struct hal_rx_desc *desc)
33{
34 if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
35 return HAL_ENCRYPT_TYPE_OPEN;
36
37 return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
38}
39
40static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
41 struct hal_rx_desc *desc)
42{
43 return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
44}
45
46static inline
47bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
48 struct hal_rx_desc *desc)
49{
50 return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
51}
52
53static inline
54u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
55 struct hal_rx_desc *desc)
56{
57 return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
58}
59
60static inline
61bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
62 struct hal_rx_desc *desc)
63{
64 return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
65}
66
67static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
68 struct hal_rx_desc *desc)
69{
70 return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
71}
72
73static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
74 struct sk_buff *skb)
75{
76 struct ieee80211_hdr *hdr;
77
78 hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
79 return ieee80211_has_morefrags(hdr->frame_control);
80}
81
82static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
83 struct sk_buff *skb)
84{
85 struct ieee80211_hdr *hdr;
86
87 hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
88 return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
89}
90
91static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
92 struct hal_rx_desc *desc)
93{
94 return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
95}
96
97static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
98 struct hal_rx_desc *desc)
99{
100 return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
101}
102
103static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
104{
105 return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
106 __le32_to_cpu(attn->info2));
107}
108
109static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
110{
111 return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
112 __le32_to_cpu(attn->info1));
113}
114
115static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
116{
117 return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
118 __le32_to_cpu(attn->info1));
119}
120
121static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
122{
123 return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
124 __le32_to_cpu(attn->info2)) ==
125 RX_DESC_DECRYPT_STATUS_CODE_OK);
126}
127
128static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
129{
130 u32 info = __le32_to_cpu(attn->info1);
131 u32 errmap = 0;
132
133 if (info & RX_ATTENTION_INFO1_FCS_ERR)
134 errmap |= DP_RX_MPDU_ERR_FCS;
135
136 if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
137 errmap |= DP_RX_MPDU_ERR_DECRYPT;
138
139 if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
140 errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
141
142 if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
143 errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
144
145 if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
146 errmap |= DP_RX_MPDU_ERR_OVERFLOW;
147
148 if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
149 errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
150
151 if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
152 errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
153
154 return errmap;
155}
156
157static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
158 struct hal_rx_desc *desc)
159{
160 struct rx_attention *rx_attention;
161 u32 errmap;
162
163 rx_attention = ath11k_dp_rx_get_attention(ab, desc);
164 errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
165
166 return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
167}
168
169static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
170 struct hal_rx_desc *desc)
171{
172 return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
173}
174
175static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
176 struct hal_rx_desc *desc)
177{
178 return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
179}
180
181static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
182 struct hal_rx_desc *desc)
183{
184 return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
185}
186
187static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
188 struct hal_rx_desc *desc)
189{
190 return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
191}
192
193static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
194 struct hal_rx_desc *desc)
195{
196 return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
197}
198
199static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
200 struct hal_rx_desc *desc)
201{
202 return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
203}
204
205static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
206 struct hal_rx_desc *desc)
207{
208 return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
209}
210
211static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
212 struct hal_rx_desc *desc)
213{
214 return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
215}
216
217static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
218 struct hal_rx_desc *desc)
219{
220 return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
221}
222
223static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
224 struct hal_rx_desc *desc)
225{
226 return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
227}
228
229static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
230 struct hal_rx_desc *desc)
231{
232 return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
233}
234
235static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
236 struct hal_rx_desc *desc)
237{
238 return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
239}
240
241static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
242 struct hal_rx_desc *fdesc,
243 struct hal_rx_desc *ldesc)
244{
245 ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
246}
247
248static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
249{
250 return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
251 __le32_to_cpu(attn->info1));
252}
253
254static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
255 struct hal_rx_desc *rx_desc)
256{
257 u8 *rx_pkt_hdr;
258
259 rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);
260
261 return rx_pkt_hdr;
262}
263
264static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
265 struct hal_rx_desc *rx_desc)
266{
267 u32 tlv_tag;
268
269 tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);
270
271 return tlv_tag == HAL_RX_MPDU_START;
272}
273
274static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
275 struct hal_rx_desc *rx_desc)
276{
277 return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
278}
279
280static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
281 struct hal_rx_desc *desc,
282 u16 len)
283{
284 ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
285}
286
287static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
288 struct hal_rx_desc *desc)
289{
290 struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
291
292 return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
293 (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
294 __le32_to_cpu(attn->info1)));
295}
296
297static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
298 struct hal_rx_desc *desc)
299{
300 return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
301}
302
303static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
304 struct hal_rx_desc *desc)
305{
306 return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
307}
308
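/* Timer-driven servicing of the monitor rings for targets that reap
 * them outside NAPI (hence the NULL napi argument); re-arms itself
 * every ATH11K_MON_TIMER_INTERVAL.
 */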
309static void ath11k_dp_service_mon_ring(struct timer_list *t)
310{
311 struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
312 int i;
313
314 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
315 ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
316
317 mod_timer(&ab->mon_reap_timer, jiffies +
318 msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
319}
320
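/* Drain the monitor rings, e.g. before stopping pktlog: keep servicing
 * them with DP_MON_SERVICE_BUDGET until they are reaped dry or
 * DP_MON_PURGE_TIMEOUT_MS expires.
 */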
321static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
322{
323 int i, reaped = 0;
324 unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
325
326 do {
327 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
328 reaped += ath11k_dp_rx_process_mon_rings(ab, i,
329 NULL,
330 DP_MON_SERVICE_BUDGET);
331
332 /* nothing more to reap */
333 if (reaped < DP_MON_SERVICE_BUDGET)
334 return 0;
335
336 } while (time_before(jiffies, timeout));
337
338 ath11k_warn(ab, "dp mon ring purge timeout");
339
340 return -ETIMEDOUT;
341}
342
343/* Returns number of Rx buffers replenished */
344int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
345 struct dp_rxdma_ring *rx_ring,
346 int req_entries,
347 enum hal_rx_buf_return_buf_manager mgr)
348{
349 struct hal_srng *srng;
350 u32 *desc;
351 struct sk_buff *skb;
352 int num_free;
353 int num_remain;
354 int buf_id;
355 u32 cookie;
356 dma_addr_t paddr;
357
358 req_entries = min(req_entries, rx_ring->bufs_max);
359
360 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
361
362 spin_lock_bh(&srng->lock);
363
364 ath11k_hal_srng_access_begin(ab, srng);
365
366 num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
367 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
368 req_entries = num_free;
369
370 req_entries = min(num_free, req_entries);
371 num_remain = req_entries;
372
373 while (num_remain > 0) {
374 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
375 DP_RX_BUFFER_ALIGN_SIZE);
376 if (!skb)
377 break;
378
379 if (!IS_ALIGNED((unsigned long)skb->data,
380 DP_RX_BUFFER_ALIGN_SIZE)) {
381 skb_pull(skb,
382 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
383 skb->data);
384 }
385
386 paddr = dma_map_single(ab->dev, skb->data,
387 skb->len + skb_tailroom(skb),
388 DMA_FROM_DEVICE);
389 if (dma_mapping_error(ab->dev, paddr))
390 goto fail_free_skb;
391
392 spin_lock_bh(&rx_ring->idr_lock);
393 buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
394 (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
395 spin_unlock_bh(&rx_ring->idr_lock);
396 if (buf_id <= 0)
397 goto fail_dma_unmap;
398
399 desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
400 if (!desc)
401 goto fail_idr_remove;
402
403 ATH11K_SKB_RXCB(skb)->paddr = paddr;
404
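		/* The ring cookie encodes both the pdev and the IDR slot of
		 * the skb so the buffer can be found again on completion.
		 */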
405 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
406 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
407
408 num_remain--;
409
410 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
411 }
412
413 ath11k_hal_srng_access_end(ab, srng);
414
415 spin_unlock_bh(&srng->lock);
416
417 return req_entries - num_remain;
418
419fail_idr_remove:
420 spin_lock_bh(&rx_ring->idr_lock);
421 idr_remove(&rx_ring->bufs_idr, buf_id);
422 spin_unlock_bh(&rx_ring->idr_lock);
423fail_dma_unmap:
424 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
425 DMA_FROM_DEVICE);
426fail_free_skb:
427 dev_kfree_skb_any(skb);
428
429 ath11k_hal_srng_access_end(ab, srng);
430
431 spin_unlock_bh(&srng->lock);
432
433 return req_entries - num_remain;
434}
435
436static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
437 struct dp_rxdma_ring *rx_ring)
438{
439 struct sk_buff *skb;
440 int buf_id;
441
442 spin_lock_bh(&rx_ring->idr_lock);
443 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
444 idr_remove(&rx_ring->bufs_idr, buf_id);
445		/* TODO: Understand where the internal driver does this
446		 * dma_unmap of the rxdma buffer.
447		 */
448 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
449 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
450 dev_kfree_skb_any(skb);
451 }
452
453 idr_destroy(&rx_ring->bufs_idr);
454 spin_unlock_bh(&rx_ring->idr_lock);
455
456 return 0;
457}
458
459static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
460{
461 struct ath11k_pdev_dp *dp = &ar->dp;
462 struct ath11k_base *ab = ar->ab;
463 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
464 int i;
465
466 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
467
468 rx_ring = &dp->rxdma_mon_buf_ring;
469 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
470
471 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
472 rx_ring = &dp->rx_mon_status_refill_ring[i];
473 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
474 }
475
476 return 0;
477}
478
479static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
480 struct dp_rxdma_ring *rx_ring,
481 u32 ringtype)
482{
483 struct ath11k_pdev_dp *dp = &ar->dp;
484 int num_entries;
485
486 num_entries = rx_ring->refill_buf_ring.size /
487 ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
488
489 rx_ring->bufs_max = num_entries;
490 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
491 ar->ab->hw_params.hal_params->rx_buf_rbm);
492 return 0;
493}
494
495static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
496{
497 struct ath11k_pdev_dp *dp = &ar->dp;
498 struct ath11k_base *ab = ar->ab;
499 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
500 int i;
501
502 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
503
504 if (ar->ab->hw_params.rxdma1_enable) {
505 rx_ring = &dp->rxdma_mon_buf_ring;
506 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
507 }
508
509 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
510 rx_ring = &dp->rx_mon_status_refill_ring[i];
511 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
512 }
513
514 return 0;
515}
516
517static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
518{
519 struct ath11k_pdev_dp *dp = &ar->dp;
520 struct ath11k_base *ab = ar->ab;
521 int i;
522
523 ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
524
525 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
526 if (ab->hw_params.rx_mac_buf_ring)
527 ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
528
529 ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
530 ath11k_dp_srng_cleanup(ab,
531 &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
532 }
533
534 ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
535}
536
537void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
538{
539 struct ath11k_dp *dp = &ab->dp;
540 int i;
541
542 for (i = 0; i < DP_REO_DST_RING_MAX; i++)
543 ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
544}
545
546int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
547{
548 struct ath11k_dp *dp = &ab->dp;
549 int ret;
550 int i;
551
552 for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
553 ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
554 HAL_REO_DST, i, 0,
555 DP_REO_DST_RING_SIZE);
556 if (ret) {
557 ath11k_warn(ab, "failed to setup reo_dst_ring\n");
558 goto err_reo_cleanup;
559 }
560 }
561
562 return 0;
563
564err_reo_cleanup:
565 ath11k_dp_pdev_reo_cleanup(ab);
566
567 return ret;
568}
569
570static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
571{
572 struct ath11k_pdev_dp *dp = &ar->dp;
573 struct ath11k_base *ab = ar->ab;
574 struct dp_srng *srng = NULL;
575 int i;
576 int ret;
577
578 ret = ath11k_dp_srng_setup(ar->ab,
579 &dp->rx_refill_buf_ring.refill_buf_ring,
580 HAL_RXDMA_BUF, 0,
581 dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
582 if (ret) {
583 ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
584 return ret;
585 }
586
587 if (ar->ab->hw_params.rx_mac_buf_ring) {
588 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
589 ret = ath11k_dp_srng_setup(ar->ab,
590 &dp->rx_mac_buf_ring[i],
591 HAL_RXDMA_BUF, 1,
592 dp->mac_id + i, 1024);
593 if (ret) {
594 ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
595 i);
596 return ret;
597 }
598 }
599 }
600
601 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
602 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
603 HAL_RXDMA_DST, 0, dp->mac_id + i,
604 DP_RXDMA_ERR_DST_RING_SIZE);
605 if (ret) {
606 ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
607 return ret;
608 }
609 }
610
611 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
612 srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
613 ret = ath11k_dp_srng_setup(ar->ab,
614 srng,
615 HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
616 DP_RXDMA_MON_STATUS_RING_SIZE);
617 if (ret) {
618 ath11k_warn(ar->ab,
619 "failed to setup rx_mon_status_refill_ring %d\n", i);
620 return ret;
621 }
622 }
623
624	/* If rxdma1_enable is false, there is no need to set up
625	 * rxdma_mon_buf_ring, rxdma_mon_dst_ring and
626	 * rxdma_mon_desc_ring; only the mon status reap timer is
627	 * initialized (e.g. for QCA6390).
628	 */
629	if (!ar->ab->hw_params.rxdma1_enable) {
630		/* init mon status buffer reap timer */
631		timer_setup(&ar->ab->mon_reap_timer,
632			    ath11k_dp_service_mon_ring, 0);
633 return 0;
634 }
635
636 ret = ath11k_dp_srng_setup(ar->ab,
637 &dp->rxdma_mon_buf_ring.refill_buf_ring,
638 HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
639 DP_RXDMA_MONITOR_BUF_RING_SIZE);
640 if (ret) {
641 ath11k_warn(ar->ab,
642 "failed to setup HAL_RXDMA_MONITOR_BUF\n");
643 return ret;
644 }
645
646 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
647 HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
648 DP_RXDMA_MONITOR_DST_RING_SIZE);
649 if (ret) {
650 ath11k_warn(ar->ab,
651 "failed to setup HAL_RXDMA_MONITOR_DST\n");
652 return ret;
653 }
654
655 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
656 HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
657 DP_RXDMA_MONITOR_DESC_RING_SIZE);
658 if (ret) {
659 ath11k_warn(ar->ab,
660 "failed to setup HAL_RXDMA_MONITOR_DESC\n");
661 return ret;
662 }
663
664 return 0;
665}
666
667void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
668{
669 struct ath11k_dp *dp = &ab->dp;
670 struct dp_reo_cmd *cmd, *tmp;
671 struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
672 struct dp_rx_tid *rx_tid;
673
674 spin_lock_bh(&dp->reo_cmd_lock);
675 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
676 list_del(&cmd->list);
677 rx_tid = &cmd->data;
678 if (rx_tid->vaddr) {
679 dma_unmap_single(ab->dev, rx_tid->paddr,
680 rx_tid->size, DMA_BIDIRECTIONAL);
681 kfree(rx_tid->vaddr);
682 rx_tid->vaddr = NULL;
683 }
684 kfree(cmd);
685 }
686
687 list_for_each_entry_safe(cmd_cache, tmp_cache,
688 &dp->reo_cmd_cache_flush_list, list) {
689 list_del(&cmd_cache->list);
690 dp->reo_cmd_cache_flush_count--;
691 rx_tid = &cmd_cache->data;
692 if (rx_tid->vaddr) {
693 dma_unmap_single(ab->dev, rx_tid->paddr,
694 rx_tid->size, DMA_BIDIRECTIONAL);
695 kfree(rx_tid->vaddr);
696 rx_tid->vaddr = NULL;
697 }
698 kfree(cmd_cache);
699 }
700 spin_unlock_bh(&dp->reo_cmd_lock);
701}
702
703static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
704 enum hal_reo_cmd_status status)
705{
706 struct dp_rx_tid *rx_tid = ctx;
707
708 if (status != HAL_REO_CMD_SUCCESS)
709 ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
710 rx_tid->tid, status);
711 if (rx_tid->vaddr) {
712 dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
713 DMA_BIDIRECTIONAL);
714 kfree(rx_tid->vaddr);
715 rx_tid->vaddr = NULL;
716 }
717}
718
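/* Evict a TID's hardware queue descriptor from the REO cache, flushing
 * it from the tail in chunks of the non-QoS TID qdesc size, then issue
 * a final flush with NEED_STATUS so ath11k_dp_reo_cmd_free() can unmap
 * and free the host memory once the command completes.
 */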
719static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
720 struct dp_rx_tid *rx_tid)
721{
722 struct ath11k_hal_reo_cmd cmd = {0};
723 unsigned long tot_desc_sz, desc_sz;
724 int ret;
725
726 tot_desc_sz = rx_tid->size;
727 desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
728
729 while (tot_desc_sz > desc_sz) {
730 tot_desc_sz -= desc_sz;
731 cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
732 cmd.addr_hi = upper_32_bits(rx_tid->paddr);
733 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
734 HAL_REO_CMD_FLUSH_CACHE, &cmd,
735 NULL);
736 if (ret)
737 ath11k_warn(ab,
738 "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
739 rx_tid->tid, ret);
740 }
741
742 memset(&cmd, 0, sizeof(cmd));
743 cmd.addr_lo = lower_32_bits(rx_tid->paddr);
744 cmd.addr_hi = upper_32_bits(rx_tid->paddr);
745 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
746 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
747 HAL_REO_CMD_FLUSH_CACHE,
748 &cmd, ath11k_dp_reo_cmd_free);
749 if (ret) {
750 ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
751 rx_tid->tid, ret);
752 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
753 DMA_BIDIRECTIONAL);
754 kfree(rx_tid->vaddr);
755 rx_tid->vaddr = NULL;
756 }
757}
758
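/* Completion handler for the rx queue delete command. On success the
 * descriptor is not freed right away; it is parked on
 * reo_cmd_cache_flush_list and flushed out of the REO cache once the
 * list exceeds DP_REO_DESC_FREE_THRESHOLD entries or the entry is older
 * than DP_REO_DESC_FREE_TIMEOUT_MS.
 */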
759static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
760 enum hal_reo_cmd_status status)
761{
762 struct ath11k_base *ab = dp->ab;
763 struct dp_rx_tid *rx_tid = ctx;
764 struct dp_reo_cache_flush_elem *elem, *tmp;
765
766 if (status == HAL_REO_CMD_DRAIN) {
767 goto free_desc;
768 } else if (status != HAL_REO_CMD_SUCCESS) {
769 /* Shouldn't happen! Cleanup in case of other failure? */
770 ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
771 rx_tid->tid, status);
772 return;
773 }
774
775 elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
776 if (!elem)
777 goto free_desc;
778
779 elem->ts = jiffies;
780 memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
781
782 spin_lock_bh(&dp->reo_cmd_lock);
783 list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
784 dp->reo_cmd_cache_flush_count++;
785
786 /* Flush and invalidate aged REO desc from HW cache */
787 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
788 list) {
789 if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
790 time_after(jiffies, elem->ts +
791 msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
792 list_del(&elem->list);
793 dp->reo_cmd_cache_flush_count--;
794 spin_unlock_bh(&dp->reo_cmd_lock);
795
796 ath11k_dp_reo_cache_flush(ab, &elem->data);
797 kfree(elem);
798 spin_lock_bh(&dp->reo_cmd_lock);
799 }
800 }
801 spin_unlock_bh(&dp->reo_cmd_lock);
802
803 return;
804free_desc:
805 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
806 DMA_BIDIRECTIONAL);
807 kfree(rx_tid->vaddr);
808 rx_tid->vaddr = NULL;
809}
810
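/* Deactivate a peer's RX TID: send an UPDATE_RX_QUEUE command updating
 * the VLD bit so the hardware queue is no longer valid, and let
 * ath11k_dp_rx_tid_del_func() reclaim the queue descriptor.
 */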
811void ath11k_peer_rx_tid_delete(struct ath11k *ar,
812 struct ath11k_peer *peer, u8 tid)
813{
814 struct ath11k_hal_reo_cmd cmd = {0};
815 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
816 int ret;
817
818 if (!rx_tid->active)
819 return;
820
821 rx_tid->active = false;
822
823 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
824 cmd.addr_lo = lower_32_bits(rx_tid->paddr);
825 cmd.addr_hi = upper_32_bits(rx_tid->paddr);
826 cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
827 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
828 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
829 ath11k_dp_rx_tid_del_func);
830 if (ret) {
831 if (ret != -ESHUTDOWN)
832 ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
833 tid, ret);
834 dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
835 DMA_BIDIRECTIONAL);
836 kfree(rx_tid->vaddr);
837 rx_tid->vaddr = NULL;
838 }
839
840 rx_tid->paddr = 0;
841 rx_tid->size = 0;
842}
843
844static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
845 u32 *link_desc,
846 enum hal_wbm_rel_bm_act action)
847{
848 struct ath11k_dp *dp = &ab->dp;
849 struct hal_srng *srng;
850 u32 *desc;
851 int ret = 0;
852
853 srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
854
855 spin_lock_bh(&srng->lock);
856
857 ath11k_hal_srng_access_begin(ab, srng);
858
859 desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
860 if (!desc) {
861 ret = -ENOBUFS;
862 goto exit;
863 }
864
865 ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
866 action);
867
868exit:
869 ath11k_hal_srng_access_end(ab, srng);
870
871 spin_unlock_bh(&srng->lock);
872
873 return ret;
874}
875
876static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
877{
878 struct ath11k_base *ab = rx_tid->ab;
879
880 lockdep_assert_held(&ab->base_lock);
881
882 if (rx_tid->dst_ring_desc) {
883 if (rel_link_desc)
884 ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
885 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
886 kfree(rx_tid->dst_ring_desc);
887 rx_tid->dst_ring_desc = NULL;
888 }
889
890 rx_tid->cur_sn = 0;
891 rx_tid->last_frag_no = 0;
892 rx_tid->rx_frag_bitmap = 0;
893 __skb_queue_purge(&rx_tid->rx_frags);
894}
895
896void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
897{
898 struct dp_rx_tid *rx_tid;
899 int i;
900
901 lockdep_assert_held(&ar->ab->base_lock);
902
903 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
904 rx_tid = &peer->rx_tid[i];
905
906 spin_unlock_bh(&ar->ab->base_lock);
907 del_timer_sync(&rx_tid->frag_timer);
908 spin_lock_bh(&ar->ab->base_lock);
909
910 ath11k_dp_rx_frags_cleanup(rx_tid, true);
911 }
912}
913
914void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
915{
916 struct dp_rx_tid *rx_tid;
917 int i;
918
919 lockdep_assert_held(&ar->ab->base_lock);
920
921 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
922 rx_tid = &peer->rx_tid[i];
923
924 ath11k_peer_rx_tid_delete(ar, peer, i);
925 ath11k_dp_rx_frags_cleanup(rx_tid, true);
926
927 spin_unlock_bh(&ar->ab->base_lock);
928 del_timer_sync(&rx_tid->frag_timer);
929 spin_lock_bh(&ar->ab->base_lock);
930 }
931}
932
933static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
934 struct ath11k_peer *peer,
935 struct dp_rx_tid *rx_tid,
936 u32 ba_win_sz, u16 ssn,
937 bool update_ssn)
938{
939 struct ath11k_hal_reo_cmd cmd = {0};
940 int ret;
941
942 cmd.addr_lo = lower_32_bits(rx_tid->paddr);
943 cmd.addr_hi = upper_32_bits(rx_tid->paddr);
944 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
945 cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
946 cmd.ba_window_size = ba_win_sz;
947
948 if (update_ssn) {
949 cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
950 cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
951 }
952
953 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
954 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
955 NULL);
956 if (ret) {
957 ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
958 rx_tid->tid, ret);
959 return ret;
960 }
961
962 rx_tid->ba_win_sz = ba_win_sz;
963
964 return 0;
965}
966
967static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
968 const u8 *peer_mac, int vdev_id, u8 tid)
969{
970 struct ath11k_peer *peer;
971 struct dp_rx_tid *rx_tid;
972
973 spin_lock_bh(&ab->base_lock);
974
975 peer = ath11k_peer_find(ab, vdev_id, peer_mac);
976 if (!peer) {
977 ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
978 goto unlock_exit;
979 }
980
981 rx_tid = &peer->rx_tid[tid];
982 if (!rx_tid->active)
983 goto unlock_exit;
984
985 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
986 DMA_BIDIRECTIONAL);
987 kfree(rx_tid->vaddr);
988 rx_tid->vaddr = NULL;
989
990 rx_tid->active = false;
991
992unlock_exit:
993 spin_unlock_bh(&ab->base_lock);
994}
995
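/* Set up (or update) the RX reorder queue for a peer/TID: allocate a
 * HAL_LINK_DESC_ALIGN aligned REO queue descriptor, initialize and
 * DMA-map it, then pass its address to firmware via the WMI reorder
 * queue setup command.
 */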
996int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
997 u8 tid, u32 ba_win_sz, u16 ssn,
998 enum hal_pn_type pn_type)
999{
1000 struct ath11k_base *ab = ar->ab;
1001 struct ath11k_peer *peer;
1002 struct dp_rx_tid *rx_tid;
1003 u32 hw_desc_sz;
1004 u32 *addr_aligned;
1005 void *vaddr;
1006 dma_addr_t paddr;
1007 int ret;
1008
1009 spin_lock_bh(&ab->base_lock);
1010
1011 peer = ath11k_peer_find(ab, vdev_id, peer_mac);
1012 if (!peer) {
1013 ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
1014 peer_mac);
1015 spin_unlock_bh(&ab->base_lock);
1016 return -ENOENT;
1017 }
1018
1019 rx_tid = &peer->rx_tid[tid];
1020 /* Update the tid queue if it is already setup */
1021 if (rx_tid->active) {
1022 paddr = rx_tid->paddr;
1023 ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
1024 ba_win_sz, ssn, true);
1025 spin_unlock_bh(&ab->base_lock);
1026 if (ret) {
1027			ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d: %d\n",
1028				    peer_mac, tid, ret);
1029 return ret;
1030 }
1031
1032 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1033 peer_mac, paddr,
1034 tid, 1, ba_win_sz);
1035 if (ret)
1036 ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
1037 peer_mac, tid, ret);
1038 return ret;
1039 }
1040
1041 rx_tid->tid = tid;
1042
1043 rx_tid->ba_win_sz = ba_win_sz;
1044
1045 /* TODO: Optimize the memory allocation for qos tid based on
1046 * the actual BA window size in REO tid update path.
1047 */
1048 if (tid == HAL_DESC_REO_NON_QOS_TID)
1049 hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
1050 else
1051 hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
1052
1053 vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
1054 if (!vaddr) {
1055 spin_unlock_bh(&ab->base_lock);
1056 return -ENOMEM;
1057 }
1058
1059 addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
1060
1061 ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
1062 ssn, pn_type);
1063
1064 paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
1065 DMA_BIDIRECTIONAL);
1066
1067 ret = dma_mapping_error(ab->dev, paddr);
1068 if (ret) {
1069 spin_unlock_bh(&ab->base_lock);
1070 ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
1071 peer_mac, tid, ret);
1072 goto err_mem_free;
1073 }
1074
1075 rx_tid->vaddr = vaddr;
1076 rx_tid->paddr = paddr;
1077 rx_tid->size = hw_desc_sz;
1078 rx_tid->active = true;
1079
1080 spin_unlock_bh(&ab->base_lock);
1081
1082 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
1083 paddr, tid, 1, ba_win_sz);
1084 if (ret) {
1085 ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
1086 peer_mac, tid, ret);
1087 ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
1088 }
1089
1090 return ret;
1091
1092err_mem_free:
1093 kfree(rx_tid->vaddr);
1094 rx_tid->vaddr = NULL;
1095
1096 return ret;
1097}
1098
1099int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
1100 struct ieee80211_ampdu_params *params)
1101{
1102 struct ath11k_base *ab = ar->ab;
1103 struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
1104 int vdev_id = arsta->arvif->vdev_id;
1105 int ret;
1106
1107 ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
1108 params->tid, params->buf_size,
1109 params->ssn, arsta->pn_type);
1110 if (ret)
1111		ath11k_warn(ab, "failed to setup rx tid: %d\n", ret);
1112
1113 return ret;
1114}
1115
1116int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
1117 struct ieee80211_ampdu_params *params)
1118{
1119 struct ath11k_base *ab = ar->ab;
1120 struct ath11k_peer *peer;
1121 struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
1122 int vdev_id = arsta->arvif->vdev_id;
1123 dma_addr_t paddr;
1124 bool active;
1125 int ret;
1126
1127 spin_lock_bh(&ab->base_lock);
1128
1129 peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
1130 if (!peer) {
1131 ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
1132 spin_unlock_bh(&ab->base_lock);
1133 return -ENOENT;
1134 }
1135
1136 paddr = peer->rx_tid[params->tid].paddr;
1137 active = peer->rx_tid[params->tid].active;
1138
1139 if (!active) {
1140 spin_unlock_bh(&ab->base_lock);
1141 return 0;
1142 }
1143
1144 ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
1145 spin_unlock_bh(&ab->base_lock);
1146 if (ret) {
1147 ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
1148 params->tid, ret);
1149 return ret;
1150 }
1151
1152 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1153 params->sta->addr, paddr,
1154 params->tid, 1, 1);
1155 if (ret)
1156		ath11k_warn(ab, "failed to send wmi to delete rx tid: %d\n",
1157			    ret);
1158
1159 return ret;
1160}
1161
1162int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
1163 const u8 *peer_addr,
1164 enum set_key_cmd key_cmd,
1165 struct ieee80211_key_conf *key)
1166{
1167 struct ath11k *ar = arvif->ar;
1168 struct ath11k_base *ab = ar->ab;
1169 struct ath11k_hal_reo_cmd cmd = {0};
1170 struct ath11k_peer *peer;
1171 struct dp_rx_tid *rx_tid;
1172 u8 tid;
1173 int ret = 0;
1174
1175 /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1176 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1177 * for now.
1178 */
1179 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1180 return 0;
1181
1182 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
1183 cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
1184 HAL_REO_CMD_UPD0_PN_SIZE |
1185 HAL_REO_CMD_UPD0_PN_VALID |
1186 HAL_REO_CMD_UPD0_PN_CHECK |
1187 HAL_REO_CMD_UPD0_SVLD;
1188
1189 switch (key->cipher) {
1190 case WLAN_CIPHER_SUITE_TKIP:
1191 case WLAN_CIPHER_SUITE_CCMP:
1192 case WLAN_CIPHER_SUITE_CCMP_256:
1193 case WLAN_CIPHER_SUITE_GCMP:
1194 case WLAN_CIPHER_SUITE_GCMP_256:
1195 if (key_cmd == SET_KEY) {
1196 cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
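			/* 802.11 CCMP/GCMP PNs and the TKIP TSC are 48 bits wide */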
1197 cmd.pn_size = 48;
1198 }
1199 break;
1200 default:
1201 break;
1202 }
1203
1204 spin_lock_bh(&ab->base_lock);
1205
1206 peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
1207 if (!peer) {
1208 ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
1209 spin_unlock_bh(&ab->base_lock);
1210 return -ENOENT;
1211 }
1212
1213 for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
1214 rx_tid = &peer->rx_tid[tid];
1215 if (!rx_tid->active)
1216 continue;
1217 cmd.addr_lo = lower_32_bits(rx_tid->paddr);
1218 cmd.addr_hi = upper_32_bits(rx_tid->paddr);
1219 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
1220 HAL_REO_CMD_UPDATE_RX_QUEUE,
1221 &cmd, NULL);
1222 if (ret) {
1223 ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
1224 tid, ret);
1225 break;
1226 }
1227 }
1228
1229 spin_unlock_bh(&ab->base_lock);
1230
1231 return ret;
1232}
1233
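/* Return the user_stats slot already holding this peer id, or the
 * first free slot; -EINVAL when every slot is occupied by other peers.
 */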
1234static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1235 u16 peer_id)
1236{
1237 int i;
1238
1239 for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1240 if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1241 if (peer_id == ppdu_stats->user_stats[i].peer_id)
1242 return i;
1243 } else {
1244 return i;
1245 }
1246 }
1247
1248 return -EINVAL;
1249}
1250
1251static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
1252 u16 tag, u16 len, const void *ptr,
1253 void *data)
1254{
1255 struct htt_ppdu_stats_info *ppdu_info;
1256 struct htt_ppdu_user_stats *user_stats;
1257 int cur_user;
1258 u16 peer_id;
1259
1260 ppdu_info = data;
1261
1262 switch (tag) {
1263 case HTT_PPDU_STATS_TAG_COMMON:
1264 if (len < sizeof(struct htt_ppdu_stats_common)) {
1265 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1266 len, tag);
1267 return -EINVAL;
1268 }
1269 memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
1270 sizeof(struct htt_ppdu_stats_common));
1271 break;
1272 case HTT_PPDU_STATS_TAG_USR_RATE:
1273 if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
1274 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1275 len, tag);
1276 return -EINVAL;
1277 }
1278
1279 peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
1280 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1281 peer_id);
1282 if (cur_user < 0)
1283 return -EINVAL;
1284 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1285 user_stats->peer_id = peer_id;
1286 user_stats->is_valid_peer_id = true;
1287 memcpy((void *)&user_stats->rate, ptr,
1288 sizeof(struct htt_ppdu_stats_user_rate));
1289 user_stats->tlv_flags |= BIT(tag);
1290 break;
1291 case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
1292 if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
1293 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1294 len, tag);
1295 return -EINVAL;
1296 }
1297
1298 peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
1299 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1300 peer_id);
1301 if (cur_user < 0)
1302 return -EINVAL;
1303 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1304 user_stats->peer_id = peer_id;
1305 user_stats->is_valid_peer_id = true;
1306 memcpy((void *)&user_stats->cmpltn_cmn, ptr,
1307 sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
1308 user_stats->tlv_flags |= BIT(tag);
1309 break;
1310 case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
1311 if (len <
1312 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
1313 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1314 len, tag);
1315 return -EINVAL;
1316 }
1317
1318 peer_id =
1319 ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
1320 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1321 peer_id);
1322 if (cur_user < 0)
1323 return -EINVAL;
1324 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1325 user_stats->peer_id = peer_id;
1326 user_stats->is_valid_peer_id = true;
1327 memcpy((void *)&user_stats->ack_ba, ptr,
1328 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
1329 user_stats->tlv_flags |= BIT(tag);
1330 break;
1331 }
1332 return 0;
1333}
1334
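/* Iterate over a buffer of HTT TLVs: each TLV is a 32-bit header with
 * tag and length (HTT_TLV_TAG/HTT_TLV_LEN) followed by the payload.
 * Iteration stops early only on truncated input or when the callback
 * returns -ENOMEM. An illustrative (hypothetical) callback shape,
 * mirroring ath11k_htt_tlv_ppdu_stats_parse() above:
 *
 *	static int my_tlv_parse(struct ath11k_base *ab, u16 tag, u16 len,
 *				const void *ptr, void *data)
 *	{
 *		switch (tag) {
 *		case HTT_PPDU_STATS_TAG_COMMON:
 *			if (len < sizeof(struct htt_ppdu_stats_common))
 *				return -EINVAL;
 *			memcpy(data, ptr, sizeof(struct htt_ppdu_stats_common));
 *			break;
 *		}
 *		return 0;
 *	}
 */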
1335int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
1336 int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
1337 const void *ptr, void *data),
1338 void *data)
1339{
1340 const struct htt_tlv *tlv;
1341 const void *begin = ptr;
1342 u16 tlv_tag, tlv_len;
1343 int ret = -EINVAL;
1344
1345 while (len > 0) {
1346 if (len < sizeof(*tlv)) {
1347 ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
1348 ptr - begin, len, sizeof(*tlv));
1349 return -EINVAL;
1350 }
1351 tlv = (struct htt_tlv *)ptr;
1352 tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
1353 tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
1354 ptr += sizeof(*tlv);
1355 len -= sizeof(*tlv);
1356
1357 if (tlv_len > len) {
1358 ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
1359 tlv_tag, ptr - begin, len, tlv_len);
1360 return -EINVAL;
1361 }
1362 ret = iter(ab, tlv_tag, tlv_len, ptr, data);
1363 if (ret == -ENOMEM)
1364 return ret;
1365
1366 ptr += tlv_len;
1367 len -= tlv_len;
1368 }
1369 return 0;
1370}
1371
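/* Fold one user's HTT PPDU stats into the station's mac80211 rate info
 * (arsta->txrate) and, when extended TX stats are enabled via debugfs,
 * into the per-peer TX counters. Rate fields come out of
 * user_rate->rate_flags through the HTT_USR_RATE_* helpers.
 */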
1372static void
1373ath11k_update_per_peer_tx_stats(struct ath11k *ar,
1374 struct htt_ppdu_stats *ppdu_stats, u8 user)
1375{
1376 struct ath11k_base *ab = ar->ab;
1377 struct ath11k_peer *peer;
1378 struct ieee80211_sta *sta;
1379 struct ath11k_sta *arsta;
1380 struct htt_ppdu_stats_user_rate *user_rate;
1381 struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
1382 struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
1383 struct htt_ppdu_stats_common *common = &ppdu_stats->common;
1384 int ret;
1385 u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
1386 u32 succ_bytes = 0;
1387 u16 rate = 0, succ_pkts = 0;
1388 u32 tx_duration = 0;
1389 u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
1390 bool is_ampdu = false;
1391
1392 if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
1393 return;
1394
1395 if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
1396 is_ampdu =
1397 HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
1398
1399 if (usr_stats->tlv_flags &
1400 BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
1401 succ_bytes = usr_stats->ack_ba.success_bytes;
1402 succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
1403 usr_stats->ack_ba.info);
1404 tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
1405 usr_stats->ack_ba.info);
1406 }
1407
1408 if (common->fes_duration_us)
1409 tx_duration = common->fes_duration_us;
1410
1411 user_rate = &usr_stats->rate;
1412 flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
1413 bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
1414 nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
1415 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
1416 sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
1417 dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
1418
1419	/* Note: If the host configured fixed rates, or in some other
1420	 * special cases, broadcast/management frames are sent at different
1421	 * rates. Should firmware rate control be skipped for these?
1422	 */
1423
1424 if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
1425		ath11k_warn(ab, "invalid HE mcs %d in peer stats\n", mcs);
1426 return;
1427 }
1428
1429 if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
1430		ath11k_warn(ab, "invalid VHT mcs %d in peer stats\n", mcs);
1431 return;
1432 }
1433
1434 if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
1435		ath11k_warn(ab, "invalid HT mcs %d nss %d in peer stats\n",
1436			    mcs, nss);
1437 return;
1438 }
1439
1440 if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1441 ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
1442 flags,
1443 &rate_idx,
1444 &rate);
1445 if (ret < 0)
1446 return;
1447 }
1448
1449 rcu_read_lock();
1450 spin_lock_bh(&ab->base_lock);
1451 peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
1452
1453 if (!peer || !peer->sta) {
1454 spin_unlock_bh(&ab->base_lock);
1455 rcu_read_unlock();
1456 return;
1457 }
1458
1459 sta = peer->sta;
1460 arsta = ath11k_sta_to_arsta(sta);
1461
1462 memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1463
1464 switch (flags) {
1465 case WMI_RATE_PREAMBLE_OFDM:
1466 arsta->txrate.legacy = rate;
1467 break;
1468 case WMI_RATE_PREAMBLE_CCK:
1469 arsta->txrate.legacy = rate;
1470 break;
1471 case WMI_RATE_PREAMBLE_HT:
1472 arsta->txrate.mcs = mcs + 8 * (nss - 1);
1473 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1474 if (sgi)
1475 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1476 break;
1477 case WMI_RATE_PREAMBLE_VHT:
1478 arsta->txrate.mcs = mcs;
1479 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1480 if (sgi)
1481 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1482 break;
1483 case WMI_RATE_PREAMBLE_HE:
1484 arsta->txrate.mcs = mcs;
1485 arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1486 arsta->txrate.he_dcm = dcm;
1487 arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
1488 arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
1489 ((user_rate->ru_end -
1490 user_rate->ru_start) + 1);
1491 break;
1492 }
1493
1494 arsta->txrate.nss = nss;
1495
1496 arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
1497 arsta->tx_duration += tx_duration;
1498 memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1499
1500	/* PPDU stats reported for a mgmt packet don't carry valid tx bytes,
1501	 * so skip the peer stats update for mgmt packets.
1502	 */
1503 if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1504 memset(peer_stats, 0, sizeof(*peer_stats));
1505 peer_stats->succ_pkts = succ_pkts;
1506 peer_stats->succ_bytes = succ_bytes;
1507 peer_stats->is_ampdu = is_ampdu;
1508 peer_stats->duration = tx_duration;
1509 peer_stats->ba_fails =
1510 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1511 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1512
1513 if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
1514 ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
1515 }
1516
1517 spin_unlock_bh(&ab->base_lock);
1518 rcu_read_unlock();
1519}
1520
1521static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
1522 struct htt_ppdu_stats *ppdu_stats)
1523{
1524 u8 user;
1525
1526 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1527 ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1528}
1529
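/* Find the in-flight PPDU stats entry for ppdu_id or allocate a new
 * one; when the list depth exceeds HTT_PPDU_DESC_MAX_DEPTH the oldest
 * entry is flushed to the per-peer stats and freed first.
 */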
1530static
1531struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
1532 u32 ppdu_id)
1533{
1534 struct htt_ppdu_stats_info *ppdu_info;
1535
1536 lockdep_assert_held(&ar->data_lock);
1537
1538 if (!list_empty(&ar->ppdu_stats_info)) {
1539 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1540 if (ppdu_info->ppdu_id == ppdu_id)
1541 return ppdu_info;
1542 }
1543
1544 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1545 ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1546 typeof(*ppdu_info), list);
1547 list_del(&ppdu_info->list);
1548 ar->ppdu_stat_list_depth--;
1549 ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1550 kfree(ppdu_info);
1551 }
1552 }
1553
1554 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
1555 if (!ppdu_info)
1556 return NULL;
1557
1558 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1559 ar->ppdu_stat_list_depth++;
1560
1561 return ppdu_info;
1562}
1563
1564static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
1565 struct sk_buff *skb)
1566{
1567 struct ath11k_htt_ppdu_stats_msg *msg;
1568 struct htt_ppdu_stats_info *ppdu_info;
1569 struct ath11k *ar;
1570 int ret;
1571 u8 pdev_id;
1572 u32 ppdu_id, len;
1573
1574 msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
1575 len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
1576 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
1577 ppdu_id = msg->ppdu_id;
1578
1579 rcu_read_lock();
1580 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1581 if (!ar) {
1582 ret = -EINVAL;
1583 goto out;
1584 }
1585
1586 if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
1587 trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
1588
1589 spin_lock_bh(&ar->data_lock);
1590 ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1591 if (!ppdu_info) {
1592 ret = -EINVAL;
1593 goto out_unlock_data;
1594 }
1595
1596 ppdu_info->ppdu_id = ppdu_id;
1597 ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
1598 ath11k_htt_tlv_ppdu_stats_parse,
1599 (void *)ppdu_info);
1600 if (ret) {
1601		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
1602 goto out_unlock_data;
1603 }
1604
1605out_unlock_data:
1606 spin_unlock_bh(&ar->data_lock);
1607
1608out:
1609 rcu_read_unlock();
1610
1611 return ret;
1612}
1613
1614static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1615{
1616 struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1617 struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
1618 struct ath11k *ar;
1619 u8 pdev_id;
1620
1621 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1622
1623 rcu_read_lock();
1624
1625 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1626 if (!ar) {
1627 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
1628 goto out;
1629 }
1630
1631 trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
1632 ar->ab->pktlog_defs_checksum);
1633
1634out:
1635 rcu_read_unlock();
1636}
1637
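/* The backpressure event payload is three u32 words: pdev/ring type/
 * ring id, head and tail pointers, and the backpressure time. They are
 * recorded into the matching UMAC or LMAC ring stats under base_lock.
 */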
1638static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
1639 struct sk_buff *skb)
1640{
1641 u32 *data = (u32 *)skb->data;
1642 u8 pdev_id, ring_type, ring_id, pdev_idx;
1643 u16 hp, tp;
1644 u32 backpressure_time;
1645 struct ath11k_bp_stats *bp_stats;
1646
1647 pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
1648 ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
1649 ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
1650 ++data;
1651
1652 hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
1653 tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
1654 ++data;
1655
1656 backpressure_time = *data;
1657
1658	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d, ring id %d, hp %d, tp %d, backpressure time %d\n",
1659 pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
1660
1661 if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
1662 if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
1663 return;
1664
1665 bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
1666 } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
1667 pdev_idx = DP_HW2SW_MACID(pdev_id);
1668
1669 if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
1670 return;
1671
1672 bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
1673 } else {
1674 ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
1675 ring_type);
1676 return;
1677 }
1678
1679 spin_lock_bh(&ab->base_lock);
1680 bp_stats->hp = hp;
1681 bp_stats->tp = tp;
1682 bp_stats->count++;
1683 bp_stats->jiffies = jiffies;
1684 spin_unlock_bh(&ab->base_lock);
1685}
1686
1687void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1688 struct sk_buff *skb)
1689{
1690 struct ath11k_dp *dp = &ab->dp;
1691 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1692 enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1693 u16 peer_id;
1694 u8 vdev_id;
1695 u8 mac_addr[ETH_ALEN];
1696 u16 peer_mac_h16;
1697 u16 ast_hash;
1698 u16 hw_peer_id;
1699
1700	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
1701
1702 switch (type) {
1703 case HTT_T2H_MSG_TYPE_VERSION_CONF:
1704 dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1705 resp->version_msg.version);
1706 dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1707 resp->version_msg.version);
1708 complete(&dp->htt_tgt_version_received);
1709 break;
1710 case HTT_T2H_MSG_TYPE_PEER_MAP:
1711 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1712 resp->peer_map_ev.info);
1713 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1714 resp->peer_map_ev.info);
1715 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1716 resp->peer_map_ev.info1);
1717 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1718 peer_mac_h16, mac_addr);
1719 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1720 break;
1721 case HTT_T2H_MSG_TYPE_PEER_MAP2:
1722 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1723 resp->peer_map_ev.info);
1724 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1725 resp->peer_map_ev.info);
1726 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1727 resp->peer_map_ev.info1);
1728 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1729 peer_mac_h16, mac_addr);
1730 ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
1731 resp->peer_map_ev.info2);
1732 hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
1733 resp->peer_map_ev.info1);
1734 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1735 hw_peer_id);
1736 break;
1737 case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1738 case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1739 peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1740 resp->peer_unmap_ev.info);
1741 ath11k_peer_unmap_event(ab, peer_id);
1742 break;
1743 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1744 ath11k_htt_pull_ppdu_stats(ab, skb);
1745 break;
1746 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1747 ath11k_debugfs_htt_ext_stats_handler(ab, skb);
1748 break;
1749 case HTT_T2H_MSG_TYPE_PKTLOG:
1750 ath11k_htt_pktlog(ab, skb);
1751 break;
1752 case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
1753 ath11k_htt_backpressure_event_handler(ab, skb);
1754 break;
1755 default:
1756 ath11k_warn(ab, "htt event %d not handled\n", type);
1757 break;
1758 }
1759
1760 dev_kfree_skb_any(skb);
1761}
1762
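/* Rebuild an MSDU that arrived split across several rx buffers into a
 * single skb: the first buffer keeps its data in place (tailroom is
 * expanded if required) and each continuation buffer's payload is
 * copied in after stripping its hal_rx_desc.
 */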
1763static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
1764 struct sk_buff_head *msdu_list,
1765 struct sk_buff *first, struct sk_buff *last,
1766 u8 l3pad_bytes, int msdu_len)
1767{
1768 struct ath11k_base *ab = ar->ab;
1769 struct sk_buff *skb;
1770 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1771 int buf_first_hdr_len, buf_first_len;
1772 struct hal_rx_desc *ldesc;
1773 int space_extra, rem_len, buf_len;
1774 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
1775
1776 /* As the msdu is spread across multiple rx buffers,
1777 * find the offset to the start of msdu for computing
1778 * the length of the msdu in the first buffer.
1779 */
1780 buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1781 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1782
1783 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1784 skb_put(first, buf_first_hdr_len + msdu_len);
1785 skb_pull(first, buf_first_hdr_len);
1786 return 0;
1787 }
1788
1789 ldesc = (struct hal_rx_desc *)last->data;
1790 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
1791 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);
1792
1793	/* The MSDU spans multiple buffers because its length exceeds
1794	 * DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
1795	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1796	 */
1797 skb_put(first, DP_RX_BUFFER_SIZE);
1798 skb_pull(first, buf_first_hdr_len);
1799
1800	/* When an MSDU is spread over multiple buffers, the attention,
1801	 * MSDU_END and MPDU_END TLVs are valid only in the last buffer.
1802	 * Copy those TLVs.
1803	 */
1803 ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1804
1805 space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1806 if (space_extra > 0 &&
1807 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1808 /* Free up all buffers of the MSDU */
1809 while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1810 rxcb = ATH11K_SKB_RXCB(skb);
1811 if (!rxcb->is_continuation) {
1812 dev_kfree_skb_any(skb);
1813 break;
1814 }
1815 dev_kfree_skb_any(skb);
1816 }
1817 return -ENOMEM;
1818 }
1819
1820 rem_len = msdu_len - buf_first_len;
1821 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1822 rxcb = ATH11K_SKB_RXCB(skb);
1823 if (rxcb->is_continuation)
1824 buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1825 else
1826 buf_len = rem_len;
1827
1828 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1829 WARN_ON_ONCE(1);
1830 dev_kfree_skb_any(skb);
1831 return -EINVAL;
1832 }
1833
1834 skb_put(skb, buf_len + hal_rx_desc_sz);
1835 skb_pull(skb, hal_rx_desc_sz);
1836 skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1837 buf_len);
1838 dev_kfree_skb_any(skb);
1839
1840 rem_len -= buf_len;
1841 if (!rxcb->is_continuation)
1842 break;
1843 }
1844
1845 return 0;
1846}
1847
1848static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1849 struct sk_buff *first)
1850{
1851 struct sk_buff *skb;
1852 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1853
1854 if (!rxcb->is_continuation)
1855 return first;
1856
1857 skb_queue_walk(msdu_list, skb) {
1858 rxcb = ATH11K_SKB_RXCB(skb);
1859 if (!rxcb->is_continuation)
1860 return skb;
1861 }
1862
1863 return NULL;
1864}
1865
1866static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
1867{
1868 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1869 struct rx_attention *rx_attention;
1870 bool ip_csum_fail, l4_csum_fail;
1871
1872 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
1873 ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
1874 l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
1875
1876 msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1877 CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1878}
1879
1880int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype)
1881{
1882 switch (enctype) {
1883 case HAL_ENCRYPT_TYPE_OPEN:
1884 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1885 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1886 return 0;
1887 case HAL_ENCRYPT_TYPE_CCMP_128:
1888 return IEEE80211_CCMP_MIC_LEN;
1889 case HAL_ENCRYPT_TYPE_CCMP_256:
1890 return IEEE80211_CCMP_256_MIC_LEN;
1891 case HAL_ENCRYPT_TYPE_GCMP_128:
1892 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1893 return IEEE80211_GCMP_MIC_LEN;
1894 case HAL_ENCRYPT_TYPE_WEP_40:
1895 case HAL_ENCRYPT_TYPE_WEP_104:
1896 case HAL_ENCRYPT_TYPE_WEP_128:
1897 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1898 case HAL_ENCRYPT_TYPE_WAPI:
1899 break;
1900 }
1901
1902 ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1903 return 0;
1904}
1905
1906static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1907 enum hal_encrypt_type enctype)
1908{
1909 switch (enctype) {
1910 case HAL_ENCRYPT_TYPE_OPEN:
1911 return 0;
1912 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1913 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1914 return IEEE80211_TKIP_IV_LEN;
1915 case HAL_ENCRYPT_TYPE_CCMP_128:
1916 return IEEE80211_CCMP_HDR_LEN;
1917 case HAL_ENCRYPT_TYPE_CCMP_256:
1918 return IEEE80211_CCMP_256_HDR_LEN;
1919 case HAL_ENCRYPT_TYPE_GCMP_128:
1920 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1921 return IEEE80211_GCMP_HDR_LEN;
1922 case HAL_ENCRYPT_TYPE_WEP_40:
1923 case HAL_ENCRYPT_TYPE_WEP_104:
1924 case HAL_ENCRYPT_TYPE_WEP_128:
1925 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1926 case HAL_ENCRYPT_TYPE_WAPI:
1927 break;
1928 }
1929
1930 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1931 return 0;
1932}
1933
1934static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1935 enum hal_encrypt_type enctype)
1936{
1937 switch (enctype) {
1938 case HAL_ENCRYPT_TYPE_OPEN:
1939 case HAL_ENCRYPT_TYPE_CCMP_128:
1940 case HAL_ENCRYPT_TYPE_CCMP_256:
1941 case HAL_ENCRYPT_TYPE_GCMP_128:
1942 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1943 return 0;
1944 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1945 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1946 return IEEE80211_TKIP_ICV_LEN;
1947 case HAL_ENCRYPT_TYPE_WEP_40:
1948 case HAL_ENCRYPT_TYPE_WEP_104:
1949 case HAL_ENCRYPT_TYPE_WEP_128:
1950 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1951 case HAL_ENCRYPT_TYPE_WAPI:
1952 break;
1953 }
1954
1955 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1956 return 0;
1957}
1958
1959static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
1960 struct sk_buff *msdu,
1961 u8 *first_hdr,
1962 enum hal_encrypt_type enctype,
1963 struct ieee80211_rx_status *status)
1964{
1965 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1966 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1967 struct ieee80211_hdr *hdr;
1968 size_t hdr_len;
1969 u8 da[ETH_ALEN];
1970 u8 sa[ETH_ALEN];
1971 u16 qos_ctl = 0;
1972 u8 *qos;
1973
1974 /* copy SA & DA and pull decapped header */
1975 hdr = (struct ieee80211_hdr *)msdu->data;
1976 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1977 ether_addr_copy(da, ieee80211_get_DA(hdr));
1978 ether_addr_copy(sa, ieee80211_get_SA(hdr));
1979 skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
1980
1981 if (rxcb->is_first_msdu) {
1982 /* original 802.11 header is valid for the first msdu
1983 * hence we can reuse the same header
1984 */
1985 hdr = (struct ieee80211_hdr *)first_hdr;
1986 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1987
1988 /* Each A-MSDU subframe will be reported as a separate MSDU,
1989 * so strip the A-MSDU bit from QoS Ctl.
1990 */
1991 if (ieee80211_is_data_qos(hdr->frame_control)) {
1992 qos = ieee80211_get_qos_ctl(hdr);
1993 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1994 }
1995 } else {
1996 /* Rebuild qos header if this is a middle/last msdu */
1997 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1998
1999 /* Reset the order bit as the HT_Control header is stripped */
2000 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
2001
2002 qos_ctl = rxcb->tid;
2003
2004 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
2005 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2006
2007 /* TODO Add other QoS ctl fields when required */
2008
2009 /* copy decap header before overwriting for reuse below */
2010 memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
2011 }
2012
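 /* If the IV was not stripped by HW, push back the crypto params
 * that originally followed the 802.11 header.
 */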
2013 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2014 memcpy(skb_push(msdu,
2015 ath11k_dp_rx_crypto_param_len(ar, enctype)),
2016 (void *)hdr + hdr_len,
2017 ath11k_dp_rx_crypto_param_len(ar, enctype));
2018 }
2019
2020 if (!rxcb->is_first_msdu) {
2021 memcpy(skb_push(msdu,
2022 IEEE80211_QOS_CTL_LEN), &qos_ctl,
2023 IEEE80211_QOS_CTL_LEN);
2024 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2025 return;
2026 }
2027
2028 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2029
2030 /* original 802.11 header has a different DA and in
2031 * case of 4addr it may also have different SA
2032 */
2033 hdr = (struct ieee80211_hdr *)msdu->data;
2034 ether_addr_copy(ieee80211_get_DA(hdr), da);
2035 ether_addr_copy(ieee80211_get_SA(hdr), sa);
2036}
2037
2038static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
2039 enum hal_encrypt_type enctype,
2040 struct ieee80211_rx_status *status,
2041 bool decrypted)
2042{
2043 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2044 struct ieee80211_hdr *hdr;
2045 size_t hdr_len;
2046 size_t crypto_len;
2047
2048 if (!rxcb->is_first_msdu ||
2049 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
2050 WARN_ON_ONCE(1);
2051 return;
2052 }
2053
2054 skb_trim(msdu, msdu->len - FCS_LEN);
2055
2056 if (!decrypted)
2057 return;
2058
2059 hdr = (void *)msdu->data;
2060
2061 /* Tail */
2062 if (status->flag & RX_FLAG_IV_STRIPPED) {
2063 skb_trim(msdu, msdu->len -
2064 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2065
2066 skb_trim(msdu, msdu->len -
2067 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2068 } else {
2069 /* MIC */
2070 if (status->flag & RX_FLAG_MIC_STRIPPED)
2071 skb_trim(msdu, msdu->len -
2072 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2073
2074 /* ICV */
2075 if (status->flag & RX_FLAG_ICV_STRIPPED)
2076 skb_trim(msdu, msdu->len -
2077 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2078 }
2079
2080 /* MMIC */
2081 if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2082 !ieee80211_has_morefrags(hdr->frame_control) &&
2083 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2084 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2085
2086 /* Head */
2087 if (status->flag & RX_FLAG_IV_STRIPPED) {
2088 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2089 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2090
2091 memmove((void *)msdu->data + crypto_len,
2092 (void *)msdu->data, hdr_len);
2093 skb_pull(msdu, crypto_len);
2094 }
2095}
2096
2097static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
2098 struct sk_buff *msdu,
2099 enum hal_encrypt_type enctype)
2100{
2101 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2102 struct ieee80211_hdr *hdr;
2103 size_t hdr_len, crypto_len;
2104 void *rfc1042;
2105 bool is_amsdu;
2106
2107 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
2108 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
2109 rfc1042 = hdr;
2110
2111 if (rxcb->is_first_msdu) {
2112 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2113 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2114
2115 rfc1042 += hdr_len + crypto_len;
2116 }
2117
2118 if (is_amsdu)
2119 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
2120
2121 return rfc1042;
2122}
2123
2124static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
2125 struct sk_buff *msdu,
2126 u8 *first_hdr,
2127 enum hal_encrypt_type enctype,
2128 struct ieee80211_rx_status *status)
2129{
2130 struct ieee80211_hdr *hdr;
2131 struct ethhdr *eth;
2132 size_t hdr_len;
2133 u8 da[ETH_ALEN];
2134 u8 sa[ETH_ALEN];
2135 void *rfc1042;
2136
2137 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
2138 if (WARN_ON_ONCE(!rfc1042))
2139 return;
2140
2141 /* pull decapped header and copy SA & DA */
2142 eth = (struct ethhdr *)msdu->data;
2143 ether_addr_copy(da, eth->h_dest);
2144 ether_addr_copy(sa, eth->h_source);
2145 skb_pull(msdu, sizeof(struct ethhdr));
2146
2147 /* push rfc1042/llc/snap */
2148 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
2149 sizeof(struct ath11k_dp_rfc1042_hdr));
2150
2151 /* push original 802.11 header */
2152 hdr = (struct ieee80211_hdr *)first_hdr;
2153 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2154
2155 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2156 memcpy(skb_push(msdu,
2157 ath11k_dp_rx_crypto_param_len(ar, enctype)),
2158 (void *)hdr + hdr_len,
2159 ath11k_dp_rx_crypto_param_len(ar, enctype));
2160 }
2161
2162 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2163
2164 /* original 802.11 header has a different DA and in
2165 * case of 4addr it may also have different SA
2166 */
2167 hdr = (struct ieee80211_hdr *)msdu->data;
2168 ether_addr_copy(ieee80211_get_DA(hdr), da);
2169 ether_addr_copy(ieee80211_get_SA(hdr), sa);
2170}
2171
2172static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
2173 struct hal_rx_desc *rx_desc,
2174 enum hal_encrypt_type enctype,
2175 struct ieee80211_rx_status *status,
2176 bool decrypted)
2177{
2178 u8 *first_hdr;
2179 u8 decap;
2180 struct ethhdr *ehdr;
2181
2182 first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
2183 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
2184
2185 switch (decap) {
2186 case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2187 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
2188 enctype, status);
2189 break;
2190 case DP_RX_DECAP_TYPE_RAW:
2191 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2192 decrypted);
2193 break;
2194 case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2195 ehdr = (struct ethhdr *)msdu->data;
2196
2197 /* mac80211 allows fast path only for authorized STA */
2198 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2199 ATH11K_SKB_RXCB(msdu)->is_eapol = true;
2200 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2201 enctype, status);
2202 break;
2203 }
2204
2205 /* PN for mcast packets will be validated in mac80211;
2206 * remove eth header and add 802.11 header.
2207 */
2208 if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2209 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2210 enctype, status);
2211 break;
2212 case DP_RX_DECAP_TYPE_8023:
2213 /* TODO: Handle undecap for these formats */
2214 break;
2215 }
2216}
2217
2218static struct ath11k_peer *
2219ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
2220{
2221 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2222 struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2223 struct ath11k_peer *peer = NULL;
2224
2225 lockdep_assert_held(&ab->base_lock);
2226
2227 if (rxcb->peer_id)
2228 peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
2229
2230 if (peer)
2231 return peer;
2232
2233 if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2234 return NULL;
2235
2236 peer = ath11k_peer_find_by_addr(ab,
2237 ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
2238 return peer;
2239}
2240
2241static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
2242 struct sk_buff *msdu,
2243 struct hal_rx_desc *rx_desc,
2244 struct ieee80211_rx_status *rx_status)
2245{
2246 bool fill_crypto_hdr;
2247 enum hal_encrypt_type enctype;
2248 bool is_decrypted = false;
2249 struct ath11k_skb_rxcb *rxcb;
2250 struct ieee80211_hdr *hdr;
2251 struct ath11k_peer *peer;
2252 struct rx_attention *rx_attention;
2253 u32 err_bitmap;
2254
2255 /* PN for multicast packets will be checked in mac80211 */
2256 rxcb = ATH11K_SKB_RXCB(msdu);
2257 fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
2258 rxcb->is_mcbc = fill_crypto_hdr;
2259
2260 if (rxcb->is_mcbc) {
2261 rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
2262 rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
2263 }
2264
2265 spin_lock_bh(&ar->ab->base_lock);
2266 peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2267 if (peer) {
2268 if (rxcb->is_mcbc)
2269 enctype = peer->sec_type_grp;
2270 else
2271 enctype = peer->sec_type;
2272 } else {
2273 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
2274 }
2275 spin_unlock_bh(&ar->ab->base_lock);
2276
2277 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
2278 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
2279 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2280 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
2281
2282 /* Clear per-MPDU flags while leaving per-PPDU flags intact */
2283 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2284 RX_FLAG_MMIC_ERROR |
2285 RX_FLAG_DECRYPTED |
2286 RX_FLAG_IV_STRIPPED |
2287 RX_FLAG_MMIC_STRIPPED);
2288
2289 if (err_bitmap & DP_RX_MPDU_ERR_FCS)
2290 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2291 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
2292 rx_status->flag |= RX_FLAG_MMIC_ERROR;
2293
2294 if (is_decrypted) {
2295 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2296
2297 if (fill_crypto_hdr)
2298 rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2299 RX_FLAG_ICV_STRIPPED;
2300 else
2301 rx_status->flag |= RX_FLAG_IV_STRIPPED |
2302 RX_FLAG_PN_VALIDATED;
2303 }
2304
2305 ath11k_dp_rx_h_csum_offload(ar, msdu);
2306 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
2307 enctype, rx_status, is_decrypted);
2308
2309 if (!is_decrypted || fill_crypto_hdr)
2310 return;
2311
2312 if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
2313 DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2314 hdr = (void *)msdu->data;
2315 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2316 }
2317}
2318
2319static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2320 struct ieee80211_rx_status *rx_status)
2321{
2322 struct ieee80211_supported_band *sband;
2323 enum rx_msdu_start_pkt_type pkt_type;
2324 u8 bw;
2325 u8 rate_mcs, nss;
2326 u8 sgi;
2327 bool is_cck, is_ldpc;
2328
2329 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
2330 bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
2331 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
2332 nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
2333 sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
2334
2335 switch (pkt_type) {
2336 case RX_MSDU_START_PKT_TYPE_11A:
2337 case RX_MSDU_START_PKT_TYPE_11B:
2338 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2339 sband = &ar->mac.sbands[rx_status->band];
2340 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2341 is_cck);
2342 break;
2343 case RX_MSDU_START_PKT_TYPE_11N:
2344 rx_status->encoding = RX_ENC_HT;
2345 if (rate_mcs > ATH11K_HT_MCS_MAX) {
2346 ath11k_warn(ar->ab,
2347 "Received with invalid mcs in HT mode %d\n",
2348 rate_mcs);
2349 break;
2350 }
2351 rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2352 if (sgi)
2353 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2354 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2355 break;
2356 case RX_MSDU_START_PKT_TYPE_11AC:
2357 rx_status->encoding = RX_ENC_VHT;
2358 rx_status->rate_idx = rate_mcs;
2359 if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2360 ath11k_warn(ar->ab,
2361 "Received with invalid mcs in VHT mode %d\n",
2362 rate_mcs);
2363 break;
2364 }
2365 rx_status->nss = nss;
2366 if (sgi)
2367 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2368 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2369 is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
2370 if (is_ldpc)
2371 rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
2372 break;
2373 case RX_MSDU_START_PKT_TYPE_11AX:
2374 rx_status->rate_idx = rate_mcs;
2375 if (rate_mcs > ATH11K_HE_MCS_MAX) {
2376 ath11k_warn(ar->ab,
2377 "Received with invalid mcs in HE mode %d\n",
2378 rate_mcs);
2379 break;
2380 }
2381 rx_status->encoding = RX_ENC_HE;
2382 rx_status->nss = nss;
2383 rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
2384 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2385 break;
2386 }
2387}
2388
2389static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2390 struct ieee80211_rx_status *rx_status)
2391{
2392 u8 channel_num;
2393 u32 center_freq, meta_data;
2394 struct ieee80211_channel *channel;
2395
2396 rx_status->freq = 0;
2397 rx_status->rate_idx = 0;
2398 rx_status->nss = 0;
2399 rx_status->encoding = RX_ENC_LEGACY;
2400 rx_status->bw = RATE_INFO_BW_20;
2401
2402 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2403
2404 meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
2405 channel_num = meta_data;
2406 center_freq = meta_data >> 16;
2407
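 /* Derive the band: 6 GHz must be identified by center frequency,
 * since its channel numbers overlap with the 2.4/5 GHz numbering.
 */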
2408 if (center_freq >= ATH11K_MIN_6G_FREQ &&
2409 center_freq <= ATH11K_MAX_6G_FREQ) {
2410 rx_status->band = NL80211_BAND_6GHZ;
2411 rx_status->freq = center_freq;
2412 } else if (channel_num >= 1 && channel_num <= 14) {
2413 rx_status->band = NL80211_BAND_2GHZ;
2414 } else if (channel_num >= 36 && channel_num <= 177) {
2415 rx_status->band = NL80211_BAND_5GHZ;
2416 } else {
2417 spin_lock_bh(&ar->data_lock);
2418 channel = ar->rx_channel;
2419 if (channel) {
2420 rx_status->band = channel->band;
2421 channel_num =
2422 ieee80211_frequency_to_channel(channel->center_freq);
2423 }
2424 spin_unlock_bh(&ar->data_lock);
2425 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
2426 rx_desc, sizeof(struct hal_rx_desc));
2427 }
2428
2429 if (rx_status->band != NL80211_BAND_6GHZ)
2430 rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2431 rx_status->band);
2432
2433 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2434}
2435
2436static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
2437 struct sk_buff *msdu,
2438 struct ieee80211_rx_status *status)
2439{
2440 static const struct ieee80211_radiotap_he known = {
2441 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2442 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2443 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2444 };
2445 struct ieee80211_rx_status *rx_status;
2446 struct ieee80211_radiotap_he *he = NULL;
2447 struct ieee80211_sta *pubsta = NULL;
2448 struct ath11k_peer *peer;
2449 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2450 u8 decap = DP_RX_DECAP_TYPE_RAW;
2451 bool is_mcbc = rxcb->is_mcbc;
2452 bool is_eapol = rxcb->is_eapol;
2453
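 /* For HE frames mac80211 expects a struct ieee80211_radiotap_he
 * pushed to the head of the skb when RX_FLAG_RADIOTAP_HE is set.
 */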
2454 if (status->encoding == RX_ENC_HE &&
2455 !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2456 !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2457 he = skb_push(msdu, sizeof(known));
2458 memcpy(he, &known, sizeof(known));
2459 status->flag |= RX_FLAG_RADIOTAP_HE;
2460 }
2461
2462 if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2463 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
2464
2465 spin_lock_bh(&ar->ab->base_lock);
2466 peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2467 if (peer && peer->sta)
2468 pubsta = peer->sta;
2469 spin_unlock_bh(&ar->ab->base_lock);
2470
2471 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
2472 "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2473 msdu,
2474 msdu->len,
2475 peer ? peer->addr : NULL,
2476 rxcb->tid,
2477 is_mcbc ? "mcast" : "ucast",
2478 rxcb->seq_no,
2479 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2480 (status->encoding == RX_ENC_HT) ? "ht" : "",
2481 (status->encoding == RX_ENC_VHT) ? "vht" : "",
2482 (status->encoding == RX_ENC_HE) ? "he" : "",
2483 (status->bw == RATE_INFO_BW_40) ? "40" : "",
2484 (status->bw == RATE_INFO_BW_80) ? "80" : "",
2485 (status->bw == RATE_INFO_BW_160) ? "160" : "",
2486 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2487 status->rate_idx,
2488 status->nss,
2489 status->freq,
2490 status->band, status->flag,
2491 !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2492 !!(status->flag & RX_FLAG_MMIC_ERROR),
2493 !!(status->flag & RX_FLAG_AMSDU_MORE));
2494
2495 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
2496 msdu->data, msdu->len);
2497
2498 rx_status = IEEE80211_SKB_RXCB(msdu);
2499 *rx_status = *status;
2500
2501 /* TODO: trace rx packet */
2502
 /* The PN for multicast packets is not validated in HW, so skip
 * the 802.3 rx path for them.
 * Also, fast_rx expects the STA to be authorized, hence EAPOL
 * packets are sent via the slow path.
 */
2508 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2509 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2510 rx_status->flag |= RX_FLAG_8023;
2511
2512 ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
2513}
2514
2515static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
2516 struct sk_buff *msdu,
2517 struct sk_buff_head *msdu_list,
2518 struct ieee80211_rx_status *rx_status)
2519{
2520 struct ath11k_base *ab = ar->ab;
2521 struct hal_rx_desc *rx_desc, *lrx_desc;
2522 struct rx_attention *rx_attention;
2523 struct ath11k_skb_rxcb *rxcb;
2524 struct sk_buff *last_buf;
2525 u8 l3_pad_bytes;
2526 u8 *hdr_status;
2527 u16 msdu_len;
2528 int ret;
2529 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
2530
2531 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2532 if (!last_buf) {
2533 ath11k_warn(ab,
2534 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
2535 ret = -EIO;
2536 goto free_out;
2537 }
2538
2539 rx_desc = (struct hal_rx_desc *)msdu->data;
2540 if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
2541 ath11k_warn(ar->ab, "msdu len not valid\n");
2542 ret = -EIO;
2543 goto free_out;
2544 }
2545
2546 lrx_desc = (struct hal_rx_desc *)last_buf->data;
2547 rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
2548 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
2549 ath11k_warn(ab, "msdu_done bit in attention is not set\n");
2550 ret = -EIO;
2551 goto free_out;
2552 }
2553
2554 rxcb = ATH11K_SKB_RXCB(msdu);
2555 rxcb->rx_desc = rx_desc;
2556 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
2557 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);
2558
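 /* Three cases: a reassembled fragment (only the descriptor needs
 * to be pulled), an MSDU contained in a single rx buffer, or an
 * MSDU spilling into continuation buffers that must be coalesced
 * first.
 */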
2559 if (rxcb->is_frag) {
2560 skb_pull(msdu, hal_rx_desc_sz);
2561 } else if (!rxcb->is_continuation) {
2562 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2563 hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
2564 ret = -EINVAL;
2565 ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
2566 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
2567 sizeof(struct ieee80211_hdr));
2568 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
2569 sizeof(struct hal_rx_desc));
2570 goto free_out;
2571 }
2572 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2573 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2574 } else {
2575 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
2576 msdu, last_buf,
2577 l3_pad_bytes, msdu_len);
2578 if (ret) {
2579 ath11k_warn(ab,
2580 "failed to coalesce msdu rx buffer%d\n", ret);
2581 goto free_out;
2582 }
2583 }
2584
2585 ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2586 ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2587
2588 rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2589
2590 return 0;
2591
2592free_out:
2593 return ret;
2594}
2595
2596static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2597 struct napi_struct *napi,
2598 struct sk_buff_head *msdu_list,
2599 int mac_id)
2600{
2601 struct sk_buff *msdu;
2602 struct ath11k *ar;
2603 struct ieee80211_rx_status rx_status = {0};
2604 int ret;
2605
2606 if (skb_queue_empty(msdu_list))
2607 return;
2608
2609 if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
2610 __skb_queue_purge(msdu_list);
2611 return;
2612 }
2613
2614 ar = ab->pdevs[mac_id].ar;
2615 if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
2616 __skb_queue_purge(msdu_list);
2617 return;
2618 }
2619
2620 while ((msdu = __skb_dequeue(msdu_list))) {
2621 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2622 if (unlikely(ret)) {
2623 ath11k_dbg(ab, ATH11K_DBG_DATA,
2624 "Unable to process msdu %d", ret);
2625 dev_kfree_skb_any(msdu);
2626 continue;
2627 }
2628
2629 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2630 }
2631}
2632
2633int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
2634 struct napi_struct *napi, int budget)
2635{
2636 struct ath11k_dp *dp = &ab->dp;
2637 struct dp_rxdma_ring *rx_ring;
2638 int num_buffs_reaped[MAX_RADIOS] = {0};
2639 struct sk_buff_head msdu_list[MAX_RADIOS];
2640 struct ath11k_skb_rxcb *rxcb;
2641 int total_msdu_reaped = 0;
2642 struct hal_srng *srng;
2643 struct sk_buff *msdu;
2644 bool done = false;
2645 int buf_id, mac_id;
2646 struct ath11k *ar;
2647 struct hal_reo_dest_ring *desc;
2648 enum hal_reo_dest_ring_push_reason push_reason;
2649 u32 cookie;
2650 int i;
2651
2652 for (i = 0; i < MAX_RADIOS; i++)
2653 __skb_queue_head_init(&msdu_list[i]);
2654
2655 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2656
2657 spin_lock_bh(&srng->lock);
2658
2659try_again:
2660 ath11k_hal_srng_access_begin(ab, srng);
2661
2662 while (likely(desc =
2663 (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
2664 srng))) {
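 /* The SW cookie encodes both the pdev (mac) id and the buf id
 * that indexes the rx refill ring's IDR.
 */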
2665 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
2666 desc->buf_addr_info.info1);
2667 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2668 cookie);
2669 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
2670
2671 if (unlikely(buf_id == 0))
2672 continue;
2673
2674 ar = ab->pdevs[mac_id].ar;
2675 rx_ring = &ar->dp.rx_refill_buf_ring;
2676 spin_lock_bh(&rx_ring->idr_lock);
2677 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2678 if (unlikely(!msdu)) {
2679 ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
2680 buf_id);
2681 spin_unlock_bh(&rx_ring->idr_lock);
2682 continue;
2683 }
2684
2685 idr_remove(&rx_ring->bufs_idr, buf_id);
2686 spin_unlock_bh(&rx_ring->idr_lock);
2687
2688 rxcb = ATH11K_SKB_RXCB(msdu);
2689 dma_unmap_single(ab->dev, rxcb->paddr,
2690 msdu->len + skb_tailroom(msdu),
2691 DMA_FROM_DEVICE);
2692
2693 num_buffs_reaped[mac_id]++;
2694
2695 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
2696 desc->info0);
2697 if (unlikely(push_reason !=
2698 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
2699 dev_kfree_skb_any(msdu);
2700 ab->soc_stats.hal_reo_error[ring_id]++;
2701 continue;
2702 }
2703
2704 rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
2705 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2706 rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
2707 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2708 rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
2709 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2710 rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
2711 desc->rx_mpdu_info.meta_data);
2712 rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
2713 desc->rx_mpdu_info.info0);
2714 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
2715 desc->info0);
2716
2717 rxcb->mac_id = mac_id;
2718 __skb_queue_tail(&msdu_list[mac_id], msdu);
2719
2720 if (rxcb->is_continuation) {
2721 done = false;
2722 } else {
2723 total_msdu_reaped++;
2724 done = true;
2725 }
2726
2727 if (total_msdu_reaped >= budget)
2728 break;
2729 }
2730
 /* HW might have updated the head pointer after we cached it.
 * In that case, even though there are entries in the ring, we'll
 * get a NULL rx_desc. Retry the read with the updated cached head
 * pointer so that we can reap the complete MPDU in the current
 * rx processing pass.
 */
2737 if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
2738 ath11k_hal_srng_access_end(ab, srng);
2739 goto try_again;
2740 }
2741
2742 ath11k_hal_srng_access_end(ab, srng);
2743
2744 spin_unlock_bh(&srng->lock);
2745
2746 if (unlikely(!total_msdu_reaped))
2747 goto exit;
2748
2749 for (i = 0; i < ab->num_radios; i++) {
2750 if (!num_buffs_reaped[i])
2751 continue;
2752
2753 ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
2754
2755 ar = ab->pdevs[i].ar;
2756 rx_ring = &ar->dp.rx_refill_buf_ring;
2757
2758 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
2759 ab->hw_params.hal_params->rx_buf_rbm);
2760 }
2761exit:
2762 return total_msdu_reaped;
2763}
2764
2765static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2766 struct hal_rx_mon_ppdu_info *ppdu_info)
2767{
2768 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2769 u32 num_msdu;
2770 int i;
2771
2772 if (!rx_stats)
2773 return;
2774
2775 arsta->rssi_comb = ppdu_info->rssi_comb;
2776 ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
2777
2778 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2779 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2780
2781 rx_stats->num_msdu += num_msdu;
2782 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2783 ppdu_info->tcp_ack_msdu_count;
2784 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2785 rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2786
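 /* Legacy 11a/11b PPDUs carry no NSS/MCS/TID info in the rate
 * TLVs; account them against single stream and the catch-all
 * MCS/TID buckets instead.
 */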
2787 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2788 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
2789 ppdu_info->nss = 1;
2790 ppdu_info->mcs = HAL_RX_MAX_MCS;
2791 ppdu_info->tid = IEEE80211_NUM_TIDS;
2792 }
2793
2794 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2795 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2796
2797 if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2798 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2799
2800 if (ppdu_info->gi < HAL_RX_GI_MAX)
2801 rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2802
2803 if (ppdu_info->bw < HAL_RX_BW_MAX)
2804 rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2805
2806 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2807 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2808
2809 if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2810 rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2811
2812 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2813 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2814
2815 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2816 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2817
2818 if (ppdu_info->is_stbc)
2819 rx_stats->stbc_count += num_msdu;
2820
2821 if (ppdu_info->beamformed)
2822 rx_stats->beamformed_count += num_msdu;
2823
2824 if (ppdu_info->num_mpdu_fcs_ok > 1)
2825 rx_stats->ampdu_msdu_count += num_msdu;
2826 else
2827 rx_stats->non_ampdu_msdu_count += num_msdu;
2828
2829 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2830 rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
2831 rx_stats->dcm_count += ppdu_info->dcm;
2832 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
2833
2834 arsta->rssi_comb = ppdu_info->rssi_comb;
2835
2836 BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
2837 ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
2838
2839 for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
2840 arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
2841
2842 rx_stats->rx_duration += ppdu_info->rx_duration;
2843 arsta->rx_duration = rx_stats->rx_duration;
2844}
2845
2846static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2847 struct dp_rxdma_ring *rx_ring,
2848 int *buf_id)
2849{
2850 struct sk_buff *skb;
2851 dma_addr_t paddr;
2852
2853 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2854 DP_RX_BUFFER_ALIGN_SIZE);
2855
2856 if (!skb)
2857 goto fail_alloc_skb;
2858
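 /* HW requires the status buffer start address to be aligned to
 * DP_RX_BUFFER_ALIGN_SIZE; advance skb->data if needed.
 */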
2859 if (!IS_ALIGNED((unsigned long)skb->data,
2860 DP_RX_BUFFER_ALIGN_SIZE)) {
2861 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2862 skb->data);
2863 }
2864
2865 paddr = dma_map_single(ab->dev, skb->data,
2866 skb->len + skb_tailroom(skb),
2867 DMA_FROM_DEVICE);
2868 if (unlikely(dma_mapping_error(ab->dev, paddr)))
2869 goto fail_free_skb;
2870
2871 spin_lock_bh(&rx_ring->idr_lock);
2872 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2873 rx_ring->bufs_max, GFP_ATOMIC);
2874 spin_unlock_bh(&rx_ring->idr_lock);
2875 if (*buf_id < 0)
2876 goto fail_dma_unmap;
2877
2878 ATH11K_SKB_RXCB(skb)->paddr = paddr;
2879 return skb;
2880
2881fail_dma_unmap:
2882 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2883 DMA_FROM_DEVICE);
2884fail_free_skb:
2885 dev_kfree_skb_any(skb);
2886fail_alloc_skb:
2887 return NULL;
2888}
2889
2890int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2891 struct dp_rxdma_ring *rx_ring,
2892 int req_entries,
2893 enum hal_rx_buf_return_buf_manager mgr)
2894{
2895 struct hal_srng *srng;
2896 u32 *desc;
2897 struct sk_buff *skb;
2898 int num_free;
2899 int num_remain;
2900 int buf_id;
2901 u32 cookie;
2902 dma_addr_t paddr;
2903
2904 req_entries = min(req_entries, rx_ring->bufs_max);
2905
2906 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2907
2908 spin_lock_bh(&srng->lock);
2909
2910 ath11k_hal_srng_access_begin(ab, srng);
2911
2912 num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2913
2914 req_entries = min(num_free, req_entries);
2915 num_remain = req_entries;
2916
2917 while (num_remain > 0) {
2918 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2919 &buf_id);
2920 if (!skb)
2921 break;
2922 paddr = ATH11K_SKB_RXCB(skb)->paddr;
2923
2924 desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2925 if (!desc)
2926 goto fail_desc_get;
2927
2928 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2929 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2930
2931 num_remain--;
2932
2933 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2934 }
2935
2936 ath11k_hal_srng_access_end(ab, srng);
2937
2938 spin_unlock_bh(&srng->lock);
2939
2940 return req_entries - num_remain;
2941
2942fail_desc_get:
2943 spin_lock_bh(&rx_ring->idr_lock);
2944 idr_remove(&rx_ring->bufs_idr, buf_id);
2945 spin_unlock_bh(&rx_ring->idr_lock);
2946 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2947 DMA_FROM_DEVICE);
2948 dev_kfree_skb_any(skb);
2949 ath11k_hal_srng_access_end(ab, srng);
2950 spin_unlock_bh(&srng->lock);
2951
2952 return req_entries - num_remain;
2953}
2954
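/* Maximum PPDU id delta treated as a genuine lead/lag; a larger
 * delta is assumed to be a PPDU id wrap-around.
 */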
2955#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
2956
2957static void
2958ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
2959 struct hal_tlv_hdr *tlv)
2960{
2961 struct hal_rx_ppdu_start *ppdu_start;
2962 u16 ppdu_id_diff, ppdu_id, tlv_len;
2963 u8 *ptr;
2964
 /* The PPDU ID is part of the second TLV; advance ptr to it */
2966 tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
2967 ptr = (u8 *)tlv;
2968 ptr += sizeof(*tlv) + tlv_len;
2969 tlv = (struct hal_tlv_hdr *)ptr;
2970
2971 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
2972 return;
2973
2974 ptr += sizeof(*tlv);
2975 ppdu_start = (struct hal_rx_ppdu_start *)ptr;
2976 ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
2977 __le32_to_cpu(ppdu_start->info0));
2978
2979 if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
2980 pmon->buf_state = DP_MON_STATUS_LEAD;
2981 ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
2982 if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
2983 pmon->buf_state = DP_MON_STATUS_LAG;
2984 } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
2985 pmon->buf_state = DP_MON_STATUS_LAG;
2986 ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
2987 if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
2988 pmon->buf_state = DP_MON_STATUS_LEAD;
2989 }
2990}
2991
2992static enum dp_mon_status_buf_state
2993ath11k_dp_rx_mon_buf_done(struct ath11k_base *ab, struct hal_srng *srng,
2994 struct dp_rxdma_ring *rx_ring)
2995{
2996 struct ath11k_skb_rxcb *rxcb;
2997 struct hal_tlv_hdr *tlv;
2998 struct sk_buff *skb;
2999 void *status_desc;
3000 dma_addr_t paddr;
3001 u32 cookie;
3002 int buf_id;
3003 u8 rbm;
3004
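 /* Peek at the next status ring entry and report whether its
 * buffer has completed DMA, i.e. starts with the
 * HAL_RX_STATUS_BUFFER_DONE tag.
 */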
3005 status_desc = ath11k_hal_srng_src_next_peek(ab, srng);
3006 if (!status_desc)
3007 return DP_MON_STATUS_NO_DMA;
3008
3009 ath11k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
3010
3011 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
3012
3013 spin_lock_bh(&rx_ring->idr_lock);
3014 skb = idr_find(&rx_ring->bufs_idr, buf_id);
3015 spin_unlock_bh(&rx_ring->idr_lock);
3016
3017 if (!skb)
3018 return DP_MON_STATUS_NO_DMA;
3019
3020 rxcb = ATH11K_SKB_RXCB(skb);
3021 dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
3022 skb->len + skb_tailroom(skb),
3023 DMA_FROM_DEVICE);
3024
3025 tlv = (struct hal_tlv_hdr *)skb->data;
3026 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE)
3027 return DP_MON_STATUS_NO_DMA;
3028
3029 return DP_MON_STATUS_REPLINISH;
3030}
3031
3032static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
3033 int *budget, struct sk_buff_head *skb_list)
3034{
3035 struct ath11k *ar;
3036 const struct ath11k_hw_hal_params *hal_params;
3037 enum dp_mon_status_buf_state reap_status;
3038 struct ath11k_pdev_dp *dp;
3039 struct dp_rxdma_ring *rx_ring;
3040 struct ath11k_mon_data *pmon;
3041 struct hal_srng *srng;
3042 void *rx_mon_status_desc;
3043 struct sk_buff *skb;
3044 struct ath11k_skb_rxcb *rxcb;
3045 struct hal_tlv_hdr *tlv;
3046 u32 cookie;
3047 int buf_id, srng_id;
3048 dma_addr_t paddr;
3049 u8 rbm;
3050 int num_buffs_reaped = 0;
3051
3052 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
3053 dp = &ar->dp;
3054 pmon = &dp->mon_data;
3055 srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
3056 rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
3057
3058 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
3059
3060 spin_lock_bh(&srng->lock);
3061
3062 ath11k_hal_srng_access_begin(ab, srng);
3063 while (*budget) {
3064 *budget -= 1;
3065 rx_mon_status_desc =
3066 ath11k_hal_srng_src_peek(ab, srng);
3067 if (!rx_mon_status_desc) {
3068 pmon->buf_state = DP_MON_STATUS_REPLINISH;
3069 break;
3070 }
3071
3072 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
3073 &cookie, &rbm);
3074 if (paddr) {
3075 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
3076
3077 spin_lock_bh(&rx_ring->idr_lock);
3078 skb = idr_find(&rx_ring->bufs_idr, buf_id);
3079 spin_unlock_bh(&rx_ring->idr_lock);
3080
3081 if (!skb) {
3082 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
3083 buf_id);
3084 pmon->buf_state = DP_MON_STATUS_REPLINISH;
3085 goto move_next;
3086 }
3087
3088 rxcb = ATH11K_SKB_RXCB(skb);
3089
3090 dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
3091 skb->len + skb_tailroom(skb),
3092 DMA_FROM_DEVICE);
3093
3094 tlv = (struct hal_tlv_hdr *)skb->data;
3095 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
3096 HAL_RX_STATUS_BUFFER_DONE) {
3097 ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
3098 FIELD_GET(HAL_TLV_HDR_TAG,
3099 tlv->tl), buf_id);
 /* The RxDMA status-done bit might not be set even
 * though the tail pointer has already been moved by
 * HW.
 */

 /* If the done status is missing:
 * 1. As per the MAC team's suggestion, when the
 *    HP + 1 entry is peeked and its DMA is not done,
 *    but the HP + 2 entry's DMA done is set, skip the
 *    HP + 1 entry and start processing from the next
 *    interrupt.
 * 2. If the HP + 2 entry's DMA done is not set either,
 *    keep polling the HP + 1 entry, i.e. check the
 *    same buffer again the next time this function
 *    runs.
 */
3115
3116 reap_status = ath11k_dp_rx_mon_buf_done(ab, srng,
3117 rx_ring);
3118 if (reap_status == DP_MON_STATUS_NO_DMA)
3119 continue;
3120
3121 spin_lock_bh(&rx_ring->idr_lock);
3122 idr_remove(&rx_ring->bufs_idr, buf_id);
3123 spin_unlock_bh(&rx_ring->idr_lock);
3124
3125 dma_unmap_single(ab->dev, rxcb->paddr,
3126 skb->len + skb_tailroom(skb),
3127 DMA_FROM_DEVICE);
3128
3129 dev_kfree_skb_any(skb);
3130 pmon->buf_state = DP_MON_STATUS_REPLINISH;
3131 goto move_next;
3132 }
3133
3134 spin_lock_bh(&rx_ring->idr_lock);
3135 idr_remove(&rx_ring->bufs_idr, buf_id);
3136 spin_unlock_bh(&rx_ring->idr_lock);
3137 if (ab->hw_params.full_monitor_mode) {
3138 ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
3139 if (paddr == pmon->mon_status_paddr)
3140 pmon->buf_state = DP_MON_STATUS_MATCH;
3141 }
3142
3143 dma_unmap_single(ab->dev, rxcb->paddr,
3144 skb->len + skb_tailroom(skb),
3145 DMA_FROM_DEVICE);
3146
3147 __skb_queue_tail(skb_list, skb);
3148 } else {
3149 pmon->buf_state = DP_MON_STATUS_REPLINISH;
3150 }
3151move_next:
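 /* Replace the reaped buffer with a freshly mapped one so the
 * ring entry can be handed straight back to HW.
 */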
3152 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
3153 &buf_id);
3154
3155 if (!skb) {
3156 hal_params = ab->hw_params.hal_params;
3157 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
3158 hal_params->rx_buf_rbm);
3159 num_buffs_reaped++;
3160 break;
3161 }
3162 rxcb = ATH11K_SKB_RXCB(skb);
3163
3164 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
3165 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3166
3167 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
3168 cookie,
3169 ab->hw_params.hal_params->rx_buf_rbm);
3170 ath11k_hal_srng_src_get_next_entry(ab, srng);
3171 num_buffs_reaped++;
3172 }
3173 ath11k_hal_srng_access_end(ab, srng);
3174 spin_unlock_bh(&srng->lock);
3175
3176 return num_buffs_reaped;
3177}
3178
3179static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
3180{
3181 struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
3182
3183 spin_lock_bh(&rx_tid->ab->base_lock);
3184 if (rx_tid->last_frag_no &&
3185 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
3186 spin_unlock_bh(&rx_tid->ab->base_lock);
3187 return;
3188 }
3189 ath11k_dp_rx_frags_cleanup(rx_tid, true);
3190 spin_unlock_bh(&rx_tid->ab->base_lock);
3191}
3192
3193int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
3194{
3195 struct ath11k_base *ab = ar->ab;
3196 struct crypto_shash *tfm;
3197 struct ath11k_peer *peer;
3198 struct dp_rx_tid *rx_tid;
3199 int i;
3200
3201 tfm = crypto_alloc_shash("michael_mic", 0, 0);
3202 if (IS_ERR(tfm)) {
3203 ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
3204 PTR_ERR(tfm));
3205 return PTR_ERR(tfm);
3206 }
3207
3208 spin_lock_bh(&ab->base_lock);
3209
3210 peer = ath11k_peer_find(ab, vdev_id, peer_mac);
3211 if (!peer) {
3212 ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
3213 spin_unlock_bh(&ab->base_lock);
3214 crypto_free_shash(tfm);
3215 return -ENOENT;
3216 }
3217
3218 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3219 rx_tid = &peer->rx_tid[i];
3220 rx_tid->ab = ab;
3221 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
3222 skb_queue_head_init(&rx_tid->rx_frags);
3223 }
3224
3225 peer->tfm_mmic = tfm;
3226 peer->dp_setup_done = true;
3227 spin_unlock_bh(&ab->base_lock);
3228
3229 return 0;
3230}
3231
3232static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
3233 struct ieee80211_hdr *hdr, u8 *data,
3234 size_t data_len, u8 *mic)
3235{
3236 SHASH_DESC_ON_STACK(desc, tfm);
3237 u8 mic_hdr[16] = {0};
3238 u8 tid = 0;
3239 int ret;
3240
3241 if (!tfm)
3242 return -EINVAL;
3243
3244 desc->tfm = tfm;
3245
3246 ret = crypto_shash_setkey(tfm, key, 8);
3247 if (ret)
3248 goto out;
3249
3250 ret = crypto_shash_init(desc);
3251 if (ret)
3252 goto out;
3253
 /* Build the Michael MIC header: DA (6) | SA (6) | priority/TID (1)
 * followed by three zero pad bytes.
 */
3255 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3256 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3257 if (ieee80211_is_data_qos(hdr->frame_control))
3258 tid = ieee80211_get_tid(hdr);
3259 mic_hdr[12] = tid;
3260
3261 ret = crypto_shash_update(desc, mic_hdr, 16);
3262 if (ret)
3263 goto out;
3264 ret = crypto_shash_update(desc, data, data_len);
3265 if (ret)
3266 goto out;
3267 ret = crypto_shash_final(desc, mic);
3268out:
3269 shash_desc_zero(desc);
3270 return ret;
3271}
3272
3273static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
3274 struct sk_buff *msdu)
3275{
3276 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3277 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3278 struct ieee80211_key_conf *key_conf;
3279 struct ieee80211_hdr *hdr;
3280 u8 mic[IEEE80211_CCMP_MIC_LEN];
3281 int head_len, tail_len, ret;
3282 size_t data_len;
3283 u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3284 u8 *key, *data;
3285 u8 key_idx;
3286
3287 if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
3288 HAL_ENCRYPT_TYPE_TKIP_MIC)
3289 return 0;
3290
3291 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3292 hdr_len = ieee80211_hdrlen(hdr->frame_control);
3293 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
3294 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
3295
3296 if (!is_multicast_ether_addr(hdr->addr1))
3297 key_idx = peer->ucast_keyidx;
3298 else
3299 key_idx = peer->mcast_keyidx;
3300
3301 key_conf = peer->keys[key_idx];
3302
3303 data = msdu->data + head_len;
3304 data_len = msdu->len - head_len - tail_len;
3305 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
3306
3307 ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
3308 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
3309 goto mic_fail;
3310
3311 return 0;
3312
3313mic_fail:
3314 (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
3315 (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
3316
3317 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
3318 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
3319 skb_pull(msdu, hal_rx_desc_sz);
3320
3321 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
3322 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
3323 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
3324 ieee80211_rx(ar->hw, msdu);
3325 return -EINVAL;
3326}
3327
3328static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
3329 enum hal_encrypt_type enctype, u32 flags)
3330{
3331 struct ieee80211_hdr *hdr;
3332 size_t hdr_len;
3333 size_t crypto_len;
3334 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3335
3336 if (!flags)
3337 return;
3338
3339 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3340
3341 if (flags & RX_FLAG_MIC_STRIPPED)
3342 skb_trim(msdu, msdu->len -
3343 ath11k_dp_rx_crypto_mic_len(ar, enctype));
3344
3345 if (flags & RX_FLAG_ICV_STRIPPED)
3346 skb_trim(msdu, msdu->len -
3347 ath11k_dp_rx_crypto_icv_len(ar, enctype));
3348
3349 if (flags & RX_FLAG_IV_STRIPPED) {
3350 hdr_len = ieee80211_hdrlen(hdr->frame_control);
3351 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
3352
3353 memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
3354 (void *)msdu->data + hal_rx_desc_sz, hdr_len);
3355 skb_pull(msdu, crypto_len);
3356 }
3357}
3358
3359static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
3360 struct ath11k_peer *peer,
3361 struct dp_rx_tid *rx_tid,
3362 struct sk_buff **defrag_skb)
3363{
3364 struct hal_rx_desc *rx_desc;
3365 struct sk_buff *skb, *first_frag, *last_frag;
3366 struct ieee80211_hdr *hdr;
3367 struct rx_attention *rx_attention;
3368 enum hal_encrypt_type enctype;
3369 bool is_decrypted = false;
3370 int msdu_len = 0;
3371 int extra_space;
3372 u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3373
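 /* Coalesce all fragments into the first one: only the first
 * fragment keeps its IV and 802.11 header, only the last keeps
 * the MIC/ICV, and every intermediate header is dropped.
 */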
3374 first_frag = skb_peek(&rx_tid->rx_frags);
3375 last_frag = skb_peek_tail(&rx_tid->rx_frags);
3376
3377 skb_queue_walk(&rx_tid->rx_frags, skb) {
3378 flags = 0;
3379 rx_desc = (struct hal_rx_desc *)skb->data;
3380 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3381
3382 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
3383 if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
3384 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
3385 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
3386 }
3387
3388 if (is_decrypted) {
3389 if (skb != first_frag)
3390 flags |= RX_FLAG_IV_STRIPPED;
3391 if (skb != last_frag)
3392 flags |= RX_FLAG_ICV_STRIPPED |
3393 RX_FLAG_MIC_STRIPPED;
3394 }
3395
3396 /* RX fragments are always raw packets */
3397 if (skb != last_frag)
3398 skb_trim(skb, skb->len - FCS_LEN);
3399 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3400
3401 if (skb != first_frag)
3402 skb_pull(skb, hal_rx_desc_sz +
3403 ieee80211_hdrlen(hdr->frame_control));
3404 msdu_len += skb->len;
3405 }
3406
3407 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3408 if (extra_space > 0 &&
3409 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3410 return -ENOMEM;
3411
3412 __skb_unlink(first_frag, &rx_tid->rx_frags);
3413 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3414 skb_put_data(first_frag, skb->data, skb->len);
3415 dev_kfree_skb_any(skb);
3416 }
3417
3418 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
3419 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3420 ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
3421
3422 if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3423 first_frag = NULL;
3424
3425 *defrag_skb = first_frag;
3426 return 0;
3427}
3428
3429static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
3430 struct sk_buff *defrag_skb)
3431{
3432 struct ath11k_base *ab = ar->ab;
3433 struct ath11k_pdev_dp *dp = &ar->dp;
3434 struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
3435 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3436 struct hal_reo_entrance_ring *reo_ent_ring;
3437 struct hal_reo_dest_ring *reo_dest_ring;
3438 struct dp_link_desc_bank *link_desc_banks;
3439 struct hal_rx_msdu_link *msdu_link;
3440 struct hal_rx_msdu_details *msdu0;
3441 struct hal_srng *srng;
3442 dma_addr_t paddr;
3443 u32 desc_bank, msdu_info, mpdu_info;
3444 u32 dst_idx, cookie, hal_rx_desc_sz;
3445 int ret, buf_id;
3446
3447 hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
3448 link_desc_banks = ab->dp.link_desc_banks;
3449 reo_dest_ring = rx_tid->dst_ring_desc;
3450
3451 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3452 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3453 (paddr - link_desc_banks[desc_bank].paddr));
3454 msdu0 = &msdu_link->msdu_link[0];
3455 dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
3456 memset(msdu0, 0, sizeof(*msdu0));
3457
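 /* Rewrite the first MSDU link descriptor so REO sees the
 * defragmented skb as one complete MSDU.
 */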
3458 msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
3459 FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
3460 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
3461 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
3462 defrag_skb->len - hal_rx_desc_sz) |
3463 FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
3464 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
3465 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
3466 msdu0->rx_msdu_info.info0 = msdu_info;
3467
3468 /* change msdu len in hal rx desc */
3469 ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3470
3471 paddr = dma_map_single(ab->dev, defrag_skb->data,
3472 defrag_skb->len + skb_tailroom(defrag_skb),
3473 DMA_TO_DEVICE);
3474 if (dma_mapping_error(ab->dev, paddr))
3475 return -ENOMEM;
3476
3477 spin_lock_bh(&rx_refill_ring->idr_lock);
3478 buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
3479 rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
3480 spin_unlock_bh(&rx_refill_ring->idr_lock);
3481 if (buf_id < 0) {
3482 ret = -ENOMEM;
3483 goto err_unmap_dma;
3484 }
3485
3486 ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
3487 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
3488 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3489
3490 ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
3491 ab->hw_params.hal_params->rx_buf_rbm);
3492
3493 /* Fill mpdu details into reo entrance ring */
3494 srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
3495
3496 spin_lock_bh(&srng->lock);
3497 ath11k_hal_srng_access_begin(ab, srng);
3498
3499 reo_ent_ring = (struct hal_reo_entrance_ring *)
3500 ath11k_hal_srng_src_get_next_entry(ab, srng);
3501 if (!reo_ent_ring) {
3502 ath11k_hal_srng_access_end(ab, srng);
3503 spin_unlock_bh(&srng->lock);
3504 ret = -ENOSPC;
3505 goto err_free_idr;
3506 }
3507 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3508
3509 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3510 ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
3511 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
3512
3513 mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
3514 FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
3515 FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
3516 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
3517 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
3518 FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
3519 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
3520
3521 reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
3522 reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
3523 reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
3524 reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
3525 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
3526 reo_dest_ring->info0)) |
3527 FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
3528 ath11k_hal_srng_access_end(ab, srng);
3529 spin_unlock_bh(&srng->lock);
3530
3531 return 0;
3532
3533err_free_idr:
3534 spin_lock_bh(&rx_refill_ring->idr_lock);
3535 idr_remove(&rx_refill_ring->bufs_idr, buf_id);
3536 spin_unlock_bh(&rx_refill_ring->idr_lock);
3537err_unmap_dma:
3538 dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3539 DMA_TO_DEVICE);
3540 return ret;
3541}
3542
3543static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
3544 struct sk_buff *a, struct sk_buff *b)
3545{
3546 int frag1, frag2;
3547
3548 frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
3549 frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
3550
3551 return frag1 - frag2;
3552}
3553
3554static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
3555 struct sk_buff_head *frag_list,
3556 struct sk_buff *cur_frag)
3557{
3558 struct sk_buff *skb;
3559 int cmp;
3560
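 /* Insertion sort: keep the pending fragment list ordered by
 * ascending fragment number.
 */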
3561 skb_queue_walk(frag_list, skb) {
3562 cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
3563 if (cmp < 0)
3564 continue;
3565 __skb_queue_before(frag_list, skb, cur_frag);
3566 return;
3567 }
3568 __skb_queue_tail(frag_list, cur_frag);
3569}
3570
3571static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
3572{
3573 struct ieee80211_hdr *hdr;
3574 u64 pn = 0;
3575 u8 *ehdr;
3576 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3577
3578 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3579 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3580
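 /* Extract the 48-bit PN from the CCMP/GCMP IV: PN0/PN1 sit in
 * bytes 0-1, bytes 2-3 carry the reserved and key-id octets, and
 * PN2-PN5 follow in bytes 4-7.
 */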
3581 pn = ehdr[0];
3582 pn |= (u64)ehdr[1] << 8;
3583 pn |= (u64)ehdr[4] << 16;
3584 pn |= (u64)ehdr[5] << 24;
3585 pn |= (u64)ehdr[6] << 32;
3586 pn |= (u64)ehdr[7] << 40;
3587
3588 return pn;
3589}
3590
3591static bool
3592ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3593{
3594 enum hal_encrypt_type encrypt_type;
3595 struct sk_buff *first_frag, *skb;
3596 struct hal_rx_desc *desc;
3597 u64 last_pn;
3598 u64 cur_pn;
3599
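 /* For CCMP/GCMP, fragments of an MPDU must carry strictly
 * consecutive PNs; any gap indicates a replayed or forged
 * fragment, so the whole MPDU is dropped.
 */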
3600 first_frag = skb_peek(&rx_tid->rx_frags);
3601 desc = (struct hal_rx_desc *)first_frag->data;
3602
3603 encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
3604 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3605 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3606 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3607 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3608 return true;
3609
3610 last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
3611 skb_queue_walk(&rx_tid->rx_frags, skb) {
3612 if (skb == first_frag)
3613 continue;
3614
3615 cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
3616 if (cur_pn != last_pn + 1)
3617 return false;
3618 last_pn = cur_pn;
3619 }
3620 return true;
3621}
3622
3623static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
3624 struct sk_buff *msdu,
3625 u32 *ring_desc)
3626{
3627 struct ath11k_base *ab = ar->ab;
3628 struct hal_rx_desc *rx_desc;
3629 struct ath11k_peer *peer;
3630 struct dp_rx_tid *rx_tid;
3631 struct sk_buff *defrag_skb = NULL;
3632 u32 peer_id;
3633 u16 seqno, frag_no;
3634 u8 tid;
3635 int ret = 0;
3636 bool more_frags;
3637 bool is_mcbc;
3638
3639 rx_desc = (struct hal_rx_desc *)msdu->data;
3640 peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
3641 tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
3642 seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
3643 frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
3644 more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
3645 is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
3646
3647 /* Multicast/Broadcast fragments are not expected */
3648 if (is_mcbc)
3649 return -EINVAL;
3650
3651 if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
3652 !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
3653 tid > IEEE80211_NUM_TIDS)
3654 return -EINVAL;
3655
3656 /* An unfragmented packet arrived in the reo
3657 * exception ring; this should not happen, as
3658 * such packets typically come from the
3659 * reo2sw srngs.
3660 */
3661 if (WARN_ON_ONCE(!frag_no && !more_frags))
3662 return -EINVAL;
3663
3664 spin_lock_bh(&ab->base_lock);
3665 peer = ath11k_peer_find_by_id(ab, peer_id);
3666 if (!peer) {
3667 ath11k_warn(ab, "failed to find the peer to defragment the received fragment, peer_id %d\n",
3668 peer_id);
3669 ret = -ENOENT;
3670 goto out_unlock;
3671 }
3672 if (!peer->dp_setup_done) {
3673 ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
3674 peer->addr, peer_id);
3675 ret = -ENOENT;
3676 goto out_unlock;
3677 }
3678
3679 rx_tid = &peer->rx_tid[tid];
3680
3681 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3682 skb_queue_empty(&rx_tid->rx_frags)) {
3683 /* Flush stored fragments and start a new sequence */
3684 ath11k_dp_rx_frags_cleanup(rx_tid, true);
3685 rx_tid->cur_sn = seqno;
3686 }
3687
3688 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3689 /* Fragment already present */
3690 ret = -EINVAL;
3691 goto out_unlock;
3692 }
3693
3694 if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
3695 __skb_queue_tail(&rx_tid->rx_frags, msdu);
3696 else
3697 ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
3698
3699 rx_tid->rx_frag_bitmap |= BIT(frag_no);
3700 if (!more_frags)
3701 rx_tid->last_frag_no = frag_no;
3702
3703 if (frag_no == 0) {
3704 rx_tid->dst_ring_desc = kmemdup(ring_desc,
3705 sizeof(*rx_tid->dst_ring_desc),
3706 GFP_ATOMIC);
3707 if (!rx_tid->dst_ring_desc) {
3708 ret = -ENOMEM;
3709 goto out_unlock;
3710 }
3711 } else {
3712 ath11k_dp_rx_link_desc_return(ab, ring_desc,
3713 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3714 }
3715
3716 if (!rx_tid->last_frag_no ||
3717 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3718 mod_timer(&rx_tid->frag_timer, jiffies +
3719 ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
3720 goto out_unlock;
3721 }
3722
3723 spin_unlock_bh(&ab->base_lock);
3724 del_timer_sync(&rx_tid->frag_timer);
3725 spin_lock_bh(&ab->base_lock);
3726
3727 peer = ath11k_peer_find_by_id(ab, peer_id);
3728 if (!peer)
3729 goto err_frags_cleanup;
3730
3731 if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3732 goto err_frags_cleanup;
3733
3734 if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3735 goto err_frags_cleanup;
3736
3737 if (!defrag_skb)
3738 goto err_frags_cleanup;
3739
3740 if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3741 goto err_frags_cleanup;
3742
3743 ath11k_dp_rx_frags_cleanup(rx_tid, false);
3744 goto out_unlock;
3745
3746err_frags_cleanup:
3747 dev_kfree_skb_any(defrag_skb);
3748 ath11k_dp_rx_frags_cleanup(rx_tid, true);
3749out_unlock:
3750 spin_unlock_bh(&ab->base_lock);
3751 return ret;
3752}
3753
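/* Reap one rx buffer from the refill ring by its cookie, unmap it and
 * either drop it or hand it to the defrag path. Buffers that fail the
 * defrag step are freed and their link descriptor is returned to the
 * WBM idle list.
 */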
3754static int
3755ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
3756{
3757 struct ath11k_pdev_dp *dp = &ar->dp;
3758 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
3759 struct sk_buff *msdu;
3760 struct ath11k_skb_rxcb *rxcb;
3761 struct hal_rx_desc *rx_desc;
3762 u8 *hdr_status;
3763 u16 msdu_len;
3764 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3765
3766 spin_lock_bh(&rx_ring->idr_lock);
3767 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3768 if (!msdu) {
3769 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
3770 buf_id);
3771 spin_unlock_bh(&rx_ring->idr_lock);
3772 return -EINVAL;
3773 }
3774
3775 idr_remove(&rx_ring->bufs_idr, buf_id);
3776 spin_unlock_bh(&rx_ring->idr_lock);
3777
3778 rxcb = ATH11K_SKB_RXCB(msdu);
3779 dma_unmap_single(ar->ab->dev, rxcb->paddr,
3780 msdu->len + skb_tailroom(msdu),
3781 DMA_FROM_DEVICE);
3782
3783 if (drop) {
3784 dev_kfree_skb_any(msdu);
3785 return 0;
3786 }
3787
3788 rcu_read_lock();
3789 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3790 dev_kfree_skb_any(msdu);
3791 goto exit;
3792 }
3793
3794 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3795 dev_kfree_skb_any(msdu);
3796 goto exit;
3797 }
3798
3799 rx_desc = (struct hal_rx_desc *)msdu->data;
3800 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
3801 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3802 hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
3803 ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
3804 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
3805 sizeof(struct ieee80211_hdr));
3806 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
3807 sizeof(struct hal_rx_desc));
3808 dev_kfree_skb_any(msdu);
3809 goto exit;
3810 }
3811
3812 skb_put(msdu, hal_rx_desc_sz + msdu_len);
3813
3814 if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
3815 dev_kfree_skb_any(msdu);
3816 ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
3817 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3818 }
3819exit:
3820 rcu_read_unlock();
3821 return 0;
3822}
3823
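/* Process the REO exception ring: parse each error descriptor, validate
 * the return buffer manager and hand rx fragments (one msdu per link
 * descriptor) to the defrag path. Everything else is dropped, and the
 * reaped buffers are replenished per pdev afterwards.
 */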
3824int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
3825 int budget)
3826{
3827 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3828 struct dp_link_desc_bank *link_desc_banks;
3829 enum hal_rx_buf_return_buf_manager rbm;
3830 int tot_n_bufs_reaped, quota, ret, i;
3831 int n_bufs_reaped[MAX_RADIOS] = {0};
3832 struct dp_rxdma_ring *rx_ring;
3833 struct dp_srng *reo_except;
3834 u32 desc_bank, num_msdus;
3835 struct hal_srng *srng;
3836 struct ath11k_dp *dp;
3837 void *link_desc_va;
3838 int buf_id, mac_id;
3839 struct ath11k *ar;
3840 dma_addr_t paddr;
3841 u32 *desc;
3842 bool is_frag;
3843 bool drop = false;
3844
3845 tot_n_bufs_reaped = 0;
3846 quota = budget;
3847
3848 dp = &ab->dp;
3849 reo_except = &dp->reo_except_ring;
3850 link_desc_banks = dp->link_desc_banks;
3851
3852 srng = &ab->hal.srng_list[reo_except->ring_id];
3853
3854 spin_lock_bh(&srng->lock);
3855
3856 ath11k_hal_srng_access_begin(ab, srng);
3857
3858 while (budget &&
3859 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3860 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
3861
3862 ab->soc_stats.err_ring_pkts++;
3863 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
3864 &desc_bank);
3865 if (ret) {
3866 ath11k_warn(ab, "failed to parse error reo desc %d\n",
3867 ret);
3868 continue;
3869 }
3870 link_desc_va = link_desc_banks[desc_bank].vaddr +
3871 (paddr - link_desc_banks[desc_bank].paddr);
3872 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3873 &rbm);
3874 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
3875 rbm != HAL_RX_BUF_RBM_SW1_BM &&
3876 rbm != HAL_RX_BUF_RBM_SW3_BM) {
3877 ab->soc_stats.invalid_rbm++;
3878 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
3879 ath11k_dp_rx_link_desc_return(ab, desc,
3880 HAL_WBM_REL_BM_ACT_REL_MSDU);
3881 continue;
3882 }
3883
3884 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
3885
3886 /* Process only rx fragments with one msdu per link desc below, and drop
3887 * msdus indicated as errors.
3888 */
3889 if (!is_frag || num_msdus > 1) {
3890 drop = true;
3891 /* Return the link desc back to wbm idle list */
3892 ath11k_dp_rx_link_desc_return(ab, desc,
3893 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3894 }
3895
3896 for (i = 0; i < num_msdus; i++) {
3897 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3898 msdu_cookies[i]);
3899
3900 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
3901 msdu_cookies[i]);
3902
3903 ar = ab->pdevs[mac_id].ar;
3904
3905 if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
3906 n_bufs_reaped[mac_id]++;
3907 tot_n_bufs_reaped++;
3908 }
3909 }
3910
3911 if (tot_n_bufs_reaped >= quota) {
3912 tot_n_bufs_reaped = quota;
3913 goto exit;
3914 }
3915
3916 budget = quota - tot_n_bufs_reaped;
3917 }
3918
3919exit:
3920 ath11k_hal_srng_access_end(ab, srng);
3921
3922 spin_unlock_bh(&srng->lock);
3923
3924 for (i = 0; i < ab->num_radios; i++) {
3925 if (!n_bufs_reaped[i])
3926 continue;
3927
3928 ar = ab->pdevs[i].ar;
3929 rx_ring = &ar->dp.rx_refill_buf_ring;
3930
3931 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
3932 ab->hw_params.hal_params->rx_buf_rbm);
3933 }
3934
3935 return tot_n_bufs_reaped;
3936}
3937
3938static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3939 int msdu_len,
3940 struct sk_buff_head *msdu_list)
3941{
3942 struct sk_buff *skb, *tmp;
3943 struct ath11k_skb_rxcb *rxcb;
3944 int n_buffs;
3945
3946 n_buffs = DIV_ROUND_UP(msdu_len,
3947 (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
3948
3949 skb_queue_walk_safe(msdu_list, skb, tmp) {
3950 rxcb = ATH11K_SKB_RXCB(skb);
3951 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3952 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3953 if (!n_buffs)
3954 break;
3955 __skb_unlink(skb, msdu_list);
3956 dev_kfree_skb_any(skb);
3957 n_buffs--;
3958 }
3959 }
3960}
3961
3962static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
3963 struct ieee80211_rx_status *status,
3964 struct sk_buff_head *msdu_list)
3965{
3966 u16 msdu_len;
3967 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3968 struct rx_attention *rx_attention;
3969 u8 l3pad_bytes;
3970 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3971 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3972
3973 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
3974
3975 if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3976 /* First buffer will be freed by the caller, so deduct its length */
3977 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3978 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3979 return -EINVAL;
3980 }
3981
3982 rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
3983 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
3984 ath11k_warn(ar->ab,
3985 "msdu_done bit not set in null_q_des processing\n");
3986 __skb_queue_purge(msdu_list);
3987 return -EIO;
3988 }
3989
3990 /* Handle NULL queue descriptor violations arising out of a missing
3991 * REO queue for a given peer or a given TID. This typically
3992 * happens if a packet is received on a QoS enabled TID before the
3993 * ADDBA negotiation for that TID has set up the TID queue. It
3994 * may also happen for MC/BC frames if they are not routed to the
3995 * non-QoS TID queue, in the absence of any other default TID queue.
3996 * This error can show up in both the REO destination and WBM release rings.
3997 */
3998
3999 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
4000 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
4001
4002 if (rxcb->is_frag) {
4003 skb_pull(msdu, hal_rx_desc_sz);
4004 } else {
4005 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
4006
4007 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
4008 return -EINVAL;
4009
4010 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
4011 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
4012 }
4013 ath11k_dp_rx_h_ppdu(ar, desc, status);
4014
4015 ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
4016
4017 rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);
4018
4019 /* Note that the caller still has access to the msdu and completes
4020 * rx with mac80211, so there is no need to clean up the amsdu_list here.
4021 */
4022
4023 return 0;
4024}
4025
4026static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
4027 struct ieee80211_rx_status *status,
4028 struct sk_buff_head *msdu_list)
4029{
4030 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4031 bool drop = false;
4032
4033 ar->ab->soc_stats.reo_error[rxcb->err_code]++;
4034
4035 switch (rxcb->err_code) {
4036 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
4037 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
4038 drop = true;
4039 break;
4040 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
4041 /* TODO: Do not drop PN failed packets in the driver;
4042 * instead, it is better to drop such packets in mac80211
4043 * after incrementing the replay counters.
4044 */
4045 fallthrough;
4046 default:
4047 /* TODO: Review other errors and process them to mac80211
4048 * as appropriate.
4049 */
4050 drop = true;
4051 break;
4052 }
4053
4054 return drop;
4055}
4056
4057static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
4058 struct ieee80211_rx_status *status)
4059{
4060 u16 msdu_len;
4061 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
4062 u8 l3pad_bytes;
4063 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4064 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
4065
4066 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
4067 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
4068
4069 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
4070 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
4071 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
4072 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
4073
4074 ath11k_dp_rx_h_ppdu(ar, desc, status);
4075
4076 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
4077 RX_FLAG_DECRYPTED);
4078
4079 ath11k_dp_rx_h_undecap(ar, msdu, desc,
4080 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
4081}
4082
4083static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
4084 struct ieee80211_rx_status *status)
4085{
4086 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4087 bool drop = false;
4088
4089 ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
4090
4091 switch (rxcb->err_code) {
4092 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
4093 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
4094 break;
4095 default:
4096 /* TODO: Review other rxdma error codes to check if anything is
4097 * worth reporting to mac80211
4098 */
4099 drop = true;
4100 break;
4101 }
4102
4103 return drop;
4104}
4105
4106static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
4107 struct napi_struct *napi,
4108 struct sk_buff *msdu,
4109 struct sk_buff_head *msdu_list)
4110{
4111 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4112 struct ieee80211_rx_status rxs = {0};
4113 bool drop = true;
4114
4115 switch (rxcb->err_rel_src) {
4116 case HAL_WBM_REL_SRC_MODULE_REO:
4117 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
4118 break;
4119 case HAL_WBM_REL_SRC_MODULE_RXDMA:
4120 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
4121 break;
4122 default:
4123 /* msdu will get freed */
4124 break;
4125 }
4126
4127 if (drop) {
4128 dev_kfree_skb_any(msdu);
4129 return;
4130 }
4131
4132 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
4133}
4134
4135int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
4136 struct napi_struct *napi, int budget)
4137{
4138 struct ath11k *ar;
4139 struct ath11k_dp *dp = &ab->dp;
4140 struct dp_rxdma_ring *rx_ring;
4141 struct hal_rx_wbm_rel_info err_info;
4142 struct hal_srng *srng;
4143 struct sk_buff *msdu;
4144 struct sk_buff_head msdu_list[MAX_RADIOS];
4145 struct ath11k_skb_rxcb *rxcb;
4146 u32 *rx_desc;
4147 int buf_id, mac_id;
4148 int num_buffs_reaped[MAX_RADIOS] = {0};
4149 int total_num_buffs_reaped = 0;
4150 int ret, i;
4151
4152 for (i = 0; i < ab->num_radios; i++)
4153 __skb_queue_head_init(&msdu_list[i]);
4154
4155 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
4156
4157 spin_lock_bh(&srng->lock);
4158
4159 ath11k_hal_srng_access_begin(ab, srng);
4160
4161 while (budget) {
4162 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
4163 if (!rx_desc)
4164 break;
4165
4166 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
4167 if (ret) {
4168 ath11k_warn(ab,
4169 "failed to parse rx error in wbm_rel ring desc %d\n",
4170 ret);
4171 continue;
4172 }
4173
4174 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
4175 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
4176
4177 ar = ab->pdevs[mac_id].ar;
4178 rx_ring = &ar->dp.rx_refill_buf_ring;
4179
4180 spin_lock_bh(&rx_ring->idr_lock);
4181 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4182 if (!msdu) {
4183 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
4184 buf_id, mac_id);
4185 spin_unlock_bh(&rx_ring->idr_lock);
4186 continue;
4187 }
4188
4189 idr_remove(&rx_ring->bufs_idr, buf_id);
4190 spin_unlock_bh(&rx_ring->idr_lock);
4191
4192 rxcb = ATH11K_SKB_RXCB(msdu);
4193 dma_unmap_single(ab->dev, rxcb->paddr,
4194 msdu->len + skb_tailroom(msdu),
4195 DMA_FROM_DEVICE);
4196
4197 num_buffs_reaped[mac_id]++;
4198 total_num_buffs_reaped++;
4199 budget--;
4200
4201 if (err_info.push_reason !=
4202 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4203 dev_kfree_skb_any(msdu);
4204 continue;
4205 }
4206
4207 rxcb->err_rel_src = err_info.err_rel_src;
4208 rxcb->err_code = err_info.err_code;
4209 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
4210 __skb_queue_tail(&msdu_list[mac_id], msdu);
4211 }
4212
4213 ath11k_hal_srng_access_end(ab, srng);
4214
4215 spin_unlock_bh(&srng->lock);
4216
4217 if (!total_num_buffs_reaped)
4218 goto done;
4219
4220 for (i = 0; i < ab->num_radios; i++) {
4221 if (!num_buffs_reaped[i])
4222 continue;
4223
4224 ar = ab->pdevs[i].ar;
4225 rx_ring = &ar->dp.rx_refill_buf_ring;
4226
4227 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
4228 ab->hw_params.hal_params->rx_buf_rbm);
4229 }
4230
4231 rcu_read_lock();
4232 for (i = 0; i < ab->num_radios; i++) {
4233 if (!rcu_dereference(ab->pdevs_active[i])) {
4234 __skb_queue_purge(&msdu_list[i]);
4235 continue;
4236 }
4237
4238 ar = ab->pdevs[i].ar;
4239
4240 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
4241 __skb_queue_purge(&msdu_list[i]);
4242 continue;
4243 }
4244
4245 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
4246 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
4247 }
4248 rcu_read_unlock();
4249done:
4250 return total_num_buffs_reaped;
4251}
4252
4253int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
4254{
4255 struct ath11k *ar;
4256 struct dp_srng *err_ring;
4257 struct dp_rxdma_ring *rx_ring;
4258 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
4259 struct hal_srng *srng;
4260 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
4261 enum hal_rx_buf_return_buf_manager rbm;
4262 enum hal_reo_entr_rxdma_ecode rxdma_err_code;
4263 struct ath11k_skb_rxcb *rxcb;
4264 struct sk_buff *skb;
4265 struct hal_reo_entrance_ring *entr_ring;
4266 void *desc;
4267 int num_buf_freed = 0;
4268 int quota = budget;
4269 dma_addr_t paddr;
4270 u32 desc_bank;
4271 void *link_desc_va;
4272 int num_msdus;
4273 int i;
4274 int buf_id;
4275
4276 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
4277 err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
4278 mac_id)];
4279 rx_ring = &ar->dp.rx_refill_buf_ring;
4280
4281 srng = &ab->hal.srng_list[err_ring->ring_id];
4282
4283 spin_lock_bh(&srng->lock);
4284
4285 ath11k_hal_srng_access_begin(ab, srng);
4286
4287 while (quota-- &&
4288 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4289 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
4290
4291 entr_ring = (struct hal_reo_entrance_ring *)desc;
4292 rxdma_err_code =
4293 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4294 entr_ring->info1);
4295 ab->soc_stats.rxdma_error[rxdma_err_code]++;
4296
4297 link_desc_va = link_desc_banks[desc_bank].vaddr +
4298 (paddr - link_desc_banks[desc_bank].paddr);
4299 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
4300 msdu_cookies, &rbm);
4301
4302 for (i = 0; i < num_msdus; i++) {
4303 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4304 msdu_cookies[i]);
4305
4306 spin_lock_bh(&rx_ring->idr_lock);
4307 skb = idr_find(&rx_ring->bufs_idr, buf_id);
4308 if (!skb) {
4309 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
4310 buf_id);
4311 spin_unlock_bh(&rx_ring->idr_lock);
4312 continue;
4313 }
4314
4315 idr_remove(&rx_ring->bufs_idr, buf_id);
4316 spin_unlock_bh(&rx_ring->idr_lock);
4317
4318 rxcb = ATH11K_SKB_RXCB(skb);
4319 dma_unmap_single(ab->dev, rxcb->paddr,
4320 skb->len + skb_tailroom(skb),
4321 DMA_FROM_DEVICE);
4322 dev_kfree_skb_any(skb);
4323
4324 num_buf_freed++;
4325 }
4326
4327 ath11k_dp_rx_link_desc_return(ab, desc,
4328 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4329 }
4330
4331 ath11k_hal_srng_access_end(ab, srng);
4332
4333 spin_unlock_bh(&srng->lock);
4334
4335 if (num_buf_freed)
4336 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
4337 ab->hw_params.hal_params->rx_buf_rbm);
4338
4339 return budget - quota;
4340}
4341
4342void ath11k_dp_process_reo_status(struct ath11k_base *ab)
4343{
4344 struct ath11k_dp *dp = &ab->dp;
4345 struct hal_srng *srng;
4346 struct dp_reo_cmd *cmd, *tmp;
4347 bool found = false;
4348 u32 *reo_desc;
4349 u16 tag;
4350 struct hal_reo_status reo_status;
4351
4352 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
4353
4354 memset(&reo_status, 0, sizeof(reo_status));
4355
4356 spin_lock_bh(&srng->lock);
4357
4358 ath11k_hal_srng_access_begin(ab, srng);
4359
4360 while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4361 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
4362
4363 switch (tag) {
4364 case HAL_REO_GET_QUEUE_STATS_STATUS:
4365 ath11k_hal_reo_status_queue_stats(ab, reo_desc,
4366 &reo_status);
4367 break;
4368 case HAL_REO_FLUSH_QUEUE_STATUS:
4369 ath11k_hal_reo_flush_queue_status(ab, reo_desc,
4370 &reo_status);
4371 break;
4372 case HAL_REO_FLUSH_CACHE_STATUS:
4373 ath11k_hal_reo_flush_cache_status(ab, reo_desc,
4374 &reo_status);
4375 break;
4376 case HAL_REO_UNBLOCK_CACHE_STATUS:
4377 ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
4378 &reo_status);
4379 break;
4380 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
4381 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
4382 &reo_status);
4383 break;
4384 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
4385 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
4386 &reo_status);
4387 break;
4388 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
4389 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
4390 &reo_status);
4391 break;
4392 default:
4393 ath11k_warn(ab, "Unknown reo status type %d\n", tag);
4394 continue;
4395 }
4396
4397 spin_lock_bh(&dp->reo_cmd_lock);
4398 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
4399 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
4400 found = true;
4401 list_del(&cmd->list);
4402 break;
4403 }
4404 }
4405 spin_unlock_bh(&dp->reo_cmd_lock);
4406
4407 if (found) {
4408 cmd->handler(dp, (void *)&cmd->data,
4409 reo_status.uniform_hdr.cmd_status);
4410 kfree(cmd);
4411 }
4412
4413 found = false;
4414 }
4415
4416 ath11k_hal_srng_access_end(ab, srng);
4417
4418 spin_unlock_bh(&srng->lock);
4419}
4420
4421void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
4422{
4423 struct ath11k *ar = ab->pdevs[mac_id].ar;
4424
4425 ath11k_dp_rx_pdev_srng_free(ar);
4426 ath11k_dp_rxdma_pdev_buf_free(ar);
4427}
4428
4429int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
4430{
4431 struct ath11k *ar = ab->pdevs[mac_id].ar;
4432 struct ath11k_pdev_dp *dp = &ar->dp;
4433 u32 ring_id;
4434 int i;
4435 int ret;
4436
4437 ret = ath11k_dp_rx_pdev_srng_alloc(ar);
4438 if (ret) {
4439 ath11k_warn(ab, "failed to setup rx srngs\n");
4440 return ret;
4441 }
4442
4443 ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
4444 if (ret) {
4445 ath11k_warn(ab, "failed to setup rxdma ring\n");
4446 return ret;
4447 }
4448
4449 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4450 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
4451 if (ret) {
4452 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4453 ret);
4454 return ret;
4455 }
4456
4457 if (ab->hw_params.rx_mac_buf_ring) {
4458 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
4459 ring_id = dp->rx_mac_buf_ring[i].ring_id;
4460 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4461 mac_id + i, HAL_RXDMA_BUF);
4462 if (ret) {
4463 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4464 i, ret);
4465 return ret;
4466 }
4467 }
4468 }
4469
4470 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
4471 ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4472 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4473 mac_id + i, HAL_RXDMA_DST);
4474 if (ret) {
4475 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
4476 i, ret);
4477 return ret;
4478 }
4479 }
4480
4481 if (!ab->hw_params.rxdma1_enable)
4482 goto config_refill_ring;
4483
4484 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4485 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4486 mac_id, HAL_RXDMA_MONITOR_BUF);
4487 if (ret) {
4488 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4489 ret);
4490 return ret;
4491 }
4492 ret = ath11k_dp_tx_htt_srng_setup(ab,
4493 dp->rxdma_mon_dst_ring.ring_id,
4494 mac_id, HAL_RXDMA_MONITOR_DST);
4495 if (ret) {
4496 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4497 ret);
4498 return ret;
4499 }
4500 ret = ath11k_dp_tx_htt_srng_setup(ab,
4501 dp->rxdma_mon_desc_ring.ring_id,
4502 mac_id, HAL_RXDMA_MONITOR_DESC);
4503 if (ret) {
4504 ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n",
4505 ret);
4506 return ret;
4507 }
4508
4509config_refill_ring:
4510 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
4511 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
4512 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
4513 HAL_RXDMA_MONITOR_STATUS);
4514 if (ret) {
4515 ath11k_warn(ab,
4516 "failed to configure mon_status_refill_ring%d %d\n",
4517 i, ret);
4518 return ret;
4519 }
4520 }
4521
4522 return 0;
4523}
4524
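/* Hand out the next monitor fragment length, capped at what one rx
 * buffer can hold after the hal_rx_desc. As an illustration (with
 * hypothetical values of DP_RX_BUFFER_SIZE = 2048 and a 376 byte
 * hal_rx_desc), a 4000 byte MSDU would be handed out as fragments of
 * 1672, 1672 and 656 bytes across three calls.
 */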
4525static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
4526{
4527 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
4528 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
4529 *total_len -= *frag_len;
4530 } else {
4531 *frag_len = *total_len;
4532 *total_len = 0;
4533 }
4534}
4535
4536static
4537int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4538 void *p_last_buf_addr_info,
4539 u8 mac_id)
4540{
4541 struct ath11k_pdev_dp *dp = &ar->dp;
4542 struct dp_srng *dp_srng;
4543 void *hal_srng;
4544 void *src_srng_desc;
4545 int ret = 0;
4546
4547 if (ar->ab->hw_params.rxdma1_enable) {
4548 dp_srng = &dp->rxdma_mon_desc_ring;
4549 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4550 } else {
4551 dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
4552 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4553 }
4554
4555 ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4556
4557 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4558
4559 if (src_srng_desc) {
4560 struct ath11k_buffer_addr *src_desc = src_srng_desc;
4561
4562 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4563 } else {
4564 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4565 "Monitor Link Desc Ring %d Full", mac_id);
4566 ret = -ENOMEM;
4567 }
4568
4569 ath11k_hal_srng_access_end(ar->ab, hal_srng);
4570 return ret;
4571}
4572
4573static
4574void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4575 dma_addr_t *paddr, u32 *sw_cookie,
4576 u8 *rbm,
4577 void **pp_buf_addr_info)
4578{
4579 struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc;
4580 struct ath11k_buffer_addr *buf_addr_info;
4581
4582 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4583
4584 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
4585
4586 *pp_buf_addr_info = (void *)buf_addr_info;
4587}
4588
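/* Resize skb to exactly len bytes: trim when it is longer, otherwise
 * grow the tail, expanding the skb head if the tailroom is insufficient.
 * On allocation failure the skb is freed, so callers must not touch it
 * once -ENOMEM has been returned.
 */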
4589static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
4590{
4591 if (skb->len > len) {
4592 skb_trim(skb, len);
4593 } else {
4594 if (skb_tailroom(skb) < len - skb->len) {
4595 if ((pskb_expand_head(skb, 0,
4596 len - skb->len - skb_tailroom(skb),
4597 GFP_ATOMIC))) {
4598 dev_kfree_skb_any(skb);
4599 return -ENOMEM;
4600 }
4601 }
4602 skb_put(skb, (len - skb->len));
4603 }
4604 return 0;
4605}
4606
4607static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
4608 void *msdu_link_desc,
4609 struct hal_rx_msdu_list *msdu_list,
4610 u16 *num_msdus)
4611{
4612 struct hal_rx_msdu_details *msdu_details = NULL;
4613 struct rx_msdu_desc *msdu_desc_info = NULL;
4614 struct hal_rx_msdu_link *msdu_link = NULL;
4615 int i;
4616 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
4617 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
4618 u8 tmp = 0;
4619
4620 msdu_link = msdu_link_desc;
4621 msdu_details = &msdu_link->msdu_link[0];
4622
4623 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
4624 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
4625 msdu_details[i].buf_addr_info.info0) == 0) {
4626 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
4627 msdu_desc_info->info0 |= last;
4629 break;
4630 }
4631 msdu_desc_info = &msdu_details[i].rx_msdu_info;
4632
4633 if (!i)
4634 msdu_desc_info->info0 |= first;
4635 else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
4636 msdu_desc_info->info0 |= last;
4637 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
4638 msdu_list->msdu_info[i].msdu_len =
4639 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
4640 msdu_list->sw_cookie[i] =
4641 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
4642 msdu_details[i].buf_addr_info.info1);
4643 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
4644 msdu_details[i].buf_addr_info.info1);
4645 msdu_list->rbm[i] = tmp;
4646 }
4647 *num_msdus = i;
4648}
4649
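/* Compare the ppdu_id popped from the destination ring against the one
 * tracked from the status ring, treating the id as a wrapping counter.
 * A non-zero return means the two rings are out of step and the caller
 * must resynchronize on the returned ppdu_id; in the wrap-around case
 * the stale destination entry is also counted in rx_bufs_used so that
 * it gets replenished.
 */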
4650static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
4651 u32 *rx_bufs_used)
4652{
4653 u32 ret = 0;
4654
4655 if ((*ppdu_id < msdu_ppdu_id) &&
4656 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
4657 *ppdu_id = msdu_ppdu_id;
4658 ret = msdu_ppdu_id;
4659 } else if ((*ppdu_id > msdu_ppdu_id) &&
4660 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
4661 /* mon_dst is behind mon_status;
4662 * skip this dst_ring entry and free it
4663 */
4664 *rx_bufs_used += 1;
4665 *ppdu_id = msdu_ppdu_id;
4666 ret = msdu_ppdu_id;
4667 }
4668 return ret;
4669}
4670
4671static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
4672 bool *is_frag, u32 *total_len,
4673 u32 *frag_len, u32 *msdu_cnt)
4674{
4675 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
4676 if (!*is_frag) {
4677 *total_len = info->msdu_len;
4678 *is_frag = true;
4679 }
4680 ath11k_dp_mon_set_frag_len(total_len,
4681 frag_len);
4682 } else {
4683 if (*is_frag) {
4684 ath11k_dp_mon_set_frag_len(total_len,
4685 frag_len);
4686 } else {
4687 *frag_len = info->msdu_len;
4688 }
4689 *is_frag = false;
4690 *msdu_cnt -= 1;
4691 }
4692}
4693
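/* Pop one MPDU worth of monitor MSDU buffers: walk the chain of msdu
 * link descriptors referenced by the REO entrance ring entry, unmap and
 * chain the skbs into head_msdu/tail_msdu and return each consumed link
 * descriptor. Returns the number of rx buffers reaped so the caller can
 * replenish the ring.
 */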
4694static u32
4695ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
4696 void *ring_entry, struct sk_buff **head_msdu,
4697 struct sk_buff **tail_msdu, u32 *npackets,
4698 u32 *ppdu_id)
4699{
4700 struct ath11k_pdev_dp *dp = &ar->dp;
4701 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4702 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
4703 struct sk_buff *msdu = NULL, *last = NULL;
4704 struct hal_rx_msdu_list msdu_list;
4705 void *p_buf_addr_info, *p_last_buf_addr_info;
4706 struct hal_rx_desc *rx_desc;
4707 void *rx_msdu_link_desc;
4708 dma_addr_t paddr;
4709 u16 num_msdus = 0;
4710 u32 rx_buf_size, rx_pkt_offset, sw_cookie;
4711 u32 rx_bufs_used = 0, i = 0;
4712 u32 msdu_ppdu_id = 0, msdu_cnt = 0;
4713 u32 total_len = 0, frag_len = 0;
4714 bool is_frag, is_first_msdu;
4715 bool drop_mpdu = false;
4716 struct ath11k_skb_rxcb *rxcb;
4717 struct hal_reo_entrance_ring *ent_desc = ring_entry;
4718 int buf_id;
4719 u32 rx_link_buf_info[2];
4720 u8 rbm;
4721
4722 if (!ar->ab->hw_params.rxdma1_enable)
4723 rx_ring = &dp->rx_refill_buf_ring;
4724
4725 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
4726 &sw_cookie,
4727 &p_last_buf_addr_info, &rbm,
4728 &msdu_cnt);
4729
4730 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
4731 ent_desc->info1) ==
4732 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4733 u8 rxdma_err =
4734 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4735 ent_desc->info1);
4736 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
4737 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
4738 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
4739 drop_mpdu = true;
4740 pmon->rx_mon_stats.dest_mpdu_drop++;
4741 }
4742 }
4743
4744 is_frag = false;
4745 is_first_msdu = true;
4746
4747 do {
4748 if (pmon->mon_last_linkdesc_paddr == paddr) {
4749 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
4750 return rx_bufs_used;
4751 }
4752
4753 if (ar->ab->hw_params.rxdma1_enable)
4754 rx_msdu_link_desc =
4755 (void *)pmon->link_desc_banks[sw_cookie].vaddr +
4756 (paddr - pmon->link_desc_banks[sw_cookie].paddr);
4757 else
4758 rx_msdu_link_desc =
4759 (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
4760 (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
4761
4762 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
4763 &num_msdus);
4764
4765 for (i = 0; i < num_msdus; i++) {
4766 u32 l2_hdr_offset;
4767
4768 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
4769 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4770 "i %d last_cookie %d is the same\n",
4771 i, pmon->mon_last_buf_cookie);
4772 drop_mpdu = true;
4773 pmon->rx_mon_stats.dup_mon_buf_cnt++;
4774 continue;
4775 }
4776 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4777 msdu_list.sw_cookie[i]);
4778
4779 spin_lock_bh(&rx_ring->idr_lock);
4780 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4781 spin_unlock_bh(&rx_ring->idr_lock);
4782 if (!msdu) {
4783 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4784 "msdu_pop: invalid buf_id %d\n", buf_id);
4785 break;
4786 }
4787 rxcb = ATH11K_SKB_RXCB(msdu);
4788 if (!rxcb->unmapped) {
4789 dma_unmap_single(ar->ab->dev, rxcb->paddr,
4790 msdu->len +
4791 skb_tailroom(msdu),
4792 DMA_FROM_DEVICE);
4793 rxcb->unmapped = 1;
4794 }
4795 if (drop_mpdu) {
4796 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4797 "i %d drop msdu %p *ppdu_id %x\n",
4798 i, msdu, *ppdu_id);
4799 dev_kfree_skb_any(msdu);
4800 msdu = NULL;
4801 goto next_msdu;
4802 }
4803
4804 rx_desc = (struct hal_rx_desc *)msdu->data;
4805
4806 rx_pkt_offset = sizeof(struct hal_rx_desc);
4807 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
4808
4809 if (is_first_msdu) {
4810 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
4811 drop_mpdu = true;
4812 dev_kfree_skb_any(msdu);
4813 msdu = NULL;
4814 pmon->mon_last_linkdesc_paddr = paddr;
4815 goto next_msdu;
4816 }
4817
4818 msdu_ppdu_id =
4819 ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
4820
4821 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
4822 ppdu_id,
4823 &rx_bufs_used)) {
4824 if (rx_bufs_used) {
4825 drop_mpdu = true;
4826 dev_kfree_skb_any(msdu);
4827 msdu = NULL;
4828 goto next_msdu;
4829 }
4830 return rx_bufs_used;
4831 }
4832 pmon->mon_last_linkdesc_paddr = paddr;
4833 is_first_msdu = false;
4834 }
4835 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
4836 &is_frag, &total_len,
4837 &frag_len, &msdu_cnt);
4838 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
4839
4840 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
4841
4842 if (!(*head_msdu))
4843 *head_msdu = msdu;
4844 else if (last)
4845 last->next = msdu;
4846
4847 last = msdu;
4848next_msdu:
4849 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
4850 rx_bufs_used++;
4851 spin_lock_bh(&rx_ring->idr_lock);
4852 idr_remove(&rx_ring->bufs_idr, buf_id);
4853 spin_unlock_bh(&rx_ring->idr_lock);
4854 }
4855
4856 ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
4857
4858 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
4859 &sw_cookie, &rbm,
4860 &p_buf_addr_info);
4861
4862 if (ar->ab->hw_params.rxdma1_enable) {
4863 if (ath11k_dp_rx_monitor_link_desc_return(ar,
4864 p_last_buf_addr_info,
4865 dp->mac_id))
4866 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4867 "dp_rx_monitor_link_desc_return failed");
4868 } else {
4869 ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
4870 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4871 }
4872
4873 p_last_buf_addr_info = p_buf_addr_info;
4874
4875 } while (paddr && msdu_cnt);
4876
4877 if (last)
4878 last->next = NULL;
4879
4880 *tail_msdu = msdu;
4881
4882 if (msdu_cnt == 0)
4883 *npackets = 1;
4884
4885 return rx_bufs_used;
4886}
4887
4888static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
4889{
4890 u32 rx_pkt_offset, l2_hdr_offset;
4891
4892 rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
4893 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
4894 (struct hal_rx_desc *)msdu->data);
4895 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4896}
4897
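/* Stitch the MSDU chain of one monitor PPDU into a frame mac80211 can
 * accept: for raw decap just strip the rx descriptors and the trailing
 * FCS; for native wifi decap restore the 802.11 QoS header from the rx
 * descriptor in front of each payload. Other decap formats are not
 * supported and fail the merge.
 */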
4898static struct sk_buff *
4899ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
4900 u32 mac_id, struct sk_buff *head_msdu,
4901 struct sk_buff *last_msdu,
4902 struct ieee80211_rx_status *rxs, bool *fcs_err)
4903{
4904 struct ath11k_base *ab = ar->ab;
4905 struct sk_buff *msdu, *prev_buf;
4906 struct hal_rx_desc *rx_desc;
4907 char *hdr_desc;
4908 u8 *dest, decap_format;
4909 struct ieee80211_hdr_3addr *wh;
4910 struct rx_attention *rx_attention;
4911 u32 err_bitmap;
4912
4913 if (!head_msdu)
4914 goto err_merge_fail;
4915
4916 rx_desc = (struct hal_rx_desc *)head_msdu->data;
4917 rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
4918 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
4919
4920 if (err_bitmap & DP_RX_MPDU_ERR_FCS)
4921 *fcs_err = true;
4922
4923 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
4924 return NULL;
4925
4926 decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);
4927
4928 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
4929
4930 if (decap_format == DP_RX_DECAP_TYPE_RAW) {
4931 ath11k_dp_rx_msdus_set_payload(ar, head_msdu);
4932
4933 prev_buf = head_msdu;
4934 msdu = head_msdu->next;
4935
4936 while (msdu) {
4937 ath11k_dp_rx_msdus_set_payload(ar, msdu);
4938
4939 prev_buf = msdu;
4940 msdu = msdu->next;
4941 }
4942
4943 prev_buf->next = NULL;
4944
4945 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
4946 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
4947 u8 qos_pkt = 0;
4948
4949 rx_desc = (struct hal_rx_desc *)head_msdu->data;
4950 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
4951
4952 /* Base size */
4953 wh = (struct ieee80211_hdr_3addr *)hdr_desc;
4954
4955 if (ieee80211_is_data_qos(wh->frame_control))
4956 qos_pkt = 1;
4957
4958 msdu = head_msdu;
4959
4960 while (msdu) {
4961 ath11k_dp_rx_msdus_set_payload(ar, msdu);
4962 if (qos_pkt) {
4963 dest = skb_push(msdu, sizeof(__le16));
4964 if (!dest)
4965 goto err_merge_fail;
4966 memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
4967 }
4968 prev_buf = msdu;
4969 msdu = msdu->next;
4970 }
4971 dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
4972 if (!dest)
4973 goto err_merge_fail;
4974
4975 ath11k_dbg(ab, ATH11K_DBG_DATA,
4976 "mpdu_buf %p mpdu_buf->len %u",
4977 prev_buf, prev_buf->len);
4978 } else {
4979 ath11k_dbg(ab, ATH11K_DBG_DATA,
4980 "decap format %d is not supported!\n",
4981 decap_format);
4982 goto err_merge_fail;
4983 }
4984
4985 return head_msdu;
4986
4987err_merge_fail:
4988 return NULL;
4989}
4990
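/* Pack the HE fields in the order expected by struct
 * ieee80211_radiotap_he (data1..data6, little endian); mac80211 consumes
 * this verbatim when RX_FLAG_RADIOTAP_HE is set.
 */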
4991static void
4992ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
4993 u8 *rtap_buf)
4994{
4995 u32 rtap_len = 0;
4996
4997 put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4998 rtap_len += 2;
4999
5000 put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
5001 rtap_len += 2;
5002
5003 put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
5004 rtap_len += 2;
5005
5006 put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
5007 rtap_len += 2;
5008
5009 put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
5010 rtap_len += 2;
5011
5012 put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
5013}
5014
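/* Likewise pack struct ieee80211_radiotap_he_mu: flags1, flags2 and the
 * RU allocation octets, consumed by mac80211 when RX_FLAG_RADIOTAP_HE_MU
 * is set.
 */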
5015static void
5016ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
5017 u8 *rtap_buf)
5018{
5019 u32 rtap_len = 0;
5020
5021 put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
5022 rtap_len += 2;
5023
5024 put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
5025 rtap_len += 2;
5026
5027 rtap_buf[rtap_len] = rx_status->he_RU[0];
5028 rtap_len += 1;
5029
5030 rtap_buf[rtap_len] = rx_status->he_RU[1];
5031 rtap_len += 1;
5032
5033 rtap_buf[rtap_len] = rx_status->he_RU[2];
5034 rtap_len += 1;
5035
5036 rtap_buf[rtap_len] = rx_status->he_RU[3];
5037}
5038
5039static void ath11k_update_radiotap(struct ath11k *ar,
5040 struct hal_rx_mon_ppdu_info *ppduinfo,
5041 struct sk_buff *mon_skb,
5042 struct ieee80211_rx_status *rxs)
5043{
5044 struct ieee80211_supported_band *sband;
5045 u8 *ptr = NULL;
5046
5047 rxs->flag |= RX_FLAG_MACTIME_START;
5048 rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
5049
5050 if (ppduinfo->nss)
5051 rxs->nss = ppduinfo->nss;
5052
5053 if (ppduinfo->he_mu_flags) {
5054 rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
5055 rxs->encoding = RX_ENC_HE;
5056 ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
5057 ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
5058 } else if (ppduinfo->he_flags) {
5059 rxs->flag |= RX_FLAG_RADIOTAP_HE;
5060 rxs->encoding = RX_ENC_HE;
5061 ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
5062 ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
5063 rxs->rate_idx = ppduinfo->rate;
5064 } else if (ppduinfo->vht_flags) {
5065 rxs->encoding = RX_ENC_VHT;
5066 rxs->rate_idx = ppduinfo->rate;
5067 } else if (ppduinfo->ht_flags) {
5068 rxs->encoding = RX_ENC_HT;
5069 rxs->rate_idx = ppduinfo->rate;
5070 } else {
5071 rxs->encoding = RX_ENC_LEGACY;
5072 sband = &ar->mac.sbands[rxs->band];
5073 rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
5074 ppduinfo->cck_flag);
5075 }
5076
5077 rxs->mactime = ppduinfo->tsft;
5078}
5079
5080static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
5081 struct sk_buff *head_msdu,
5082 struct hal_rx_mon_ppdu_info *ppduinfo,
5083 struct sk_buff *tail_msdu,
5084 struct napi_struct *napi)
5085{
5086 struct ath11k_pdev_dp *dp = &ar->dp;
5087 struct sk_buff *mon_skb, *skb_next, *header;
5088 struct ieee80211_rx_status *rxs = &dp->rx_status;
5089 bool fcs_err = false;
5090
5091 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
5092 tail_msdu, rxs, &fcs_err);
5093
5094 if (!mon_skb)
5095 goto mon_deliver_fail;
5096
5097 header = mon_skb;
5098
5099 rxs->flag = 0;
5100
5101 if (fcs_err)
5102 rxs->flag = RX_FLAG_FAILED_FCS_CRC;
5103
5104 do {
5105 skb_next = mon_skb->next;
5106 if (!skb_next)
5107 rxs->flag &= ~RX_FLAG_AMSDU_MORE;
5108 else
5109 rxs->flag |= RX_FLAG_AMSDU_MORE;
5110
5111 if (mon_skb == header) {
5112 header = NULL;
5113 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
5114 } else {
5115 rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
5116 }
5117 rxs->flag |= RX_FLAG_ONLY_MONITOR;
5118 ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
5119
5120 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
5121 mon_skb = skb_next;
5122 } while (mon_skb);
5123 rxs->flag = 0;
5124
5125 return 0;
5126
5127mon_deliver_fail:
5128 mon_skb = head_msdu;
5129 while (mon_skb) {
5130 skb_next = mon_skb->next;
5131 dev_kfree_skb_any(mon_skb);
5132 mon_skb = skb_next;
5133 }
5134 return -EINVAL;
5135}
5136
5137/* The destination ring processing is considered stuck if the destination
5138 * ring does not move while the status ring moves 16 PPDUs. The destination
5139 * ring processing skips such a PPDU as a workaround.
5140 */
5141#define MON_DEST_RING_STUCK_MAX_CNT 16
5142
5143static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
5144 u32 quota, struct napi_struct *napi)
5145{
5146 struct ath11k_pdev_dp *dp = &ar->dp;
5147 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5148 const struct ath11k_hw_hal_params *hal_params;
5149 void *ring_entry;
5150 void *mon_dst_srng;
5151 u32 ppdu_id;
5152 u32 rx_bufs_used;
5153 u32 ring_id;
5154 struct ath11k_pdev_mon_stats *rx_mon_stats;
5155 u32 npackets = 0;
5156 u32 mpdu_rx_bufs_used;
5157
5158 if (ar->ab->hw_params.rxdma1_enable)
5159 ring_id = dp->rxdma_mon_dst_ring.ring_id;
5160 else
5161 ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
5162
5163 mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
5164
5165 spin_lock_bh(&pmon->mon_lock);
5166
5167 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5168
5169 ppdu_id = pmon->mon_ppdu_info.ppdu_id;
5170 rx_bufs_used = 0;
5171 rx_mon_stats = &pmon->rx_mon_stats;
5172
5173 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5174 struct sk_buff *head_msdu, *tail_msdu;
5175
5176 head_msdu = NULL;
5177 tail_msdu = NULL;
5178
5179 mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
5180 &head_msdu,
5181 &tail_msdu,
5182 &npackets, &ppdu_id);
5183
5184 rx_bufs_used += mpdu_rx_bufs_used;
5185
5186 if (mpdu_rx_bufs_used) {
5187 dp->mon_dest_ring_stuck_cnt = 0;
5188 } else {
5189 dp->mon_dest_ring_stuck_cnt++;
5190 rx_mon_stats->dest_mon_not_reaped++;
5191 }
5192
5193 if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
5194 rx_mon_stats->dest_mon_stuck++;
5195 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5196 "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
5197 pmon->mon_ppdu_info.ppdu_id, ppdu_id,
5198 dp->mon_dest_ring_stuck_cnt,
5199 rx_mon_stats->dest_mon_not_reaped,
5200 rx_mon_stats->dest_mon_stuck);
5201 pmon->mon_ppdu_info.ppdu_id = ppdu_id;
5202 continue;
5203 }
5204
5205 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
5206 pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5207 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5208 "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
5209 ppdu_id, pmon->mon_ppdu_info.ppdu_id,
5210 rx_mon_stats->dest_mon_not_reaped,
5211 rx_mon_stats->dest_mon_stuck);
5212 break;
5213 }
5214 if (head_msdu && tail_msdu) {
5215 ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
5216 &pmon->mon_ppdu_info,
5217 tail_msdu, napi);
5218 rx_mon_stats->dest_mpdu_done++;
5219 }
5220
5221 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5222 mon_dst_srng);
5223 }
5224 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5225
5226 spin_unlock_bh(&pmon->mon_lock);
5227
5228 if (rx_bufs_used) {
5229 rx_mon_stats->dest_ppdu_done++;
5230 hal_params = ar->ab->hw_params.hal_params;
5231
5232 if (ar->ab->hw_params.rxdma1_enable)
5233 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5234 &dp->rxdma_mon_buf_ring,
5235 rx_bufs_used,
5236 hal_params->rx_buf_rbm);
5237 else
5238 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5239 &dp->rx_refill_buf_ring,
5240 rx_bufs_used,
5241 hal_params->rx_buf_rbm);
5242 }
5243}
5244
5245int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
5246 struct napi_struct *napi, int budget)
5247{
5248 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5249 enum hal_rx_mon_status hal_status;
5250 struct sk_buff *skb;
5251 struct sk_buff_head skb_list;
5252 struct ath11k_peer *peer;
5253 struct ath11k_sta *arsta;
5254 int num_buffs_reaped = 0;
5255 u32 rx_buf_sz;
5256 u16 log_type;
5257 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
5258 struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
5259 struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
5260
5261 __skb_queue_head_init(&skb_list);
5262
5263 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
5264 &skb_list);
5265 if (!num_buffs_reaped)
5266 goto exit;
5267
5268 memset(ppdu_info, 0, sizeof(*ppdu_info));
5269 ppdu_info->peer_id = HAL_INVALID_PEERID;
5270
5271 while ((skb = __skb_dequeue(&skb_list))) {
5272 if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
5273 log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
5274 rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
5275 } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
5276 log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
5277 rx_buf_sz = DP_RX_BUFFER_SIZE;
5278 } else {
5279 log_type = ATH11K_PKTLOG_TYPE_INVALID;
5280 rx_buf_sz = 0;
5281 }
5282
5283 if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
5284 trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5285
5286 memset(ppdu_info, 0, sizeof(*ppdu_info));
5287 ppdu_info->peer_id = HAL_INVALID_PEERID;
5288 hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
5289
5290 if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5291 pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
5292 hal_status == HAL_TLV_STATUS_PPDU_DONE) {
5293 rx_mon_stats->status_ppdu_done++;
5294 pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
5295 if (!ab->hw_params.full_monitor_mode) {
5296 ath11k_dp_rx_mon_dest_process(ar, mac_id,
5297 budget, napi);
5298 pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5299 }
5300 }
5301
5302 if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
5303 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
5304 dev_kfree_skb_any(skb);
5305 continue;
5306 }
5307
5308 rcu_read_lock();
5309 spin_lock_bh(&ab->base_lock);
5310 peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
5311
5312 if (!peer || !peer->sta) {
5313 ath11k_dbg(ab, ATH11K_DBG_DATA,
5314 "failed to find the peer with peer_id %d\n",
5315 ppdu_info->peer_id);
5316 goto next_skb;
5317 }
5318
5319 arsta = ath11k_sta_to_arsta(peer->sta);
5320 ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
5321
5322 if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
5323 trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5324
5325next_skb:
5326 spin_unlock_bh(&ab->base_lock);
5327 rcu_read_unlock();
5328
5329 dev_kfree_skb_any(skb);
5330 memset(ppdu_info, 0, sizeof(*ppdu_info));
5331 ppdu_info->peer_id = HAL_INVALID_PEERID;
5332 }
5333exit:
5334 return num_buffs_reaped;
5335}
5336
5337static u32
5338ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
5339 void *ring_entry, struct sk_buff **head_msdu,
5340 struct sk_buff **tail_msdu,
5341 struct hal_sw_mon_ring_entries *sw_mon_entries)
5342{
5343 struct ath11k_pdev_dp *dp = &ar->dp;
5344 struct ath11k_mon_data *pmon = &dp->mon_data;
5345 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
5346 struct sk_buff *msdu = NULL, *last = NULL;
5347 struct hal_sw_monitor_ring *sw_desc = ring_entry;
5348 struct hal_rx_msdu_list msdu_list;
5349 struct hal_rx_desc *rx_desc;
5350 struct ath11k_skb_rxcb *rxcb;
5351 void *rx_msdu_link_desc;
5352 void *p_buf_addr_info, *p_last_buf_addr_info;
5353 int buf_id, i = 0;
5354 u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
5355 u32 rx_bufs_used = 0, msdu_cnt = 0;
5356 u32 total_len = 0, frag_len = 0, sw_cookie;
5357 u16 num_msdus = 0;
5358 u8 rxdma_err, rbm;
5359 bool is_frag, is_first_msdu;
5360 bool drop_mpdu = false;
5361
5362 ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
5363
5364 sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
5365 sw_mon_entries->end_of_ppdu = false;
5366 sw_mon_entries->drop_ppdu = false;
5367 p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
5368 msdu_cnt = sw_mon_entries->msdu_cnt;
5369
5370 sw_mon_entries->end_of_ppdu =
5371 FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
5372 if (sw_mon_entries->end_of_ppdu)
5373 return rx_bufs_used;
5374
5375 if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
5376 sw_desc->info0) ==
5377 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
5378 rxdma_err =
5379 FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
5380 sw_desc->info0);
5381 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
5382 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
5383 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
5384 pmon->rx_mon_stats.dest_mpdu_drop++;
5385 drop_mpdu = true;
5386 }
5387 }
5388
5389 is_frag = false;
5390 is_first_msdu = true;
5391
5392 do {
5393 rx_msdu_link_desc =
5394 (u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
5395 (sw_mon_entries->mon_dst_paddr -
5396 pmon->link_desc_banks[sw_cookie].paddr);
5397
5398 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
5399 &num_msdus);
5400
5401 for (i = 0; i < num_msdus; i++) {
5402 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
5403 msdu_list.sw_cookie[i]);
5404
5405 spin_lock_bh(&rx_ring->idr_lock);
5406 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
5407 if (!msdu) {
5408 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5409 "full mon msdu_pop: invalid buf_id %d\n",
5410 buf_id);
5411 spin_unlock_bh(&rx_ring->idr_lock);
5412 break;
5413 }
5414 idr_remove(&rx_ring->bufs_idr, buf_id);
5415 spin_unlock_bh(&rx_ring->idr_lock);
5416
5417 rxcb = ATH11K_SKB_RXCB(msdu);
5418 if (!rxcb->unmapped) {
5419 dma_unmap_single(ar->ab->dev, rxcb->paddr,
5420 msdu->len +
5421 skb_tailroom(msdu),
5422 DMA_FROM_DEVICE);
5423 rxcb->unmapped = 1;
5424 }
5425 if (drop_mpdu) {
5426 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5427 "full mon: i %d drop msdu %p *ppdu_id %x\n",
5428 i, msdu, sw_mon_entries->ppdu_id);
5429 dev_kfree_skb_any(msdu);
5430 msdu_cnt--;
5431 goto next_msdu;
5432 }
5433
5434 rx_desc = (struct hal_rx_desc *)msdu->data;
5435
5436 rx_pkt_offset = sizeof(struct hal_rx_desc);
5437 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
5438
5439 if (is_first_msdu) {
5440 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
5441 drop_mpdu = true;
5442 dev_kfree_skb_any(msdu);
5443 msdu = NULL;
5444 goto next_msdu;
5445 }
5446 is_first_msdu = false;
5447 }
5448
5449 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
5450 &is_frag, &total_len,
5451 &frag_len, &msdu_cnt);
5452
5453 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
5454
5455 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
5456
5457 if (!(*head_msdu))
5458 *head_msdu = msdu;
5459 else if (last)
5460 last->next = msdu;
5461
5462 last = msdu;
5463next_msdu:
5464 rx_bufs_used++;
5465 }
5466
5467 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
5468 &sw_mon_entries->mon_dst_paddr,
5469 &sw_mon_entries->mon_dst_sw_cookie,
5470 &rbm,
5471 &p_buf_addr_info);
5472
5473 if (ath11k_dp_rx_monitor_link_desc_return(ar,
5474 p_last_buf_addr_info,
5475 dp->mac_id))
5476 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5477 "full mon: dp_rx_monitor_link_desc_return failed\n");
5478
5479 p_last_buf_addr_info = p_buf_addr_info;
5480
5481 } while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
5482
5483 if (last)
5484 last->next = NULL;
5485
5486 *tail_msdu = msdu;
5487
5488 return rx_bufs_used;
5489}
5490
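/* Queue one reassembled MPDU (a head/tail skb chain) on the full monitor
 * list. Note that the mon_mpdu argument is immediately overwritten with
 * a freshly allocated entry; the caller's pointer is only used as
 * scratch space.
 */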
5491static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
5492 struct dp_full_mon_mpdu *mon_mpdu,
5493 struct sk_buff *head,
5494 struct sk_buff *tail)
5495{
5496 mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
5497 if (!mon_mpdu)
5498 return -ENOMEM;
5499
5500 list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
5501 mon_mpdu->head = head;
5502 mon_mpdu->tail = tail;
5503
5504 return 0;
5505}
5506
5507static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
5508 struct dp_full_mon_mpdu *mon_mpdu)
5509{
5510 struct dp_full_mon_mpdu *tmp;
5511 struct sk_buff *tmp_msdu, *skb_next;
5512
5513 if (list_empty(&dp->dp_full_mon_mpdu_list))
5514 return;
5515
5516 list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5517 list_del(&mon_mpdu->list);
5518
5519 tmp_msdu = mon_mpdu->head;
5520 while (tmp_msdu) {
5521 skb_next = tmp_msdu->next;
5522 dev_kfree_skb_any(tmp_msdu);
5523 tmp_msdu = skb_next;
5524 }
5525
5526 kfree(mon_mpdu);
5527 }
5528}
5529
5530static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
5531 int mac_id,
5532 struct ath11k_mon_data *pmon,
5533 struct napi_struct *napi)
5534{
5535 struct ath11k_pdev_mon_stats *rx_mon_stats;
5536 struct dp_full_mon_mpdu *tmp;
5537 struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
5538 struct sk_buff *head_msdu, *tail_msdu;
5539 struct ath11k_base *ab = ar->ab;
5540 struct ath11k_dp *dp = &ab->dp;
5541 int ret = 0;
5542
5543 rx_mon_stats = &pmon->rx_mon_stats;
5544
5545 list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5546 list_del(&mon_mpdu->list);
5547 head_msdu = mon_mpdu->head;
5548 tail_msdu = mon_mpdu->tail;
5549 if (head_msdu && tail_msdu) {
5550 ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
5551 &pmon->mon_ppdu_info,
5552 tail_msdu, napi);
5553 rx_mon_stats->dest_mpdu_done++;
5554 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
5555 }
5556 kfree(mon_mpdu);
5557 }
5558
5559 return ret;
5560}
5561
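/* Reap the status ring until it matches the destination ring entry being
 * held (DP_MON_STATUS_MATCH), then deliver the buffered PPDU and release
 * the hold; if the status ring has already moved past the held entry,
 * mark the PPDU to be dropped instead.
 */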
static int
ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
					  struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_sw_mon_ring_entries *sw_mon_entries;
	int quota = 0, work = 0, count;

	sw_mon_entries = &pmon->sw_mon_entries;

	while (pmon->hold_mon_dst_ring) {
		quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
							napi, 1);
		if (pmon->buf_state == DP_MON_STATUS_MATCH) {
			count = sw_mon_entries->status_buf_count;
			if (count > 1) {
				quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
									 napi, count);
			}

			ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
							   pmon, napi);
			pmon->hold_mon_dst_ring = false;
		} else if (!pmon->mon_status_paddr ||
			   pmon->buf_state == DP_MON_STATUS_LEAD) {
			sw_mon_entries->drop_ppdu = true;
			pmon->hold_mon_dst_ring = false;
		}

		if (!quota)
			break;

		work += quota;
	}

	if (sw_mon_entries->drop_ppdu)
		ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);

	return work;
}

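/* Full monitor mode NAPI handler: pop complete MPDUs from the RXDMA
 * monitor destination ring and queue them until the end-of-PPDU entry is
 * seen, then hold the destination ring and reconcile the PPDU against the
 * status ring, replenishing the consumed RX buffers on the way out.
 */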
static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
					 struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_sw_mon_ring_entries *sw_mon_entries;
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	struct sk_buff *head_msdu, *tail_msdu;
	void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
	void *ring_entry;
	u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
	int quota = 0, ret;
	bool break_dst_ring = false;

	spin_lock_bh(&pmon->mon_lock);

	sw_mon_entries = &pmon->sw_mon_entries;
	rx_mon_stats = &pmon->rx_mon_stats;

	if (pmon->hold_mon_dst_ring) {
		spin_unlock_bh(&pmon->mon_lock);
		goto reap_status_ring;
	}

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		head_msdu = NULL;
		tail_msdu = NULL;

		mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
								   &head_msdu,
								   &tail_msdu,
								   sw_mon_entries);
		rx_bufs_used += mpdu_rx_bufs_used;

		if (!sw_mon_entries->end_of_ppdu) {
			if (head_msdu) {
				ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
									 pmon->mon_mpdu,
									 head_msdu,
									 tail_msdu);
				if (ret)
					break_dst_ring = true;
			}

			goto next_entry;
		} else {
			if (!sw_mon_entries->ppdu_id &&
			    !sw_mon_entries->mon_status_paddr) {
				break_dst_ring = true;
				goto next_entry;
			}
		}

		rx_mon_stats->dest_ppdu_done++;
		pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		pmon->buf_state = DP_MON_STATUS_LAG;
		pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
		pmon->hold_mon_dst_ring = true;
next_entry:
		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
		if (break_dst_ring)
			break;
	}

	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
	spin_unlock_bh(&pmon->mon_lock);

	if (rx_bufs_used) {
		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
					   &dp->rxdma_mon_buf_ring,
					   rx_bufs_used,
					   HAL_RX_BUF_RBM_SW3_BM);
	}

reap_status_ring:
	quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
							  napi, budget);

	return quota;
}

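/* Monitor ring NAPI entry point: dispatch to the full monitor path when
 * the hardware supports it and monitor mode has been started, otherwise
 * fall back to plain status ring processing.
 */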
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
				   struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
	int ret = 0;

	if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
	    ab->hw_params.full_monitor_mode)
		ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
	else
		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);

	return ret;
}

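/* Initialize the per-pdev monitor status state: the status skb queue, the
 * initial PPDU state machine value and the monitor statistics.
 */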
static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

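/* Attach the per-pdev monitor state and, when a dedicated RXDMA monitor
 * engine is present (rxdma1_enable), set up the link descriptor pool
 * backing the RXDMA monitor descriptor ring.
 */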
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_srng *mon_desc_srng = NULL;
	struct dp_srng *dp_srng;
	int ret = 0;
	u32 n_link_desc = 0;

	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
		return ret;
	}

	/* if rxdma1_enable is false, no need to setup
	 * rxdma_mon_desc_ring.
	 */
	if (!ar->ab->hw_params.rxdma1_enable)
		return 0;

	dp_srng = &dp->rxdma_mon_desc_ring;
	n_link_desc = dp_srng->size /
		ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
	mon_desc_srng =
		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
					n_link_desc);
	if (ret) {
		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed\n");
		return ret;
	}
	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);

	return 0;
}

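/* Release the monitor link descriptor banks set up by
 * ath11k_dp_rx_pdev_mon_attach().
 */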
static int ath11k_dp_mon_link_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;

	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
				    HAL_RXDMA_MONITOR_DESC,
				    &dp->rxdma_mon_desc_ring);
	return 0;
}

int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);
	return 0;
}

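/* Arm the monitor reap timer so the monitor rings are drained periodically
 * (every ATH11K_MON_TIMER_INTERVAL ms) while pktlog is active.
 */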
int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
{
	/* start reap timer */
	mod_timer(&ab->mon_reap_timer,
		  jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));

	return 0;
}

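/* Stop pktlog: optionally cancel the reap timer, then drain whatever is
 * left on the monitor rings so no stale entries survive.
 */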
int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
{
	int ret;

	if (stop_timer)
		del_timer_sync(&ab->mon_reap_timer);

	/* reap all the monitor related rings */
	ret = ath11k_dp_purge_mon_ring(ab);
	if (ret) {
		ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
		return ret;
	}

	return 0;
}