   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
   4 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
   5 */
   6
   7#include <linux/ieee80211.h>
   8#include <linux/kernel.h>
   9#include <linux/skbuff.h>
  10#include <crypto/hash.h>
  11#include "core.h"
  12#include "debug.h"
  13#include "hal_desc.h"
  14#include "hw.h"
  15#include "dp_rx.h"
  16#include "hal_rx.h"
  17#include "dp_tx.h"
  18#include "peer.h"
  19#include "dp_mon.h"
  20#include "debugfs_htt_stats.h"
  21
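/* Note: despite the _MS suffix this expands to jiffies (2 * HZ, i.e. two
 * seconds), so the value is meant to be added directly to jiffies when the
 * fragment reassembly timer is armed.
 */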
  22#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
  23
  24static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
  25						    struct hal_rx_desc *desc)
  26{
  27	if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
  28		return HAL_ENCRYPT_TYPE_OPEN;
  29
  30	return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
  31}
  32
  33u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
  34			     struct hal_rx_desc *desc)
  35{
  36	return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
  37}
  38
  39static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
  40					  struct hal_rx_desc *desc)
  41{
  42	return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
  43}
  44
  45static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
  46					  struct hal_rx_desc *desc)
  47{
  48	return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
  49}
  50
  51static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
  52				    struct hal_rx_desc *desc)
  53{
  54	return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
  55}
  56
  57static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
  58				      struct sk_buff *skb)
  59{
  60	struct ieee80211_hdr *hdr;
  61
  62	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
  63	return ieee80211_has_morefrags(hdr->frame_control);
  64}
  65
  66static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
  67				  struct sk_buff *skb)
  68{
  69	struct ieee80211_hdr *hdr;
  70
  71	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
  72	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
  73}
  74
  75static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
  76				 struct hal_rx_desc *desc)
  77{
  78	return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
  79}
  80
  81static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
  82				     struct hal_rx_desc *desc)
  83{
  84	return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
  85}
  86
  87static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
  88					 struct hal_rx_desc *desc)
  89{
  90	return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
  91}
  92
  93static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
  94					 struct hal_rx_desc *desc)
  95{
  96	return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
  97}
  98
  99static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
 100					struct hal_rx_desc *desc)
 101{
 102	return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
 103}
 104
 105u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
 106			    struct hal_rx_desc *desc)
 107{
 108	return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
 109}
 110
 111static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
 112				   struct hal_rx_desc *desc)
 113{
 114	return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
 115}
 116
 117static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
 118			     struct hal_rx_desc *desc)
 119{
 120	return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
 121}
 122
 123static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
 124				  struct hal_rx_desc *desc)
 125{
 126	return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
 127}
 128
 129static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
 130			       struct hal_rx_desc *desc)
 131{
 132	return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
 133}
 134
 135static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
 136			       struct hal_rx_desc *desc)
 137{
 138	return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
 139}
 140
 141static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
 142				  struct hal_rx_desc *desc)
 143{
 144	return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
 145}
 146
 147static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
 148			     struct hal_rx_desc *desc)
 149{
 150	return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
 151}
 152
 153static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
 154			     struct hal_rx_desc *desc)
 155{
 156	return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
 157}
 158
 159static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
 160				  struct hal_rx_desc *desc)
 161{
 162	return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
 163}
 164
 165u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
 166			struct hal_rx_desc *desc)
 167{
 168	return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
 169}
 170
 171static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
 172				      struct hal_rx_desc *desc)
 173{
 174	return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
 175}
 176
 177static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
 178				     struct hal_rx_desc *desc)
 179{
 180	return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
 181}
 182
 183static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
 184					   struct hal_rx_desc *fdesc,
 185					   struct hal_rx_desc *ldesc)
 186{
 187	ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
 188}
 189
 190static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
 191					  struct hal_rx_desc *desc,
 192					  u16 len)
 193{
 194	ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
 195}
 196
 197static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
 198				      struct hal_rx_desc *desc)
 199{
 200	return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
 201		ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
 202}
 203
 204static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
 205					     struct hal_rx_desc *desc)
 206{
 207	return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
 208}
 209
 210static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
 211						 struct hal_rx_desc *desc)
 212{
 213	return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
 214}
 215
 216static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
 217					    struct hal_rx_desc *desc,
 218					    struct ieee80211_hdr *hdr)
 219{
 220	ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
 221}
 222
 223static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
 224						struct hal_rx_desc *desc,
 225						u8 *crypto_hdr,
 226						enum hal_encrypt_type enctype)
 227{
 228	ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
 229}
 230
 231static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
 232						struct hal_rx_desc *desc)
 233{
 234	return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
 235}
 236
 237static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
 238						struct hal_rx_desc *desc)
 239{
 240	return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
 241}
 242
 243static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
 244{
 245	struct sk_buff *skb;
 246
 247	while ((skb = __skb_dequeue(skb_list)))
 248		dev_kfree_skb_any(skb);
 249}
 250
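/* Detach up to @count descriptor nodes from the head of @head onto @list,
 * marking each node in_use on the way; returns the number of nodes moved.
 */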
 251static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
 252				       struct list_head *head,
 253				       size_t count)
 254{
 255	struct list_head *cur;
 256	struct ath12k_rx_desc_info *rx_desc;
 257	size_t nodes = 0;
 258
 259	if (!count) {
 260		INIT_LIST_HEAD(list);
 261		goto out;
 262	}
 263
 264	list_for_each(cur, head) {
 265		if (!count)
 266			break;
 267
 268		rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
 269		rx_desc->in_use = true;
 270
 271		count--;
 272		nodes++;
 273	}
 274
 275	list_cut_before(list, head, cur);
 276out:
 277	return nodes;
 278}
 279
 280static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
 281				      struct list_head *used_list)
 282{
 283	struct ath12k_rx_desc_info *rx_desc, *safe;
 284
 285	/* Reset the use flag */
 286	list_for_each_entry_safe(rx_desc, safe, used_list, list)
 287		rx_desc->in_use = false;
 288
 289	spin_lock_bh(&dp->rx_desc_lock);
 290	list_splice_tail(used_list, &dp->rx_desc_free_list);
 291	spin_unlock_bh(&dp->rx_desc_lock);
 292}
 293
 294/* Returns number of Rx buffers replenished */
 295int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
 296				struct dp_rxdma_ring *rx_ring,
 297				struct list_head *used_list,
 298				int req_entries)
 299{
 300	struct ath12k_buffer_addr *desc;
 301	struct hal_srng *srng;
 302	struct sk_buff *skb;
 303	int num_free;
 304	int num_remain;
 305	u32 cookie;
 306	dma_addr_t paddr;
 307	struct ath12k_dp *dp = &ab->dp;
 308	struct ath12k_rx_desc_info *rx_desc;
 309	enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm;
 310
 311	req_entries = min(req_entries, rx_ring->bufs_max);
 312
 313	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
 314
 315	spin_lock_bh(&srng->lock);
 316
 317	ath12k_hal_srng_access_begin(ab, srng);
 318
 319	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
 320	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
 321		req_entries = num_free;
 322
 323	req_entries = min(num_free, req_entries);
 324	num_remain = req_entries;
 325
 326	if (!num_remain)
 327		goto out;
 328
 329	/* Get the descriptor from free list */
 330	if (list_empty(used_list)) {
 331		spin_lock_bh(&dp->rx_desc_lock);
 332		req_entries = ath12k_dp_list_cut_nodes(used_list,
 333						       &dp->rx_desc_free_list,
 334						       num_remain);
 335		spin_unlock_bh(&dp->rx_desc_lock);
 336		num_remain = req_entries;
 337	}
 338
 339	while (num_remain > 0) {
 340		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
 341				    DP_RX_BUFFER_ALIGN_SIZE);
 342		if (!skb)
 343			break;
 344
 345		if (!IS_ALIGNED((unsigned long)skb->data,
 346				DP_RX_BUFFER_ALIGN_SIZE)) {
 347			skb_pull(skb,
 348				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
 349				 skb->data);
 350		}
 351
 352		paddr = dma_map_single(ab->dev, skb->data,
 353				       skb->len + skb_tailroom(skb),
 354				       DMA_FROM_DEVICE);
 355		if (dma_mapping_error(ab->dev, paddr))
 356			goto fail_free_skb;
 357
 358		rx_desc = list_first_entry_or_null(used_list,
 359						   struct ath12k_rx_desc_info,
 360						   list);
 361		if (!rx_desc)
 362			goto fail_dma_unmap;
 363
 364		rx_desc->skb = skb;
 365		cookie = rx_desc->cookie;
 366
 367		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
 368		if (!desc)
 369			goto fail_dma_unmap;
 370
 371		list_del(&rx_desc->list);
 372		ATH12K_SKB_RXCB(skb)->paddr = paddr;
 373
 374		num_remain--;
 375
 376		ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
 377	}
 378
 379	goto out;
 380
 381fail_dma_unmap:
 382	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
 383			 DMA_FROM_DEVICE);
 384fail_free_skb:
 385	dev_kfree_skb_any(skb);
 386out:
 387	ath12k_hal_srng_access_end(ab, srng);
 388
 389	if (!list_empty(used_list))
 390		ath12k_dp_rx_enqueue_free(dp, used_list);
 391
 392	spin_unlock_bh(&srng->lock);
 393
 394	return req_entries - num_remain;
 395}
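/* Usage sketch (illustrative, not lifted from a specific caller): a reaper
 * that consumed n entries from a destination ring would hand the spent
 * ath12k_rx_desc_info nodes back roughly like so:
 *
 *	LIST_HEAD(used_list);
 *	...move the consumed descriptors onto used_list...
 *	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &used_list, n);
 *
 * Calling with req_entries == 0 and an empty list, as
 * ath12k_dp_rxdma_ring_buf_setup() does below, lets the function top the
 * ring up by itself once more than 3/4 of it is free.
 */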
 396
 397static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
 398					     struct dp_rxdma_mon_ring *rx_ring)
 399{
 400	struct sk_buff *skb;
 401	int buf_id;
 402
 403	spin_lock_bh(&rx_ring->idr_lock);
 404	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
 405		idr_remove(&rx_ring->bufs_idr, buf_id);
 406		/* TODO: Understand where internal driver does this dma_unmap
 407		 * of rxdma_buffer.
 408		 */
 409		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
 410				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
 411		dev_kfree_skb_any(skb);
 412	}
 413
 414	idr_destroy(&rx_ring->bufs_idr);
 415	spin_unlock_bh(&rx_ring->idr_lock);
 416
 417	return 0;
 418}
 419
 420static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
 421{
 422	struct ath12k_dp *dp = &ab->dp;
 423
 424	ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
 425
 426	return 0;
 427}
 428
 429static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
 430					      struct dp_rxdma_mon_ring *rx_ring,
 431					      u32 ringtype)
 432{
 433	int num_entries;
 434
 435	num_entries = rx_ring->refill_buf_ring.size /
 436		ath12k_hal_srng_get_entrysize(ab, ringtype);
 437
 438	rx_ring->bufs_max = num_entries;
 439	ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
 440
 441	return 0;
 442}
 443
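/* A refill ring holds (ring byte size / HAL entry size) buffers; record
 * that as bufs_max and fill the ring completely at setup time.
 */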
 444static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
 445					  struct dp_rxdma_ring *rx_ring)
 446{
 447	LIST_HEAD(list);
 448
 449	rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
 450			ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
 451
 452	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
 453
 454	return 0;
 455}
 456
 457static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
 458{
 459	struct ath12k_dp *dp = &ab->dp;
 460	int ret;
 461
 462	ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
 463	if (ret) {
 464		ath12k_warn(ab,
 465			    "failed to setup HAL_RXDMA_BUF\n");
 466		return ret;
 467	}
 468
 469	if (ab->hw_params->rxdma1_enable) {
 470		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
 471							 &dp->rxdma_mon_buf_ring,
 472							 HAL_RXDMA_MONITOR_BUF);
 473		if (ret) {
 474			ath12k_warn(ab,
 475				    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
 476			return ret;
 477		}
 478	}
 479
 480	return 0;
 481}
 482
 483static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
 484{
 485	struct ath12k_pdev_dp *dp = &ar->dp;
 486	struct ath12k_base *ab = ar->ab;
 487	int i;
 488
 489	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
 490		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
 491}
 492
 493void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
 494{
 495	struct ath12k_dp *dp = &ab->dp;
 496	int i;
 497
 498	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
 499		ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
 500}
 501
 502int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
 503{
 504	struct ath12k_dp *dp = &ab->dp;
 505	int ret;
 506	int i;
 507
 508	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
 509		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
 510					   HAL_REO_DST, i, 0,
 511					   DP_REO_DST_RING_SIZE);
 512		if (ret) {
 513			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
 514			goto err_reo_cleanup;
 515		}
 516	}
 517
 518	return 0;
 519
 520err_reo_cleanup:
 521	ath12k_dp_rx_pdev_reo_cleanup(ab);
 522
 523	return ret;
 524}
 525
 526static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
 527{
 528	struct ath12k_pdev_dp *dp = &ar->dp;
 529	struct ath12k_base *ab = ar->ab;
 530	int i;
 531	int ret;
 532	u32 mac_id = dp->mac_id;
 533
 534	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
 535		ret = ath12k_dp_srng_setup(ar->ab,
 536					   &dp->rxdma_mon_dst_ring[i],
 537					   HAL_RXDMA_MONITOR_DST,
 538					   0, mac_id + i,
 539					   DP_RXDMA_MONITOR_DST_RING_SIZE);
 540		if (ret) {
 541			ath12k_warn(ar->ab,
 542				    "failed to setup HAL_RXDMA_MONITOR_DST\n");
 543			return ret;
 544		}
 545	}
 546
 547	return 0;
 548}
 549
 550void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
 551{
 552	struct ath12k_dp *dp = &ab->dp;
 553	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
 554	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
 555
 556	spin_lock_bh(&dp->reo_cmd_lock);
 557	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
 558		list_del(&cmd->list);
 559		dma_unmap_single(ab->dev, cmd->data.paddr,
 560				 cmd->data.size, DMA_BIDIRECTIONAL);
 561		kfree(cmd->data.vaddr);
 562		kfree(cmd);
 563	}
 564
 565	list_for_each_entry_safe(cmd_cache, tmp_cache,
 566				 &dp->reo_cmd_cache_flush_list, list) {
 567		list_del(&cmd_cache->list);
 568		dp->reo_cmd_cache_flush_count--;
 569		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
 570				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
 571		kfree(cmd_cache->data.vaddr);
 572		kfree(cmd_cache);
 573	}
 574	spin_unlock_bh(&dp->reo_cmd_lock);
 575}
 576
 577static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
 578				   enum hal_reo_cmd_status status)
 579{
 580	struct ath12k_dp_rx_tid *rx_tid = ctx;
 581
 582	if (status != HAL_REO_CMD_SUCCESS)
 583		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
 584			    rx_tid->tid, status);
 585
 586	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
 587			 DMA_BIDIRECTIONAL);
 588	kfree(rx_tid->vaddr);
 589	rx_tid->vaddr = NULL;
 590}
 591
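/* Post a REO command on the command ring. When a completion handler @cb is
 * supplied, a pending-command entry carrying a copy of @rx_tid is queued on
 * dp->reo_cmd_list so the status ring path can invoke the handler later.
 */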
 592static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
 593				  enum hal_reo_cmd_type type,
 594				  struct ath12k_hal_reo_cmd *cmd,
 595				  void (*cb)(struct ath12k_dp *dp, void *ctx,
 596					     enum hal_reo_cmd_status status))
 597{
 598	struct ath12k_dp *dp = &ab->dp;
 599	struct ath12k_dp_rx_reo_cmd *dp_cmd;
 600	struct hal_srng *cmd_ring;
 601	int cmd_num;
 602
 603	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 604	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
 605
 606	/* cmd_num starts from 1; on failure the error code is returned */
 607	if (cmd_num < 0)
 608		return cmd_num;
 609
 610	/* reo cmd ring descriptors have cmd_num starting from 1 */
 611	if (cmd_num == 0)
 612		return -EINVAL;
 613
 614	if (!cb)
 615		return 0;
 616
 617	/* Can this be optimized so that we keep the pending command list only
 618	 * for tid delete command to free up the resource on the command status
 619	 * indication?
 620	 */
 621	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
 622
 623	if (!dp_cmd)
 624		return -ENOMEM;
 625
 626	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
 627	dp_cmd->cmd_num = cmd_num;
 628	dp_cmd->handler = cb;
 629
 630	spin_lock_bh(&dp->reo_cmd_lock);
 631	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
 632	spin_unlock_bh(&dp->reo_cmd_lock);
 633
 634	return 0;
 635}
 636
 637static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
 638				      struct ath12k_dp_rx_tid *rx_tid)
 639{
 640	struct ath12k_hal_reo_cmd cmd = {0};
 641	unsigned long tot_desc_sz, desc_sz;
 642	int ret;
 643
 644	tot_desc_sz = rx_tid->size;
 645	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
 646
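	/* Flush the queue one descriptor-sized chunk at a time, walking from
	 * the tail of the TID queue back toward its base; the final flush
	 * below covers the base address and asks for a status callback so
	 * the host memory can then be freed.
	 */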
 647	while (tot_desc_sz > desc_sz) {
 648		tot_desc_sz -= desc_sz;
 649		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
 650		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
 651		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
 652					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
 653					     NULL);
 654		if (ret)
 655			ath12k_warn(ab,
 656				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
 657				    rx_tid->tid, ret);
 658	}
 659
 660	memset(&cmd, 0, sizeof(cmd));
 661	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 662	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
 663	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
 664	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
 665				     HAL_REO_CMD_FLUSH_CACHE,
 666				     &cmd, ath12k_dp_reo_cmd_free);
 667	if (ret) {
 668		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
 669			   rx_tid->tid, ret);
 670		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
 671				 DMA_BIDIRECTIONAL);
 672		kfree(rx_tid->vaddr);
 673		rx_tid->vaddr = NULL;
 674	}
 675}
 676
 677static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
 678				      enum hal_reo_cmd_status status)
 679{
 680	struct ath12k_base *ab = dp->ab;
 681	struct ath12k_dp_rx_tid *rx_tid = ctx;
 682	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
 683
 684	if (status == HAL_REO_CMD_DRAIN) {
 685		goto free_desc;
 686	} else if (status != HAL_REO_CMD_SUCCESS) {
 687		/* Shouldn't happen! Cleanup in case of other failure? */
 688		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
 689			    rx_tid->tid, status);
 690		return;
 691	}
 692
 693	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
 694	if (!elem)
 695		goto free_desc;
 696
 697	elem->ts = jiffies;
 698	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
 699
 700	spin_lock_bh(&dp->reo_cmd_lock);
 701	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
 702	dp->reo_cmd_cache_flush_count++;
 703
 704	/* Flush and invalidate aged REO desc from HW cache */
 705	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
 706				 list) {
 707		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
 708		    time_after(jiffies, elem->ts +
 709			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
 710			list_del(&elem->list);
 711			dp->reo_cmd_cache_flush_count--;
 712
 713			/* Unlock the reo_cmd_lock before calling ath12k_dp_reo_cmd_send()
 714			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
 715			 * is used in only two contexts: here, when called from napi, and
 716			 * in ath12k_dp_free during core destroy. Before dp_free the irqs
 717			 * are disabled and synchronized, so there can't be any race
 718			 * against an add or delete to this list, and the unlock-lock
 719			 * pattern is safe here.
 720			 */
 721			spin_unlock_bh(&dp->reo_cmd_lock);
 722
 723			ath12k_dp_reo_cache_flush(ab, &elem->data);
 724			kfree(elem);
 725			spin_lock_bh(&dp->reo_cmd_lock);
 726		}
 727	}
 728	spin_unlock_bh(&dp->reo_cmd_lock);
 729
 730	return;
 731free_desc:
 732	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
 733			 DMA_BIDIRECTIONAL);
 734	kfree(rx_tid->vaddr);
 735	rx_tid->vaddr = NULL;
 736}
 737
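/* Each peer owns IEEE80211_NUM_TIDS + 1 queue references in the REO LUT, so
 * the entry for a given (peer_id, tid) pair sits at index
 * peer_id * (IEEE80211_NUM_TIDS + 1) + tid.
 */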
 738static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
 739					  dma_addr_t paddr)
 740{
 741	struct ath12k_reo_queue_ref *qref;
 742	struct ath12k_dp *dp = &ab->dp;
 743
 744	if (!ab->hw_params->reoq_lut_support)
 745		return;
 746
 747	/* TODO: select the LUT based on whether this is an ML peer or not.
 748	 * The code below assumes a non-ML peer.
 749	 */
 750	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
 751			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
 752
 753	qref->info0 = u32_encode_bits(lower_32_bits(paddr),
 754				      BUFFER_ADDR_INFO0_ADDR);
 755	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
 756				      BUFFER_ADDR_INFO1_ADDR) |
 757		      u32_encode_bits(tid, DP_REO_QREF_NUM);
 758}
 759
 760static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
 761{
 762	struct ath12k_reo_queue_ref *qref;
 763	struct ath12k_dp *dp = &ab->dp;
 764
 765	if (!ab->hw_params->reoq_lut_support)
 766		return;
 767
 768	/* TODO: select the LUT based on whether this is an ML peer or not.
 769	 * The code below assumes a non-ML peer.
 770	 */
 771	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
 772			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
 773
 774	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
 775	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
 776		      u32_encode_bits(tid, DP_REO_QREF_NUM);
 777}
 778
 779void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
 780				  struct ath12k_peer *peer, u8 tid)
 781{
 782	struct ath12k_hal_reo_cmd cmd = {0};
 783	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
 784	int ret;
 785
 786	if (!rx_tid->active)
 787		return;
 788
 789	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
 790	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 791	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
 792	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
 793	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
 794				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
 795				     ath12k_dp_rx_tid_del_func);
 796	if (ret) {
 797		ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
 798			   tid, ret);
 799		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
 800				 DMA_BIDIRECTIONAL);
 801		kfree(rx_tid->vaddr);
 802		rx_tid->vaddr = NULL;
 803	}
 804
 805	ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
 806
 807	rx_tid->active = false;
 808}
 809
 810/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
 811 * to struct hal_wbm_release_ring; I couldn't figure out the logic behind
 812 * that.
 813 */
 814static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
 815					 struct hal_reo_dest_ring *ring,
 816					 enum hal_wbm_rel_bm_act action)
 817{
 818	struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
 819	struct hal_wbm_release_ring *desc;
 820	struct ath12k_dp *dp = &ab->dp;
 821	struct hal_srng *srng;
 822	int ret = 0;
 823
 824	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
 825
 826	spin_lock_bh(&srng->lock);
 827
 828	ath12k_hal_srng_access_begin(ab, srng);
 829
 830	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
 831	if (!desc) {
 832		ret = -ENOBUFS;
 833		goto exit;
 834	}
 835
 836	ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);
 837
 838exit:
 839	ath12k_hal_srng_access_end(ab, srng);
 840
 841	spin_unlock_bh(&srng->lock);
 842
 843	return ret;
 844}
 845
 846static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
 847				       bool rel_link_desc)
 848{
 849	struct ath12k_base *ab = rx_tid->ab;
 850
 851	lockdep_assert_held(&ab->base_lock);
 852
 853	if (rx_tid->dst_ring_desc) {
 854		if (rel_link_desc)
 855			ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
 856						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
 857		kfree(rx_tid->dst_ring_desc);
 858		rx_tid->dst_ring_desc = NULL;
 859	}
 860
 861	rx_tid->cur_sn = 0;
 862	rx_tid->last_frag_no = 0;
 863	rx_tid->rx_frag_bitmap = 0;
 864	__skb_queue_purge(&rx_tid->rx_frags);
 865}
 866
 867void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
 868{
 869	struct ath12k_dp_rx_tid *rx_tid;
 870	int i;
 871
 872	lockdep_assert_held(&ar->ab->base_lock);
 873
 874	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
 875		rx_tid = &peer->rx_tid[i];
 876
 877		ath12k_dp_rx_peer_tid_delete(ar, peer, i);
 878		ath12k_dp_rx_frags_cleanup(rx_tid, true);
 879
 880		spin_unlock_bh(&ar->ab->base_lock);
 881		del_timer_sync(&rx_tid->frag_timer);
 882		spin_lock_bh(&ar->ab->base_lock);
 883	}
 884}
 885
 886static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
 887					 struct ath12k_peer *peer,
 888					 struct ath12k_dp_rx_tid *rx_tid,
 889					 u32 ba_win_sz, u16 ssn,
 890					 bool update_ssn)
 891{
 892	struct ath12k_hal_reo_cmd cmd = {0};
 893	int ret;
 894
 895	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 896	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
 897	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
 898	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
 899	cmd.ba_window_size = ba_win_sz;
 900
 901	if (update_ssn) {
 902		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
 903		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
 904	}
 905
 906	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
 907				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
 908				     NULL);
 909	if (ret) {
 910		ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
 911			    rx_tid->tid, ret);
 912		return ret;
 913	}
 914
 915	rx_tid->ba_win_sz = ba_win_sz;
 916
 917	return 0;
 918}
 919
 920int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
 921				u8 tid, u32 ba_win_sz, u16 ssn,
 922				enum hal_pn_type pn_type)
 923{
 924	struct ath12k_base *ab = ar->ab;
 925	struct ath12k_dp *dp = &ab->dp;
 926	struct hal_rx_reo_queue *addr_aligned;
 927	struct ath12k_peer *peer;
 928	struct ath12k_dp_rx_tid *rx_tid;
 929	u32 hw_desc_sz;
 930	void *vaddr;
 931	dma_addr_t paddr;
 932	int ret;
 933
 934	spin_lock_bh(&ab->base_lock);
 935
 936	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
 937	if (!peer) {
 938		spin_unlock_bh(&ab->base_lock);
 939		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
 940		return -ENOENT;
 941	}
 942
 943	if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
 944		spin_unlock_bh(&ab->base_lock);
 945		ath12k_warn(ab, "reo qref table is not setup\n");
 946		return -EINVAL;
 947	}
 948
 949	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
 950		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
 951			    peer->peer_id, tid);
 952		spin_unlock_bh(&ab->base_lock);
 953		return -EINVAL;
 954	}
 955
 956	rx_tid = &peer->rx_tid[tid];
 957	/* Update the tid queue if it is already set up */
 958	if (rx_tid->active) {
 959		paddr = rx_tid->paddr;
 960		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
 961						    ba_win_sz, ssn, true);
 962		spin_unlock_bh(&ab->base_lock);
 963		if (ret) {
 964			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
 965			return ret;
 966		}
 967
 968		if (!ab->hw_params->reoq_lut_support) {
 969			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
 970								     peer_mac,
 971								     paddr, tid, 1,
 972								     ba_win_sz);
 973			if (ret) {
 974				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
 975					    tid, ret);
 976				return ret;
 977			}
 978		}
 979
 980		return 0;
 981	}
 982
 983	rx_tid->tid = tid;
 984
 985	rx_tid->ba_win_sz = ba_win_sz;
 986
 987	/* TODO: Optimize the memory allocation for qos tid based on
 988	 * the actual BA window size in REO tid update path.
 989	 */
 990	if (tid == HAL_DESC_REO_NON_QOS_TID)
 991		hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
 992	else
 993		hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
 994
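	/* Over-allocate by HAL_LINK_DESC_ALIGN - 1 bytes so the hardware
	 * queue descriptor can be aligned with PTR_ALIGN() below; the
	 * unaligned base pointer is what rx_tid->vaddr keeps for kfree().
	 */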
 995	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
 996	if (!vaddr) {
 997		spin_unlock_bh(&ab->base_lock);
 998		return -ENOMEM;
 999	}
1000
1001	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
1002
1003	ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
1004				   ssn, pn_type);
1005
1006	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
1007			       DMA_BIDIRECTIONAL);
1008
1009	ret = dma_mapping_error(ab->dev, paddr);
1010	if (ret) {
1011		spin_unlock_bh(&ab->base_lock);
1012		goto err_mem_free;
1013	}
1014
1015	rx_tid->vaddr = vaddr;
1016	rx_tid->paddr = paddr;
1017	rx_tid->size = hw_desc_sz;
1018	rx_tid->active = true;
1019
1020	if (ab->hw_params->reoq_lut_support) {
1021		/* Update the REO queue LUT at the corresponding peer id
1022		 * and tid with qaddr.
1023		 */
1024		ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
1025		spin_unlock_bh(&ab->base_lock);
1026	} else {
1027		spin_unlock_bh(&ab->base_lock);
1028		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
1029							     paddr, tid, 1, ba_win_sz);
1030	}
1031
1032	return ret;
1033
1034err_mem_free:
1035	kfree(vaddr);
1036
1037	return ret;
1038}
1039
1040int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
1041			     struct ieee80211_ampdu_params *params)
1042{
1043	struct ath12k_base *ab = ar->ab;
1044	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
1045	struct ath12k_link_sta *arsta = &ahsta->deflink;
1046	int vdev_id = arsta->arvif->vdev_id;
1047	int ret;
1048
1049	ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
1050					  params->tid, params->buf_size,
1051					  params->ssn, arsta->ahsta->pn_type);
1052	if (ret)
1053		ath12k_warn(ab, "failed to setup rx tid %d\n", ret);
1054
1055	return ret;
1056}
1057
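/* Stopping rx aggregation shrinks the REO BA window back to 1 rather than
 * tearing the queue down; the TID queue stays active for ordinary rx.
 */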
1058int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
1059			    struct ieee80211_ampdu_params *params)
1060{
1061	struct ath12k_base *ab = ar->ab;
1062	struct ath12k_peer *peer;
1063	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
1064	struct ath12k_link_sta *arsta = &ahsta->deflink;
1065	int vdev_id = arsta->arvif->vdev_id;
1066	bool active;
1067	int ret;
1068
1069	spin_lock_bh(&ab->base_lock);
1070
1071	peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
1072	if (!peer) {
1073		spin_unlock_bh(&ab->base_lock);
1074		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
1075		return -ENOENT;
1076	}
1077
1078	active = peer->rx_tid[params->tid].active;
1079
1080	if (!active) {
1081		spin_unlock_bh(&ab->base_lock);
1082		return 0;
1083	}
1084
1085	ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
1086	spin_unlock_bh(&ab->base_lock);
1087	if (ret) {
1088		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
1089			    params->tid, ret);
1090		return ret;
1091	}
1092
1093	return ret;
1094}
1095
1096int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
1097				       const u8 *peer_addr,
1098				       enum set_key_cmd key_cmd,
1099				       struct ieee80211_key_conf *key)
1100{
1101	struct ath12k *ar = arvif->ar;
1102	struct ath12k_base *ab = ar->ab;
1103	struct ath12k_hal_reo_cmd cmd = {0};
1104	struct ath12k_peer *peer;
1105	struct ath12k_dp_rx_tid *rx_tid;
1106	u8 tid;
1107	int ret = 0;
1108
1109	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1110	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1111	 * for now.
1112	 */
1113	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1114		return 0;
1115
1116	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
1117	cmd.upd0 = HAL_REO_CMD_UPD0_PN |
1118		    HAL_REO_CMD_UPD0_PN_SIZE |
1119		    HAL_REO_CMD_UPD0_PN_VALID |
1120		    HAL_REO_CMD_UPD0_PN_CHECK |
1121		    HAL_REO_CMD_UPD0_SVLD;
1122
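	/* CCMP and GCMP carry a 48-bit packet number and TKIP a 48-bit TSC,
	 * hence pn_size = 48 for every cipher handled below.
	 */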
1123	switch (key->cipher) {
1124	case WLAN_CIPHER_SUITE_TKIP:
1125	case WLAN_CIPHER_SUITE_CCMP:
1126	case WLAN_CIPHER_SUITE_CCMP_256:
1127	case WLAN_CIPHER_SUITE_GCMP:
1128	case WLAN_CIPHER_SUITE_GCMP_256:
1129		if (key_cmd == SET_KEY) {
1130			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
1131			cmd.pn_size = 48;
1132		}
1133		break;
1134	default:
1135		break;
1136	}
1137
1138	spin_lock_bh(&ab->base_lock);
1139
1140	peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
1141	if (!peer) {
1142		spin_unlock_bh(&ab->base_lock);
1143		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
1144			    peer_addr);
1145		return -ENOENT;
1146	}
1147
1148	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
1149		rx_tid = &peer->rx_tid[tid];
1150		if (!rx_tid->active)
1151			continue;
1152		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
1153		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
1154		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
1155					     HAL_REO_CMD_UPDATE_RX_QUEUE,
1156					     &cmd, NULL);
1157		if (ret) {
1158			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
1159				    tid, peer_addr, ret);
1160			break;
1161		}
1162	}
1163
1164	spin_unlock_bh(&ab->base_lock);
1165
1166	return ret;
1167}
1168
1169static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1170				      u16 peer_id)
1171{
1172	int i;
1173
1174	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1175		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1176			if (peer_id == ppdu_stats->user_stats[i].peer_id)
1177				return i;
1178		} else {
1179			return i;
1180		}
1181	}
1182
1183	return -EINVAL;
1184}
1185
1186static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
1187					   u16 tag, u16 len, const void *ptr,
1188					   void *data)
1189{
1190	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
1191	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
1192	const struct htt_ppdu_stats_user_rate *user_rate;
1193	struct htt_ppdu_stats_info *ppdu_info;
1194	struct htt_ppdu_user_stats *user_stats;
1195	int cur_user;
1196	u16 peer_id;
1197
1198	ppdu_info = data;
1199
1200	switch (tag) {
1201	case HTT_PPDU_STATS_TAG_COMMON:
1202		if (len < sizeof(struct htt_ppdu_stats_common)) {
1203			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1204				    len, tag);
1205			return -EINVAL;
1206		}
1207		memcpy(&ppdu_info->ppdu_stats.common, ptr,
1208		       sizeof(struct htt_ppdu_stats_common));
1209		break;
1210	case HTT_PPDU_STATS_TAG_USR_RATE:
1211		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
1212			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1213				    len, tag);
1214			return -EINVAL;
1215		}
1216		user_rate = ptr;
1217		peer_id = le16_to_cpu(user_rate->sw_peer_id);
1218		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1219						      peer_id);
1220		if (cur_user < 0)
1221			return -EINVAL;
1222		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1223		user_stats->peer_id = peer_id;
1224		user_stats->is_valid_peer_id = true;
1225		memcpy(&user_stats->rate, ptr,
1226		       sizeof(struct htt_ppdu_stats_user_rate));
1227		user_stats->tlv_flags |= BIT(tag);
1228		break;
1229	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
1230		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
1231			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1232				    len, tag);
1233			return -EINVAL;
1234		}
1235
1236		cmplt_cmn = ptr;
1237		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
1238		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1239						      peer_id);
1240		if (cur_user < 0)
1241			return -EINVAL;
1242		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1243		user_stats->peer_id = peer_id;
1244		user_stats->is_valid_peer_id = true;
1245		memcpy(&user_stats->cmpltn_cmn, ptr,
1246		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
1247		user_stats->tlv_flags |= BIT(tag);
1248		break;
1249	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
1250		if (len <
1251		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
1252			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1253				    len, tag);
1254			return -EINVAL;
1255		}
1256
1257		ba_status = ptr;
1258		peer_id = le16_to_cpu(ba_status->sw_peer_id);
1259		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1260						      peer_id);
1261		if (cur_user < 0)
1262			return -EINVAL;
1263		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1264		user_stats->peer_id = peer_id;
1265		user_stats->is_valid_peer_id = true;
1266		memcpy(&user_stats->ack_ba, ptr,
1267		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
1268		user_stats->tlv_flags |= BIT(tag);
1269		break;
1270	}
1271	return 0;
1272}
1273
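/* Walk a buffer of HTT TLVs: each TLV begins with a struct htt_tlv header
 * whose HTT_TLV_TAG/HTT_TLV_LEN bitfields give the tag and payload length,
 * and @iter is invoked once per TLV with a pointer to its payload.
 */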
1274int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
1275			   int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
1276				       const void *ptr, void *data),
1277			   void *data)
1278{
1279	const struct htt_tlv *tlv;
1280	const void *begin = ptr;
1281	u16 tlv_tag, tlv_len;
1282	int ret = -EINVAL;
1283
1284	while (len > 0) {
1285		if (len < sizeof(*tlv)) {
1286			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
1287				   ptr - begin, len, sizeof(*tlv));
1288			return -EINVAL;
1289		}
1290		tlv = (struct htt_tlv *)ptr;
1291		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
1292		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
1293		ptr += sizeof(*tlv);
1294		len -= sizeof(*tlv);
1295
1296		if (tlv_len > len) {
1297			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
1298				   tlv_tag, ptr - begin, len, tlv_len);
1299			return -EINVAL;
1300		}
1301		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
1302		if (ret == -ENOMEM)
1303			return ret;
1304
1305		ptr += tlv_len;
1306		len -= tlv_len;
1307	}
1308	return 0;
1309}
1310
1311static void
1312ath12k_update_per_peer_tx_stats(struct ath12k *ar,
1313				struct htt_ppdu_stats *ppdu_stats, u8 user)
1314{
1315	struct ath12k_base *ab = ar->ab;
1316	struct ath12k_peer *peer;
1317	struct ieee80211_sta *sta;
1318	struct ath12k_sta *ahsta;
1319	struct ath12k_link_sta *arsta;
1320	struct htt_ppdu_stats_user_rate *user_rate;
1321	struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
1322	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
1323	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
1324	int ret;
1325	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
1326	u32 v, succ_bytes = 0;
1327	u16 tones, rate = 0, succ_pkts = 0;
1328	u32 tx_duration = 0;
1329	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
1330	bool is_ampdu = false;
1331
1332	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
1333		return;
1334
1335	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
1336		is_ampdu =
1337			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
1338
1339	if (usr_stats->tlv_flags &
1340	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
1341		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
1342		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
1343					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
1344		tid = le32_get_bits(usr_stats->ack_ba.info,
1345				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
1346	}
1347
1348	if (common->fes_duration_us)
1349		tx_duration = le32_to_cpu(common->fes_duration_us);
1350
1351	user_rate = &usr_stats->rate;
1352	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
1353	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
1354	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
1355	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
1356	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
1357	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
1358
1359	/* Note: If the host configured fixed rates, and in some other special
1360	 * cases, broadcast/management frames are sent at different rates.
1361	 * Should firmware rate control be skipped for these?
1362	 */
1363
1364	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
1365		ath12k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
1366		return;
1367	}
1368
1369	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
1370		ath12k_warn(ab, "Invalid VHT mcs %d peer stats",  mcs);
1371		return;
1372	}
1373
1374	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
1375		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
1376			    mcs, nss);
1377		return;
1378	}
1379
1380	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1381		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
1382							    flags,
1383							    &rate_idx,
1384							    &rate);
1385		if (ret < 0)
1386			return;
1387	}
1388
1389	rcu_read_lock();
1390	spin_lock_bh(&ab->base_lock);
1391	peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);
1392
1393	if (!peer || !peer->sta) {
1394		spin_unlock_bh(&ab->base_lock);
1395		rcu_read_unlock();
1396		return;
1397	}
1398
1399	sta = peer->sta;
1400	ahsta = ath12k_sta_to_ahsta(sta);
1401	arsta = &ahsta->deflink;
1402
1403	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1404
1405	switch (flags) {
1406	case WMI_RATE_PREAMBLE_OFDM:
1407		arsta->txrate.legacy = rate;
1408		break;
1409	case WMI_RATE_PREAMBLE_CCK:
1410		arsta->txrate.legacy = rate;
1411		break;
1412	case WMI_RATE_PREAMBLE_HT:
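		/* HT is reported as a per-stream MCS (0-7) plus a stream
		 * count; mac80211 wants the flat 0-31 HT index, hence
		 * mcs + 8 * (nss - 1).
		 */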
1413		arsta->txrate.mcs = mcs + 8 * (nss - 1);
1414		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1415		if (sgi)
1416			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1417		break;
1418	case WMI_RATE_PREAMBLE_VHT:
1419		arsta->txrate.mcs = mcs;
1420		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1421		if (sgi)
1422			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1423		break;
1424	case WMI_RATE_PREAMBLE_HE:
1425		arsta->txrate.mcs = mcs;
1426		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1427		arsta->txrate.he_dcm = dcm;
1428		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
1429		tones = le16_to_cpu(user_rate->ru_end) -
1430			le16_to_cpu(user_rate->ru_start) + 1;
1431		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
1432		arsta->txrate.he_ru_alloc = v;
1433		break;
1434	}
1435
1436	arsta->txrate.nss = nss;
1437	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
1438	arsta->tx_duration += tx_duration;
1439	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1440
1441	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
1442	 * So skip peer stats update for mgmt packets.
1443	 */
1444	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1445		memset(peer_stats, 0, sizeof(*peer_stats));
1446		peer_stats->succ_pkts = succ_pkts;
1447		peer_stats->succ_bytes = succ_bytes;
1448		peer_stats->is_ampdu = is_ampdu;
1449		peer_stats->duration = tx_duration;
1450		peer_stats->ba_fails =
1451			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1452			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1453	}
1454
1455	spin_unlock_bh(&ab->base_lock);
1456	rcu_read_unlock();
1457}
1458
1459static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
1460					 struct htt_ppdu_stats *ppdu_stats)
1461{
1462	u8 user;
1463
1464	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1465		ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1466}
1467
1468static
1469struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
1470							u32 ppdu_id)
1471{
1472	struct htt_ppdu_stats_info *ppdu_info;
1473
1474	lockdep_assert_held(&ar->data_lock);
1475	if (!list_empty(&ar->ppdu_stats_info)) {
1476		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1477			if (ppdu_info->ppdu_id == ppdu_id)
1478				return ppdu_info;
1479		}
1480
1481		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1482			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1483						     typeof(*ppdu_info), list);
1484			list_del(&ppdu_info->list);
1485			ar->ppdu_stat_list_depth--;
1486			ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1487			kfree(ppdu_info);
1488		}
1489	}
1490
1491	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
1492	if (!ppdu_info)
1493		return NULL;
1494
1495	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1496	ar->ppdu_stat_list_depth++;
1497
1498	return ppdu_info;
1499}
1500
1501static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
1502				       struct htt_ppdu_user_stats *usr_stats)
1503{
1504	peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
1505	peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
1506	peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
1507	peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
1508	peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
1509	peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
1510	peer->ppdu_stats_delayba.resp_rate_flags =
1511		le32_to_cpu(usr_stats->rate.resp_rate_flags);
1512
1513	peer->delayba_flag = true;
1514}
1515
1516static void ath12k_copy_to_bar(struct ath12k_peer *peer,
1517			       struct htt_ppdu_user_stats *usr_stats)
1518{
1519	usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
1520	usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
1521	usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
1522	usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
1523	usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
1524	usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
1525	usr_stats->rate.resp_rate_flags =
1526		cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);
1527
1528	peer->delayba_flag = false;
1529}
1530
1531static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
1532				      struct sk_buff *skb)
1533{
1534	struct ath12k_htt_ppdu_stats_msg *msg;
1535	struct htt_ppdu_stats_info *ppdu_info;
1536	struct ath12k_peer *peer = NULL;
1537	struct htt_ppdu_user_stats *usr_stats = NULL;
1538	u32 peer_id = 0;
1539	struct ath12k *ar;
1540	int ret, i;
1541	u8 pdev_id;
1542	u32 ppdu_id, len;
1543
1544	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
1545	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
1546	if (len > (skb->len - struct_size(msg, data, 0))) {
1547		ath12k_warn(ab,
1548			    "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
1549			    len, skb->len);
1550		return -EINVAL;
1551	}
1552
1553	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
1554	ppdu_id = le32_to_cpu(msg->ppdu_id);
1555
1556	rcu_read_lock();
1557	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
1558	if (!ar) {
1559		ret = -EINVAL;
1560		goto exit;
1561	}
1562
1563	spin_lock_bh(&ar->data_lock);
1564	ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1565	if (!ppdu_info) {
1566		spin_unlock_bh(&ar->data_lock);
1567		ret = -EINVAL;
1568		goto exit;
1569	}
1570
1571	ppdu_info->ppdu_id = ppdu_id;
1572	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
1573				     ath12k_htt_tlv_ppdu_stats_parse,
1574				     (void *)ppdu_info);
1575	if (ret) {
1576		spin_unlock_bh(&ar->data_lock);
1577		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
1578		goto exit;
1579	}
1580
1581	if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
1582		spin_unlock_bh(&ar->data_lock);
1583		ath12k_warn(ab,
1584			    "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
1585			    ppdu_info->ppdu_stats.common.num_users,
1586			    HTT_PPDU_STATS_MAX_USERS);
1587		ret = -EINVAL;
1588		goto exit;
1589	}
1590
1591	/* back up data rate tlv for all peers */
1592	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
1593	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
1594	    ppdu_info->delay_ba) {
1595		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
1596			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
1597			spin_lock_bh(&ab->base_lock);
1598			peer = ath12k_peer_find_by_id(ab, peer_id);
1599			if (!peer) {
1600				spin_unlock_bh(&ab->base_lock);
1601				continue;
1602			}
1603
1604			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
1605			if (usr_stats->delay_ba)
1606				ath12k_copy_to_delay_stats(peer, usr_stats);
1607			spin_unlock_bh(&ab->base_lock);
1608		}
1609	}
1610
1611	/* restore all peers' data rate tlv to mu-bar tlv */
1612	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
1613	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
1614		for (i = 0; i < ppdu_info->bar_num_users; i++) {
1615			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
1616			spin_lock_bh(&ab->base_lock);
1617			peer = ath12k_peer_find_by_id(ab, peer_id);
1618			if (!peer) {
1619				spin_unlock_bh(&ab->base_lock);
1620				continue;
1621			}
1622
1623			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
1624			if (peer->delayba_flag)
1625				ath12k_copy_to_bar(peer, usr_stats);
1626			spin_unlock_bh(&ab->base_lock);
1627		}
1628	}
1629
1630	spin_unlock_bh(&ar->data_lock);
1631
1632exit:
1633	rcu_read_unlock();
1634
1635	return ret;
1636}
1637
1638static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
1639						struct sk_buff *skb)
1640{
1641	struct ath12k_htt_mlo_offset_msg *msg;
1642	struct ath12k_pdev *pdev;
1643	struct ath12k *ar;
1644	u8 pdev_id;
1645
1646	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
1647	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
1648			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
1649
1650	rcu_read_lock();
1651	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
1652	if (!ar) {
1653		ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
1654		goto exit;
1655	}
1656
1657	spin_lock_bh(&ar->data_lock);
1658	pdev = ar->pdev;
1659
1660	pdev->timestamp.info = __le32_to_cpu(msg->info);
1661	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
1662	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
1663	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
1664	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
1665	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
1666	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
1667	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
1668
1669	spin_unlock_bh(&ar->data_lock);
1670exit:
1671	rcu_read_unlock();
1672}
1673
1674void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
1675				       struct sk_buff *skb)
1676{
1677	struct ath12k_dp *dp = &ab->dp;
1678	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1679	enum htt_t2h_msg_type type;
1680	u16 peer_id;
1681	u8 vdev_id;
1682	u8 mac_addr[ETH_ALEN];
1683	u16 peer_mac_h16;
1684	u16 ast_hash = 0;
1685	u16 hw_peer_id;
1686
1687	type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);
1688
1689	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
1690
1691	switch (type) {
1692	case HTT_T2H_MSG_TYPE_VERSION_CONF:
1693		dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
1694						      HTT_T2H_VERSION_CONF_MAJOR);
1695		dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
1696						      HTT_T2H_VERSION_CONF_MINOR);
1697		complete(&dp->htt_tgt_version_received);
1698		break;
1699	/* TODO: remove unused peer map versions after testing */
1700	case HTT_T2H_MSG_TYPE_PEER_MAP:
1701		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1702					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1703		peer_id = le32_get_bits(resp->peer_map_ev.info,
1704					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1705		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1706					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1707		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1708				       peer_mac_h16, mac_addr);
1709		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1710		break;
1711	case HTT_T2H_MSG_TYPE_PEER_MAP2:
1712		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1713					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1714		peer_id = le32_get_bits(resp->peer_map_ev.info,
1715					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1716		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1717					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1718		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1719				       peer_mac_h16, mac_addr);
1720		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
1721					 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
1722		hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
1723					   HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
1724		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1725				      hw_peer_id);
1726		break;
1727	case HTT_T2H_MSG_TYPE_PEER_MAP3:
1728		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1729					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1730		peer_id = le32_get_bits(resp->peer_map_ev.info,
1731					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1732		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1733					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1734		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1735				       peer_mac_h16, mac_addr);
1736		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1737				      peer_id);
1738		break;
1739	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1740	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1741		peer_id = le32_get_bits(resp->peer_unmap_ev.info,
1742					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
1743		ath12k_peer_unmap_event(ab, peer_id);
1744		break;
1745	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1746		ath12k_htt_pull_ppdu_stats(ab, skb);
1747		break;
1748	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1749		ath12k_debugfs_htt_ext_stats_handler(ab, skb);
1750		break;
1751	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
1752		ath12k_htt_mlo_offset_event_handler(ab, skb);
1753		break;
1754	default:
1755		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
1756			   type);
1757		break;
1758	}
1759
1760	dev_kfree_skb_any(skb);
1761}
1762
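/* Stitch an MSDU that arrived scattered across several rx buffers back into
 * @first: trim the hal descriptor and l3 pad from the head buffer, grow it
 * with pskb_expand_head() if needed, then copy the payload of each
 * continuation buffer from @msdu_list into it, freeing the donors as we go.
 */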
1763static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
1764				      struct sk_buff_head *msdu_list,
1765				      struct sk_buff *first, struct sk_buff *last,
1766				      u8 l3pad_bytes, int msdu_len)
1767{
1768	struct ath12k_base *ab = ar->ab;
1769	struct sk_buff *skb;
1770	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1771	int buf_first_hdr_len, buf_first_len;
1772	struct hal_rx_desc *ldesc;
1773	int space_extra, rem_len, buf_len;
1774	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
1775
1776	/* As the msdu is spread across multiple rx buffers, find the offset
1777	 * to the start of the msdu in order to compute the length of the
1778	 * msdu held in the first buffer.
1779	 */
1780	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1781	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1782
1783	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1784		skb_put(first, buf_first_hdr_len + msdu_len);
1785		skb_pull(first, buf_first_hdr_len);
1786		return 0;
1787	}
1788
1789	ldesc = (struct hal_rx_desc *)last->data;
1790	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
1791	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
1792
1793	/* MSDU spans over multiple buffers because the length of the MSDU
1794	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
1795	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1796	 */
1797	skb_put(first, DP_RX_BUFFER_SIZE);
1798	skb_pull(first, buf_first_hdr_len);
1799
1800	/* When an MSDU is spread over multiple buffers, the MSDU_END
1801	 * TLVs are valid only in the last buffer. Copy those TLVs.
1802	 */
1803	ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1804
1805	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1806	if (space_extra > 0 &&
1807	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1808		/* Free up all buffers of the MSDU */
1809		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1810			rxcb = ATH12K_SKB_RXCB(skb);
1811			if (!rxcb->is_continuation) {
1812				dev_kfree_skb_any(skb);
1813				break;
1814			}
1815			dev_kfree_skb_any(skb);
1816		}
1817		return -ENOMEM;
1818	}
1819
1820	rem_len = msdu_len - buf_first_len;
1821	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1822		rxcb = ATH12K_SKB_RXCB(skb);
1823		if (rxcb->is_continuation)
1824			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1825		else
1826			buf_len = rem_len;
1827
1828		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1829			WARN_ON_ONCE(1);
1830			dev_kfree_skb_any(skb);
1831			return -EINVAL;
1832		}
1833
1834		skb_put(skb, buf_len + hal_rx_desc_sz);
1835		skb_pull(skb, hal_rx_desc_sz);
1836		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1837					  buf_len);
1838		dev_kfree_skb_any(skb);
1839
1840		rem_len -= buf_len;
1841		if (!rxcb->is_continuation)
1842			break;
1843	}
1844
1845	return 0;
1846}
1847
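/* Return the buffer that terminates the MSDU starting at "first", i.e.
 * the first entry without the continuation flag set, or NULL when the
 * chain is incomplete. The MSDU_END TLVs are valid only in that last
 * buffer.
 */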
1848static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1849						      struct sk_buff *first)
1850{
1851	struct sk_buff *skb;
1852	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1853
1854	if (!rxcb->is_continuation)
1855		return first;
1856
1857	skb_queue_walk(msdu_list, skb) {
1858		rxcb = ATH12K_SKB_RXCB(skb);
1859		if (!rxcb->is_continuation)
1860			return skb;
1861	}
1862
1863	return NULL;
1864}
1865
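/* Propagate the hardware checksum verdict to the skb: mark it
 * CHECKSUM_UNNECESSARY only when neither the IP nor the L4 checksum
 * failed verification in hardware.
 */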
1866static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
1867{
1868	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1869	struct ath12k_base *ab = ar->ab;
1870	bool ip_csum_fail, l4_csum_fail;
1871
1872	ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
1873	l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
1874
1875	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1876			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1877}
1878
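/* Per-cipher MIC trailer length. WEP and WAPI fall through to the
 * warning since their trailers are not handled here.
 */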
1879static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
1880				       enum hal_encrypt_type enctype)
1881{
1882	switch (enctype) {
1883	case HAL_ENCRYPT_TYPE_OPEN:
1884	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1885	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1886		return 0;
1887	case HAL_ENCRYPT_TYPE_CCMP_128:
1888		return IEEE80211_CCMP_MIC_LEN;
1889	case HAL_ENCRYPT_TYPE_CCMP_256:
1890		return IEEE80211_CCMP_256_MIC_LEN;
1891	case HAL_ENCRYPT_TYPE_GCMP_128:
1892	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1893		return IEEE80211_GCMP_MIC_LEN;
1894	case HAL_ENCRYPT_TYPE_WEP_40:
1895	case HAL_ENCRYPT_TYPE_WEP_104:
1896	case HAL_ENCRYPT_TYPE_WEP_128:
1897	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1898	case HAL_ENCRYPT_TYPE_WAPI:
1899		break;
1900	}
1901
1902	ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1903	return 0;
1904}
1905
1906static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
1907					 enum hal_encrypt_type enctype)
1908{
1909	switch (enctype) {
1910	case HAL_ENCRYPT_TYPE_OPEN:
1911		return 0;
1912	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1913	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1914		return IEEE80211_TKIP_IV_LEN;
1915	case HAL_ENCRYPT_TYPE_CCMP_128:
1916		return IEEE80211_CCMP_HDR_LEN;
1917	case HAL_ENCRYPT_TYPE_CCMP_256:
1918		return IEEE80211_CCMP_256_HDR_LEN;
1919	case HAL_ENCRYPT_TYPE_GCMP_128:
1920	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1921		return IEEE80211_GCMP_HDR_LEN;
1922	case HAL_ENCRYPT_TYPE_WEP_40:
1923	case HAL_ENCRYPT_TYPE_WEP_104:
1924	case HAL_ENCRYPT_TYPE_WEP_128:
1925	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1926	case HAL_ENCRYPT_TYPE_WAPI:
1927		break;
1928	}
1929
1930	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1931	return 0;
1932}
1933
1934static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
1935				       enum hal_encrypt_type enctype)
1936{
1937	switch (enctype) {
1938	case HAL_ENCRYPT_TYPE_OPEN:
1939	case HAL_ENCRYPT_TYPE_CCMP_128:
1940	case HAL_ENCRYPT_TYPE_CCMP_256:
1941	case HAL_ENCRYPT_TYPE_GCMP_128:
1942	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1943		return 0;
1944	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1945	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1946		return IEEE80211_TKIP_ICV_LEN;
1947	case HAL_ENCRYPT_TYPE_WEP_40:
1948	case HAL_ENCRYPT_TYPE_WEP_104:
1949	case HAL_ENCRYPT_TYPE_WEP_128:
1950	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1951	case HAL_ENCRYPT_TYPE_WAPI:
1952		break;
1953	}
1954
1955	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1956	return 0;
1957}
1958
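/* Convert a native-wifi decapped MSDU back into a full 802.11 QoS data
 * frame for mac80211: rebuild the QoS control field, clear the order
 * bit of the stripped HT control header and re-add the crypto header
 * when the IV has not been stripped by hardware.
 */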
1959static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
1960					 struct sk_buff *msdu,
1961					 enum hal_encrypt_type enctype,
1962					 struct ieee80211_rx_status *status)
1963{
1964	struct ath12k_base *ab = ar->ab;
1965	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1966	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1967	struct ieee80211_hdr *hdr;
1968	size_t hdr_len;
1969	u8 *crypto_hdr;
1970	u16 qos_ctl;
1971
1972	/* pull decapped header */
1973	hdr = (struct ieee80211_hdr *)msdu->data;
1974	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1975	skb_pull(msdu, hdr_len);
1976
1977	/* Rebuild the QoS header */
1978	hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1979
1980	/* Reset the order bit as the HT_Control header is stripped */
1981	hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
1982
1983	qos_ctl = rxcb->tid;
1984
1985	if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
1986		qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
1987
1988	/* TODO: Add other QoS ctl fields when required */
1989
1990	/* copy decap header before overwriting for reuse below */
1991	memcpy(decap_hdr, hdr, hdr_len);
1992
1993	/* Rebuild crypto header for mac80211 use */
1994	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1995		crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
1996		ath12k_dp_rx_desc_get_crypto_header(ar->ab,
1997						    rxcb->rx_desc, crypto_hdr,
1998						    enctype);
1999	}
2000
2001	memcpy(skb_push(msdu, IEEE80211_QOS_CTL_LEN),
2002	       &qos_ctl,
2003	       IEEE80211_QOS_CTL_LEN);
2004	memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2005}
2006
2007static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
2008				       enum hal_encrypt_type enctype,
2009				       struct ieee80211_rx_status *status,
2010				       bool decrypted)
2011{
2012	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2013	struct ieee80211_hdr *hdr;
2014	size_t hdr_len;
2015	size_t crypto_len;
2016
2017	if (!rxcb->is_first_msdu ||
2018	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
2019		WARN_ON_ONCE(1);
2020		return;
2021	}
2022
2023	skb_trim(msdu, msdu->len - FCS_LEN);
2024
2025	if (!decrypted)
2026		return;
2027
2028	hdr = (void *)msdu->data;
2029
2030	/* Tail */
2031	if (status->flag & RX_FLAG_IV_STRIPPED) {
2032		skb_trim(msdu, msdu->len -
2033			 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2034
2035		skb_trim(msdu, msdu->len -
2036			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2037	} else {
2038		/* MIC */
2039		if (status->flag & RX_FLAG_MIC_STRIPPED)
2040			skb_trim(msdu, msdu->len -
2041				 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2042
2043		/* ICV */
2044		if (status->flag & RX_FLAG_ICV_STRIPPED)
2045			skb_trim(msdu, msdu->len -
2046				 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2047	}
2048
2049	/* MMIC */
2050	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2051	    !ieee80211_has_morefrags(hdr->frame_control) &&
2052	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2053		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2054
2055	/* Head */
2056	if (status->flag & RX_FLAG_IV_STRIPPED) {
2057		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2058		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2059
2060		memmove(msdu->data + crypto_len, msdu->data, hdr_len);
2061		skb_pull(msdu, crypto_len);
2062	}
2063}
2064
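/* Rebuild the 802.11 header (plus crypto header and QoS control field
 * when applicable) in front of the payload, using the MPDU fields that
 * the hardware saved in the rx descriptor.
 */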
2065static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
2066					      struct sk_buff *msdu,
2067					      struct ath12k_skb_rxcb *rxcb,
2068					      struct ieee80211_rx_status *status,
2069					      enum hal_encrypt_type enctype)
2070{
2071	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2072	struct ath12k_base *ab = ar->ab;
2073	size_t hdr_len, crypto_len;
2074	struct ieee80211_hdr *hdr;
2075	u16 qos_ctl;
2076	__le16 fc;
2077	u8 *crypto_hdr;
2078
2079	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2080		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2081		crypto_hdr = skb_push(msdu, crypto_len);
2082		ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
2083	}
2084
2085	fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
2086	hdr_len = ieee80211_hdrlen(fc);
2087	skb_push(msdu, hdr_len);
2088	hdr = (struct ieee80211_hdr *)msdu->data;
2089	hdr->frame_control = fc;
2090
2091	/* Get wifi header from rx_desc */
2092	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
2093
2094	if (rxcb->is_mcbc)
2095		status->flag &= ~RX_FLAG_PN_VALIDATED;
2096
2097	/* Add QOS header */
2098	if (ieee80211_is_data_qos(hdr->frame_control)) {
2099		qos_ctl = rxcb->tid;
2100		if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
2101			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2102
2103		/* TODO: Add other QoS ctl fields when required */
2104		memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
2105		       &qos_ctl, IEEE80211_QOS_CTL_LEN);
2106	}
2107}
2108
2109static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
2110				       struct sk_buff *msdu,
2111				       enum hal_encrypt_type enctype,
2112				       struct ieee80211_rx_status *status)
2113{
2114	struct ieee80211_hdr *hdr;
2115	struct ethhdr *eth;
2116	u8 da[ETH_ALEN];
2117	u8 sa[ETH_ALEN];
2118	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2119	struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};
2120
2121	eth = (struct ethhdr *)msdu->data;
2122	ether_addr_copy(da, eth->h_dest);
2123	ether_addr_copy(sa, eth->h_source);
2124	rfc.snap_type = eth->h_proto;
2125	skb_pull(msdu, sizeof(*eth));
2126	memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
2127	       sizeof(rfc));
2128	ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);
2129
2130	/* The original 802.11 header has a different DA, and in
2131	 * the 4-address case it may also have a different SA.
2132	 */
2133	hdr = (struct ieee80211_hdr *)msdu->data;
2134	ether_addr_copy(ieee80211_get_DA(hdr), da);
2135	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2136}
2137
2138static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
2139				   struct hal_rx_desc *rx_desc,
2140				   enum hal_encrypt_type enctype,
2141				   struct ieee80211_rx_status *status,
2142				   bool decrypted)
2143{
2144	struct ath12k_base *ab = ar->ab;
2145	u8 decap;
2146	struct ethhdr *ehdr;
2147
2148	decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);
2149
2150	switch (decap) {
2151	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2152		ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
2153		break;
2154	case DP_RX_DECAP_TYPE_RAW:
2155		ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2156					   decrypted);
2157		break;
2158	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2159		ehdr = (struct ethhdr *)msdu->data;
2160
2161		/* mac80211 allows fast path only for authorized STA */
2162		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2163			ATH12K_SKB_RXCB(msdu)->is_eapol = true;
2164			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2165			break;
2166		}
2167
2168		/* PN for mcast packets will be validated in mac80211;
2169		 * remove eth header and add 802.11 header.
2170		 */
2171		if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2172			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2173		break;
2174	case DP_RX_DECAP_TYPE_8023:
2175		/* TODO: Handle undecap for these formats */
2176		break;
2177	}
2178}
2179
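/* Look up the peer for an MSDU, first by the peer_id recorded in the rx
 * control block and, failing that, by the transmitter address from the
 * rx descriptor. Typical usage, as in ath12k_dp_rx_h_mpdu() below:
 *
 *	spin_lock_bh(&ab->base_lock);
 *	peer = ath12k_dp_rx_h_find_peer(ab, msdu);
 *	...
 *	spin_unlock_bh(&ab->base_lock);
 */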
2180struct ath12k_peer *
2181ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
2182{
2183	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2184	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2185	struct ath12k_peer *peer = NULL;
2186
2187	lockdep_assert_held(&ab->base_lock);
2188
2189	if (rxcb->peer_id)
2190		peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);
2191
2192	if (peer)
2193		return peer;
2194
2195	if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2196		return NULL;
2197
2198	peer = ath12k_peer_find_by_addr(ab,
2199					ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
2200									      rx_desc));
2201	return peer;
2202}
2203
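/* Per-MPDU rx processing: resolve the peer to determine the cipher,
 * translate the hardware error and decryption status into mac80211 rx
 * flags, apply the checksum verdict and undecap the frame.
 */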
2204static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
2205				struct sk_buff *msdu,
2206				struct hal_rx_desc *rx_desc,
2207				struct ieee80211_rx_status *rx_status)
2208{
2209	bool  fill_crypto_hdr;
2210	struct ath12k_base *ab = ar->ab;
2211	struct ath12k_skb_rxcb *rxcb;
2212	enum hal_encrypt_type enctype;
2213	bool is_decrypted = false;
2214	struct ieee80211_hdr *hdr;
2215	struct ath12k_peer *peer;
2216	u32 err_bitmap;
2217
2218	/* PN for multicast packets will be checked in mac80211 */
2219	rxcb = ATH12K_SKB_RXCB(msdu);
2220	fill_crypto_hdr = ath12k_dp_rx_h_is_da_mcbc(ar->ab, rx_desc);
2221	rxcb->is_mcbc = fill_crypto_hdr;
2222
2223	if (rxcb->is_mcbc)
2224		rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
2225
2226	spin_lock_bh(&ar->ab->base_lock);
2227	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
2228	if (peer) {
2229		if (rxcb->is_mcbc)
2230			enctype = peer->sec_type_grp;
2231		else
2232			enctype = peer->sec_type;
2233	} else {
2234		enctype = HAL_ENCRYPT_TYPE_OPEN;
2235	}
2236	spin_unlock_bh(&ar->ab->base_lock);
2237
2238	err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
2239	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2240		is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
2241
2242	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
2243	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2244			     RX_FLAG_MMIC_ERROR |
2245			     RX_FLAG_DECRYPTED |
2246			     RX_FLAG_IV_STRIPPED |
2247			     RX_FLAG_MMIC_STRIPPED);
2248
2249	if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
2250		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2251	if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
2252		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2253
2254	if (is_decrypted) {
2255		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2256
2257		if (fill_crypto_hdr)
2258			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2259					RX_FLAG_ICV_STRIPPED;
2260		else
2261			rx_status->flag |= RX_FLAG_IV_STRIPPED |
2262					   RX_FLAG_PN_VALIDATED;
2263	}
2264
2265	ath12k_dp_rx_h_csum_offload(ar, msdu);
2266	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
2267			       enctype, rx_status, is_decrypted);
2268
2269	if (!is_decrypted || fill_crypto_hdr)
2270		return;
2271
2272	if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
2273	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2274		hdr = (void *)msdu->data;
2275		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2276	}
2277}
2278
2279static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
2280				struct ieee80211_rx_status *rx_status)
2281{
2282	struct ath12k_base *ab = ar->ab;
2283	struct ieee80211_supported_band *sband;
2284	enum rx_msdu_start_pkt_type pkt_type;
2285	u8 bw;
2286	u8 rate_mcs, nss;
2287	u8 sgi;
2288	bool is_cck;
2289
2290	pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
2291	bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
2292	rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
2293	nss = ath12k_dp_rx_h_nss(ab, rx_desc);
2294	sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
2295
2296	switch (pkt_type) {
2297	case RX_MSDU_START_PKT_TYPE_11A:
2298	case RX_MSDU_START_PKT_TYPE_11B:
2299		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2300		sband = &ar->mac.sbands[rx_status->band];
2301		rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
2302								is_cck);
2303		break;
2304	case RX_MSDU_START_PKT_TYPE_11N:
2305		rx_status->encoding = RX_ENC_HT;
2306		if (rate_mcs > ATH12K_HT_MCS_MAX) {
2307			ath12k_warn(ar->ab,
2308				    "Received with invalid mcs in HT mode %d\n",
2309				     rate_mcs);
2310			break;
2311		}
2312		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2313		if (sgi)
2314			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2315		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2316		break;
2317	case RX_MSDU_START_PKT_TYPE_11AC:
2318		rx_status->encoding = RX_ENC_VHT;
2319		rx_status->rate_idx = rate_mcs;
2320		if (rate_mcs > ATH12K_VHT_MCS_MAX) {
2321			ath12k_warn(ar->ab,
2322				    "Received with invalid mcs in VHT mode %d\n",
2323				     rate_mcs);
2324			break;
2325		}
2326		rx_status->nss = nss;
2327		if (sgi)
2328			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2329		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2330		break;
2331	case RX_MSDU_START_PKT_TYPE_11AX:
2332		rx_status->rate_idx = rate_mcs;
2333		if (rate_mcs > ATH12K_HE_MCS_MAX) {
2334			ath12k_warn(ar->ab,
2335				    "Received with invalid mcs in HE mode %d\n",
2336				    rate_mcs);
2337			break;
2338		}
2339		rx_status->encoding = RX_ENC_HE;
2340		rx_status->nss = nss;
2341		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
2342		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2343		break;
2344	}
2345}
2346
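/* Fill in the PPDU-level rx status (band, frequency, rate). The band is
 * taken from the 6 GHz center frequency when present, deduced from the
 * channel number otherwise, with the current rx channel as a fallback.
 */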
2347void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
2348			 struct ieee80211_rx_status *rx_status)
2349{
2350	struct ath12k_base *ab = ar->ab;
2351	u8 channel_num;
2352	u32 center_freq, meta_data;
2353	struct ieee80211_channel *channel;
2354
2355	rx_status->freq = 0;
2356	rx_status->rate_idx = 0;
2357	rx_status->nss = 0;
2358	rx_status->encoding = RX_ENC_LEGACY;
2359	rx_status->bw = RATE_INFO_BW_20;
2360	rx_status->enc_flags = 0;
2361
2362	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2363
2364	meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
2365	channel_num = meta_data;
2366	center_freq = meta_data >> 16;
2367
2368	if (center_freq >= ATH12K_MIN_6G_FREQ &&
2369	    center_freq <= ATH12K_MAX_6G_FREQ) {
2370		rx_status->band = NL80211_BAND_6GHZ;
2371		rx_status->freq = center_freq;
2372	} else if (channel_num >= 1 && channel_num <= 14) {
2373		rx_status->band = NL80211_BAND_2GHZ;
2374	} else if (channel_num >= 36 && channel_num <= 173) {
2375		rx_status->band = NL80211_BAND_5GHZ;
2376	} else {
2377		spin_lock_bh(&ar->data_lock);
2378		channel = ar->rx_channel;
2379		if (channel) {
2380			rx_status->band = channel->band;
2381			channel_num =
2382				ieee80211_frequency_to_channel(channel->center_freq);
2383		}
2384		spin_unlock_bh(&ar->data_lock);
2385		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
2386				rx_desc, sizeof(*rx_desc));
2387	}
2388
2389	if (rx_status->band != NL80211_BAND_6GHZ)
2390		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2391								 rx_status->band);
2392
2393	ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
2394}
2395
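/* Hand a fully processed MSDU to mac80211. For HE frames a minimal
 * radiotap HE header is prepended first; ethernet-decapped frames are
 * flagged RX_FLAG_8023 for the 802.3 fast path, except EAPOL frames and
 * decrypted multicast, which stay on the slow path.
 */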
2396static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
2397				      struct sk_buff *msdu,
2398				      struct ieee80211_rx_status *status)
2399{
2400	struct ath12k_base *ab = ar->ab;
2401	static const struct ieee80211_radiotap_he known = {
2402		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2403				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2404		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2405	};
2406	struct ieee80211_radiotap_he *he;
2407	struct ieee80211_rx_status *rx_status;
2408	struct ieee80211_sta *pubsta;
2409	struct ath12k_peer *peer;
2410	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2411	u8 decap = DP_RX_DECAP_TYPE_RAW;
2412	bool is_mcbc = rxcb->is_mcbc;
2413	bool is_eapol = rxcb->is_eapol;
2414
2415	if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2416	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2417		he = skb_push(msdu, sizeof(known));
2418		memcpy(he, &known, sizeof(known));
2419		status->flag |= RX_FLAG_RADIOTAP_HE;
2420	}
2421
2422	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2423		decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
2424
2425	spin_lock_bh(&ab->base_lock);
2426	peer = ath12k_dp_rx_h_find_peer(ab, msdu);
2427
2428	pubsta = peer ? peer->sta : NULL;
2429
2430	spin_unlock_bh(&ab->base_lock);
2431
2432	ath12k_dbg(ab, ATH12K_DBG_DATA,
2433		   "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2434		   msdu,
2435		   msdu->len,
2436		   peer ? peer->addr : NULL,
2437		   rxcb->tid,
2438		   is_mcbc ? "mcast" : "ucast",
2439		   ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
2440		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2441		   (status->encoding == RX_ENC_HT) ? "ht" : "",
2442		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
2443		   (status->encoding == RX_ENC_HE) ? "he" : "",
2444		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
2445		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
2446		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
2447		   (status->bw == RATE_INFO_BW_320) ? "320" : "",
2448		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2449		   status->rate_idx,
2450		   status->nss,
2451		   status->freq,
2452		   status->band, status->flag,
2453		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2454		   !!(status->flag & RX_FLAG_MMIC_ERROR),
2455		   !!(status->flag & RX_FLAG_AMSDU_MORE));
2456
2457	ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
2458			msdu->data, msdu->len);
2459
2460	rx_status = IEEE80211_SKB_RXCB(msdu);
2461	*rx_status = *status;
2462
2463	/* TODO: trace rx packet */
2464
2465	/* The PN for multicast packets is not validated in HW,
2466	 * so skip the 802.3 rx path for them.
2467	 * Also, fast_rx expects the STA to be authorized, hence
2468	 * EAPOL packets are sent via the slow path.
2469	 */
2470	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2471	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2472		rx_status->flag |= RX_FLAG_8023;
2473
2474	ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
2475}
2476
2477static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
2478				     struct sk_buff *msdu,
2479				     struct sk_buff_head *msdu_list,
2480				     struct ieee80211_rx_status *rx_status)
2481{
2482	struct ath12k_base *ab = ar->ab;
2483	struct hal_rx_desc *rx_desc, *lrx_desc;
2484	struct ath12k_skb_rxcb *rxcb;
2485	struct sk_buff *last_buf;
2486	u8 l3_pad_bytes;
2487	u16 msdu_len;
2488	int ret;
2489	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
2490
2491	last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2492	if (!last_buf) {
2493		ath12k_warn(ab,
2494			    "No valid Rx buffer to access MSDU_END tlv\n");
2495		ret = -EIO;
2496		goto free_out;
2497	}
2498
2499	rx_desc = (struct hal_rx_desc *)msdu->data;
2500	lrx_desc = (struct hal_rx_desc *)last_buf->data;
2501	if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
2502		ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
2503		ret = -EIO;
2504		goto free_out;
2505	}
2506
2507	rxcb = ATH12K_SKB_RXCB(msdu);
2508	rxcb->rx_desc = rx_desc;
2509	msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
2510	l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);
2511
2512	if (rxcb->is_frag) {
2513		skb_pull(msdu, hal_rx_desc_sz);
2514	} else if (!rxcb->is_continuation) {
2515		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2516			ret = -EINVAL;
2517			ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
2518			ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
2519					sizeof(*rx_desc));
2520			goto free_out;
2521		}
2522		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2523		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2524	} else {
2525		ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
2526						 msdu, last_buf,
2527						 l3_pad_bytes, msdu_len);
2528		if (ret) {
2529			ath12k_warn(ab,
2530				    "failed to coalesce msdu rx buffer %d\n", ret);
2531			goto free_out;
2532		}
2533	}
2534
2535	ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2536	ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2537
2538	rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2539
2540	return 0;
2541
2542free_out:
2543	return ret;
2544}
2545
2546static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
2547						  struct napi_struct *napi,
2548						  struct sk_buff_head *msdu_list,
2549						  int ring_id)
2550{
2551	struct ieee80211_rx_status rx_status = {0};
2552	struct ath12k_skb_rxcb *rxcb;
2553	struct sk_buff *msdu;
2554	struct ath12k *ar;
2555	u8 mac_id, pdev_id;
2556	int ret;
2557
2558	if (skb_queue_empty(msdu_list))
2559		return;
2560
2561	rcu_read_lock();
2562
2563	while ((msdu = __skb_dequeue(msdu_list))) {
2564		rxcb = ATH12K_SKB_RXCB(msdu);
2565		mac_id = rxcb->mac_id;
2566		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
2567		ar = ab->pdevs[pdev_id].ar;
2568		if (!rcu_dereference(ab->pdevs_active[pdev_id])) {
2569			dev_kfree_skb_any(msdu);
2570			continue;
2571		}
2572
2573		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
2574			dev_kfree_skb_any(msdu);
2575			continue;
2576		}
2577
2578		ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2579		if (ret) {
2580			ath12k_dbg(ab, ATH12K_DBG_DATA,
2581				   "Unable to process msdu %d\n", ret);
2582			dev_kfree_skb_any(msdu);
2583			continue;
2584		}
2585
2586		ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2587	}
2588
2589	rcu_read_unlock();
2590}
2591
2592static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
2593				    enum ath12k_peer_metadata_version ver,
2594				    __le32 peer_metadata)
2595{
2596	switch (ver) {
2597	default:
2598		ath12k_warn(ab, "Unknown peer metadata version: %d\n", ver);
2599		fallthrough;
2600	case ATH12K_PEER_METADATA_V0:
2601		return le32_get_bits(peer_metadata,
2602				     RX_MPDU_DESC_META_DATA_V0_PEER_ID);
2603	case ATH12K_PEER_METADATA_V1:
2604		return le32_get_bits(peer_metadata,
2605				     RX_MPDU_DESC_META_DATA_V1_PEER_ID);
2606	case ATH12K_PEER_METADATA_V1A:
2607		return le32_get_bits(peer_metadata,
2608				     RX_MPDU_DESC_META_DATA_V1A_PEER_ID);
2609	case ATH12K_PEER_METADATA_V1B:
2610		return le32_get_bits(peer_metadata,
2611				     RX_MPDU_DESC_META_DATA_V1B_PEER_ID);
2612	}
2613}
2614
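/* NAPI handler for one REO destination ring: reap up to "budget"
 * completed MSDUs, unmap their buffers, queue them for processing and
 * replenish the rx refill ring with the reaped descriptors.
 */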
2615int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
2616			 struct napi_struct *napi, int budget)
2617{
2618	LIST_HEAD(rx_desc_used_list);
2619	struct ath12k_rx_desc_info *desc_info;
2620	struct ath12k_dp *dp = &ab->dp;
2621	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
2622	struct hal_reo_dest_ring *desc;
2623	int num_buffs_reaped = 0;
2624	struct sk_buff_head msdu_list;
2625	struct ath12k_skb_rxcb *rxcb;
2626	int total_msdu_reaped = 0;
2627	struct hal_srng *srng;
2628	struct sk_buff *msdu;
2629	bool done = false;
2630	int mac_id;
2631	u64 desc_va;
2632
2633	__skb_queue_head_init(&msdu_list);
2634
2635	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2636
2637	spin_lock_bh(&srng->lock);
2638
2639try_again:
2640	ath12k_hal_srng_access_begin(ab, srng);
2641
2642	while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
2643		struct rx_mpdu_desc *mpdu_info;
2644		struct rx_msdu_desc *msdu_info;
2645		enum hal_reo_dest_ring_push_reason push_reason;
2646		u32 cookie;
2647
2648		cookie = le32_get_bits(desc->buf_addr_info.info1,
2649				       BUFFER_ADDR_INFO1_SW_COOKIE);
2650
2651		mac_id = le32_get_bits(desc->info0,
2652				       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
2653
2654		desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
2655			   le32_to_cpu(desc->buf_va_lo));
2656		desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
2657
2658		/* retry manual desc retrieval */
2659		if (!desc_info) {
2660			desc_info = ath12k_dp_get_rx_desc(ab, cookie);
2661			if (!desc_info) {
2662				ath12k_warn(ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
2663					    cookie);
2664				continue;
2665			}
2666		}
2667
2668		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
2669			ath12k_warn(ab, "Check HW CC implementation\n");
2670
2671		msdu = desc_info->skb;
2672		desc_info->skb = NULL;
2673
2674		list_add_tail(&desc_info->list, &rx_desc_used_list);
2675
2676		rxcb = ATH12K_SKB_RXCB(msdu);
2677		dma_unmap_single(ab->dev, rxcb->paddr,
2678				 msdu->len + skb_tailroom(msdu),
2679				 DMA_FROM_DEVICE);
2680
2681		num_buffs_reaped++;
2682
2683		push_reason = le32_get_bits(desc->info0,
2684					    HAL_REO_DEST_RING_INFO0_PUSH_REASON);
2685		if (push_reason !=
2686		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
2687			dev_kfree_skb_any(msdu);
2688			ab->soc_stats.hal_reo_error[ring_id]++;
2689			continue;
2690		}
2691
2692		msdu_info = &desc->rx_msdu_info;
2693		mpdu_info = &desc->rx_mpdu_info;
2694
2695		rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) &
2696					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2697		rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) &
2698					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2699		rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
2700					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2701		rxcb->mac_id = mac_id;
2702		rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
2703							 mpdu_info->peer_meta_data);
2704		rxcb->tid = le32_get_bits(mpdu_info->info0,
2705					  RX_MPDU_DESC_INFO0_TID);
2706
2707		__skb_queue_tail(&msdu_list, msdu);
2708
2709		if (!rxcb->is_continuation) {
2710			total_msdu_reaped++;
2711			done = true;
2712		} else {
2713			done = false;
2714		}
2715
2716		if (total_msdu_reaped >= budget)
2717			break;
2718	}
2719
2720	/* HW might have updated the head pointer after we cached it.
2721	 * In that case, even though there are entries in the ring, we'll
2722	 * get a NULL rx_desc. Give the read another try with the updated
2723	 * cached head pointer so that we can reap a complete MPDU in the
2724	 * current rx processing.
2725	 */
2726	if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
2727		ath12k_hal_srng_access_end(ab, srng);
2728		goto try_again;
2729	}
2730
2731	ath12k_hal_srng_access_end(ab, srng);
2732
2733	spin_unlock_bh(&srng->lock);
2734
2735	if (!total_msdu_reaped)
2736		goto exit;
2737
2738	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
2739				    num_buffs_reaped);
2740
2741	ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
2742					      ring_id);
2743
2744exit:
2745	return total_msdu_reaped;
2746}
2747
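/* Reassembly timeout: if the fragment sequence is still incomplete when
 * the timer fires, flush the stored fragments for this TID.
 */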
2748static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
2749{
2750	struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
2751
2752	spin_lock_bh(&rx_tid->ab->base_lock);
2753	if (rx_tid->last_frag_no &&
2754	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
2755		spin_unlock_bh(&rx_tid->ab->base_lock);
2756		return;
2757	}
2758	ath12k_dp_rx_frags_cleanup(rx_tid, true);
2759	spin_unlock_bh(&rx_tid->ab->base_lock);
2760}
2761
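/* Set up per-TID defragmentation state for a peer, including the
 * michael_mic transform used to verify the TKIP MIC of reassembled
 * MPDUs in software.
 */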
2762int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
2763{
2764	struct ath12k_base *ab = ar->ab;
2765	struct crypto_shash *tfm;
2766	struct ath12k_peer *peer;
2767	struct ath12k_dp_rx_tid *rx_tid;
2768	int i;
2769
2770	tfm = crypto_alloc_shash("michael_mic", 0, 0);
2771	if (IS_ERR(tfm))
2772		return PTR_ERR(tfm);
2773
2774	spin_lock_bh(&ab->base_lock);
2775
2776	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
2777	if (!peer) {
2778		spin_unlock_bh(&ab->base_lock);
2779		crypto_free_shash(tfm);
2780		ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
2781		return -ENOENT;
2782	}
2783
2784	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
2785		rx_tid = &peer->rx_tid[i];
2786		rx_tid->ab = ab;
2787		timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
2788		skb_queue_head_init(&rx_tid->rx_frags);
2789	}
2790
2791	peer->tfm_mmic = tfm;
2792	peer->dp_setup_done = true;
2793	spin_unlock_bh(&ab->base_lock);
2794
2795	return 0;
2796}
2797
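/* Compute the Michael MIC over the TKIP MIC header (DA, SA, priority)
 * and the payload with an 8-byte MIC key, as TKIP requires.
 */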
2798static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
2799				      struct ieee80211_hdr *hdr, u8 *data,
2800				      size_t data_len, u8 *mic)
2801{
2802	SHASH_DESC_ON_STACK(desc, tfm);
2803	u8 mic_hdr[16] = {0};
2804	u8 tid = 0;
2805	int ret;
2806
2807	if (!tfm)
2808		return -EINVAL;
2809
2810	desc->tfm = tfm;
2811
2812	ret = crypto_shash_setkey(tfm, key, 8);
2813	if (ret)
2814		goto out;
2815
2816	ret = crypto_shash_init(desc);
2817	if (ret)
2818		goto out;
2819
2820	/* TKIP MIC header */
2821	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
2822	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
2823	if (ieee80211_is_data_qos(hdr->frame_control))
2824		tid = ieee80211_get_tid(hdr);
2825	mic_hdr[12] = tid;
2826
2827	ret = crypto_shash_update(desc, mic_hdr, 16);
2828	if (ret)
2829		goto out;
2830	ret = crypto_shash_update(desc, data, data_len);
2831	if (ret)
2832		goto out;
2833	ret = crypto_shash_final(desc, mic);
2834out:
2835	shash_desc_zero(desc);
2836	return ret;
2837}
2838
2839static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer,
2840					  struct sk_buff *msdu)
2841{
2842	struct ath12k_base *ab = ar->ab;
2843	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
2844	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
2845	struct ieee80211_key_conf *key_conf;
2846	struct ieee80211_hdr *hdr;
2847	u8 mic[IEEE80211_CCMP_MIC_LEN];
2848	int head_len, tail_len, ret;
2849	size_t data_len;
2850	u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
2851	u8 *key, *data;
2852	u8 key_idx;
2853
2854	if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
2855		return 0;
2856
2857	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
2858	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2859	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
2860	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
2861
2862	if (!is_multicast_ether_addr(hdr->addr1))
2863		key_idx = peer->ucast_keyidx;
2864	else
2865		key_idx = peer->mcast_keyidx;
2866
2867	key_conf = peer->keys[key_idx];
2868
2869	data = msdu->data + head_len;
2870	data_len = msdu->len - head_len - tail_len;
2871	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
2872
2873	ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
2874	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
2875		goto mic_fail;
2876
2877	return 0;
2878
2879mic_fail:
2880	(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
2881	(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
2882
2883	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
2884		    RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
2885	skb_pull(msdu, hal_rx_desc_sz);
2886
2887	ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
2888	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
2889			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
2890	ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
2891	return -EINVAL;
2892}
2893
2894static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
2895					enum hal_encrypt_type enctype, u32 flags)
2896{
2897	struct ieee80211_hdr *hdr;
2898	size_t hdr_len;
2899	size_t crypto_len;
2900	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
2901
2902	if (!flags)
2903		return;
2904
2905	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
2906
2907	if (flags & RX_FLAG_MIC_STRIPPED)
2908		skb_trim(msdu, msdu->len -
2909			 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2910
2911	if (flags & RX_FLAG_ICV_STRIPPED)
2912		skb_trim(msdu, msdu->len -
2913			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2914
2915	if (flags & RX_FLAG_IV_STRIPPED) {
2916		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2917		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2918
2919		memmove(msdu->data + hal_rx_desc_sz + crypto_len,
2920			msdu->data + hal_rx_desc_sz, hdr_len);
2921		skb_pull(msdu, crypto_len);
2922	}
2923}
2924
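/* Strip the per-fragment crypto material and 802.11 headers, then
 * append each fragment's payload to the first one to form the
 * reassembled MPDU; the TKIP MIC is verified on the result.
 */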
2925static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
2926				 struct ath12k_peer *peer,
2927				 struct ath12k_dp_rx_tid *rx_tid,
2928				 struct sk_buff **defrag_skb)
2929{
2930	struct ath12k_base *ab = ar->ab;
2931	struct hal_rx_desc *rx_desc;
2932	struct sk_buff *skb, *first_frag, *last_frag;
2933	struct ieee80211_hdr *hdr;
2934	enum hal_encrypt_type enctype;
2935	bool is_decrypted = false;
2936	int msdu_len = 0;
2937	int extra_space;
2938	u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
2939
2940	first_frag = skb_peek(&rx_tid->rx_frags);
2941	last_frag = skb_peek_tail(&rx_tid->rx_frags);
2942
2943	skb_queue_walk(&rx_tid->rx_frags, skb) {
2944		flags = 0;
2945		rx_desc = (struct hal_rx_desc *)skb->data;
2946		hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
2947
2948		enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
2949		if (enctype != HAL_ENCRYPT_TYPE_OPEN)
2950			is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
2951								   rx_desc);
2952
2953		if (is_decrypted) {
2954			if (skb != first_frag)
2955				flags |= RX_FLAG_IV_STRIPPED;
2956			if (skb != last_frag)
2957				flags |= RX_FLAG_ICV_STRIPPED |
2958					 RX_FLAG_MIC_STRIPPED;
2959		}
2960
2961		/* RX fragments are always raw packets */
2962		if (skb != last_frag)
2963			skb_trim(skb, skb->len - FCS_LEN);
2964		ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
2965
2966		if (skb != first_frag)
2967			skb_pull(skb, hal_rx_desc_sz +
2968				      ieee80211_hdrlen(hdr->frame_control));
2969		msdu_len += skb->len;
2970	}
2971
2972	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
2973	if (extra_space > 0 &&
2974	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
2975		return -ENOMEM;
2976
2977	__skb_unlink(first_frag, &rx_tid->rx_frags);
2978	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
2979		skb_put_data(first_frag, skb->data, skb->len);
2980		dev_kfree_skb_any(skb);
2981	}
2982
2983	hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
2984	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
2985	ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
2986
2987	if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
2988		first_frag = NULL;
2989
2990	*defrag_skb = first_frag;
2991	return 0;
2992}
2993
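/* Reinject a reassembled MPDU into the REO entrance ring as a single
 * raw MSDU so that it goes through regular REO processing (including PN
 * checking, since RX_MPDU_DESC_INFO0_VALID_PN is set) once more.
 */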
2994static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
2995					      struct ath12k_dp_rx_tid *rx_tid,
2996					      struct sk_buff *defrag_skb)
2997{
2998	struct ath12k_base *ab = ar->ab;
2999	struct ath12k_dp *dp = &ab->dp;
3000	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3001	struct hal_reo_entrance_ring *reo_ent_ring;
3002	struct hal_reo_dest_ring *reo_dest_ring;
3003	struct dp_link_desc_bank *link_desc_banks;
3004	struct hal_rx_msdu_link *msdu_link;
3005	struct hal_rx_msdu_details *msdu0;
3006	struct hal_srng *srng;
3007	dma_addr_t link_paddr, buf_paddr;
3008	u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
3009	u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi;
3010	int ret;
3011	struct ath12k_rx_desc_info *desc_info;
3012	enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm;
3013	u8 dst_ind;
3014
3015	hal_rx_desc_sz = ab->hal.hal_desc_sz;
3016	link_desc_banks = dp->link_desc_banks;
3017	reo_dest_ring = rx_tid->dst_ring_desc;
3018
3019	ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
3020					&link_paddr, &cookie);
3021	desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
3022
3023	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3024			(link_paddr - link_desc_banks[desc_bank].paddr));
3025	msdu0 = &msdu_link->msdu_link[0];
3026	msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
3027	dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
3028
3029	memset(msdu0, 0, sizeof(*msdu0));
3030
3031	msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
3032		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
3033		    u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
3034		    u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
3035				    RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
3036		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
3037		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
3038	msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
3039	msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
3040
3041	/* change msdu len in hal rx desc */
3042	ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3043
3044	buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
3045				   defrag_skb->len + skb_tailroom(defrag_skb),
3046				   DMA_TO_DEVICE);
3047	if (dma_mapping_error(ab->dev, buf_paddr))
3048		return -ENOMEM;
3049
3050	spin_lock_bh(&dp->rx_desc_lock);
3051	desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
3052					     struct ath12k_rx_desc_info,
3053					     list);
3054	if (!desc_info) {
3055		spin_unlock_bh(&dp->rx_desc_lock);
3056		ath12k_warn(ab, "failed to find rx desc for reinject\n");
3057		ret = -ENOMEM;
3058		goto err_unmap_dma;
3059	}
3060
3061	desc_info->skb = defrag_skb;
3062	desc_info->in_use = true;
3063
3064	list_del(&desc_info->list);
3065	spin_unlock_bh(&dp->rx_desc_lock);
3066
3067	ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
3068
3069	ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
3070					desc_info->cookie,
3071					HAL_RX_BUF_RBM_SW3_BM);
3072
3073	/* Fill mpdu details into reo entrance ring */
3074	srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
3075
3076	spin_lock_bh(&srng->lock);
3077	ath12k_hal_srng_access_begin(ab, srng);
3078
3079	reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
3080	if (!reo_ent_ring) {
3081		ath12k_hal_srng_access_end(ab, srng);
3082		spin_unlock_bh(&srng->lock);
3083		ret = -ENOSPC;
3084		goto err_free_desc;
3085	}
3086	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3087
3088	ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
3089					cookie,
3090					idle_link_rbm);
3091
3092	mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
3093		    u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
3094		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
3095		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
3096		    u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
3097
3098	reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
3099	reo_ent_ring->rx_mpdu_info.peer_meta_data =
3100		reo_dest_ring->rx_mpdu_info.peer_meta_data;
3101
3102	reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr));
3103	queue_addr_hi = upper_32_bits(rx_tid->paddr);
3104	reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
3105					       HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
3106			      le32_encode_bits(dst_ind,
3107					       HAL_REO_ENTR_RING_INFO0_DEST_IND);
3108
3109	reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
3110					       HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
3111	dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
3112					HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3113	reo_ent_ring->info2 =
3114		cpu_to_le32(u32_get_bits(dest_ring_info0,
3115					 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));
3116
3117	ath12k_hal_srng_access_end(ab, srng);
3118	spin_unlock_bh(&srng->lock);
3119
3120	return 0;
3121
3122err_free_desc:
3123	spin_lock_bh(&dp->rx_desc_lock);
3124	desc_info->in_use = false;
3125	desc_info->skb = NULL;
3126	list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
3127	spin_unlock_bh(&dp->rx_desc_lock);
3128err_unmap_dma:
3129	dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3130			 DMA_TO_DEVICE);
3131	return ret;
3132}
3133
3134static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
3135				    struct sk_buff *a, struct sk_buff *b)
3136{
3137	int frag1, frag2;
3138
3139	frag1 = ath12k_dp_rx_h_frag_no(ab, a);
3140	frag2 = ath12k_dp_rx_h_frag_no(ab, b);
3141
3142	return frag1 - frag2;
3143}
3144
3145static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
3146				      struct sk_buff_head *frag_list,
3147				      struct sk_buff *cur_frag)
3148{
3149	struct sk_buff *skb;
3150	int cmp;
3151
3152	skb_queue_walk(frag_list, skb) {
3153		cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
3154		if (cmp < 0)
3155			continue;
3156		__skb_queue_before(frag_list, skb, cur_frag);
3157		return;
3158	}
3159	__skb_queue_tail(frag_list, cur_frag);
3160}
3161
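/* Extract the 48-bit CCMP/GCMP packet number from the IV following the
 * 802.11 header; IV bytes 2 and 3 carry no PN bits and are skipped.
 */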
3162static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
3163{
3164	struct ieee80211_hdr *hdr;
3165	u64 pn = 0;
3166	u8 *ehdr;
3167	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3168
3169	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3170	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3171
3172	pn = ehdr[0];
3173	pn |= (u64)ehdr[1] << 8;
3174	pn |= (u64)ehdr[4] << 16;
3175	pn |= (u64)ehdr[5] << 24;
3176	pn |= (u64)ehdr[6] << 32;
3177	pn |= (u64)ehdr[7] << 40;
3178
3179	return pn;
3180}
3181
3182static bool
3183ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)
3184{
3185	struct ath12k_base *ab = ar->ab;
3186	enum hal_encrypt_type encrypt_type;
3187	struct sk_buff *first_frag, *skb;
3188	struct hal_rx_desc *desc;
3189	u64 last_pn;
3190	u64 cur_pn;
3191
3192	first_frag = skb_peek(&rx_tid->rx_frags);
3193	desc = (struct hal_rx_desc *)first_frag->data;
3194
3195	encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);
3196	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3197	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3198	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3199	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3200		return true;
3201
3202	last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
3203	skb_queue_walk(&rx_tid->rx_frags, skb) {
3204		if (skb == first_frag)
3205			continue;
3206
3207		cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);
3208		if (cur_pn != last_pn + 1)
3209			return false;
3210		last_pn = cur_pn;
3211	}
3212	return true;
3213}
3214
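/* Collect rx fragments per peer/TID, ordered by fragment number. Once
 * all fragments up to last_frag_no are present (and the PN sequence is
 * consecutive for AES ciphers), the reassembled MPDU is reinjected into
 * the REO entrance ring; otherwise a timer eventually flushes the
 * partial state.
 */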
3215static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
3216				    struct sk_buff *msdu,
3217				    struct hal_reo_dest_ring *ring_desc)
3218{
3219	struct ath12k_base *ab = ar->ab;
3220	struct hal_rx_desc *rx_desc;
3221	struct ath12k_peer *peer;
3222	struct ath12k_dp_rx_tid *rx_tid;
3223	struct sk_buff *defrag_skb = NULL;
3224	u32 peer_id;
3225	u16 seqno, frag_no;
3226	u8 tid;
3227	int ret = 0;
3228	bool more_frags;
3229
3230	rx_desc = (struct hal_rx_desc *)msdu->data;
3231	peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
3232	tid = ath12k_dp_rx_h_tid(ab, rx_desc);
3233	seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);
3234	frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
3235	more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);
3236
3237	if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||
3238	    !ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||
3239	    tid > IEEE80211_NUM_TIDS)
3240		return -EINVAL;
3241
3242	/* Received an unfragmented packet in the REO
3243	 * exception ring; this shouldn't happen, as
3244	 * such packets typically come from the
3245	 * reo2sw srngs.
3246	 */
3247	if (WARN_ON_ONCE(!frag_no && !more_frags))
3248		return -EINVAL;
3249
3250	spin_lock_bh(&ab->base_lock);
3251	peer = ath12k_peer_find_by_id(ab, peer_id);
3252	if (!peer) {
3253		ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3254			    peer_id);
3255		ret = -ENOENT;
3256		goto out_unlock;
3257	}
3258
3259	if (!peer->dp_setup_done) {
3260		ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
3261			    peer->addr, peer_id);
3262		ret = -ENOENT;
3263		goto out_unlock;
3264	}
3265
3266	rx_tid = &peer->rx_tid[tid];
3267
3268	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3269	    skb_queue_empty(&rx_tid->rx_frags)) {
3270		/* Flush stored fragments and start a new sequence */
3271		ath12k_dp_rx_frags_cleanup(rx_tid, true);
3272		rx_tid->cur_sn = seqno;
3273	}
3274
3275	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3276		/* Fragment already present */
3277		ret = -EINVAL;
3278		goto out_unlock;
3279	}
3280
3281	if (!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))
3282		__skb_queue_tail(&rx_tid->rx_frags, msdu);
3283	else
3284		ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
3285
3286	rx_tid->rx_frag_bitmap |= BIT(frag_no);
3287	if (!more_frags)
3288		rx_tid->last_frag_no = frag_no;
3289
3290	if (frag_no == 0) {
3291		rx_tid->dst_ring_desc = kmemdup(ring_desc,
3292						sizeof(*rx_tid->dst_ring_desc),
3293						GFP_ATOMIC);
3294		if (!rx_tid->dst_ring_desc) {
3295			ret = -ENOMEM;
3296			goto out_unlock;
3297		}
3298	} else {
3299		ath12k_dp_rx_link_desc_return(ab, ring_desc,
3300					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3301	}
3302
3303	if (!rx_tid->last_frag_no ||
3304	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3305		mod_timer(&rx_tid->frag_timer, jiffies +
3306					       ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
3307		goto out_unlock;
3308	}
3309
3310	spin_unlock_bh(&ab->base_lock);
3311	del_timer_sync(&rx_tid->frag_timer);
3312	spin_lock_bh(&ab->base_lock);
3313
3314	peer = ath12k_peer_find_by_id(ab, peer_id);
3315	if (!peer)
3316		goto err_frags_cleanup;
3317
3318	if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3319		goto err_frags_cleanup;
3320
3321	if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3322		goto err_frags_cleanup;
3323
3324	if (!defrag_skb)
3325		goto err_frags_cleanup;
3326
3327	if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3328		goto err_frags_cleanup;
3329
3330	ath12k_dp_rx_frags_cleanup(rx_tid, false);
3331	goto out_unlock;
3332
3333err_frags_cleanup:
3334	dev_kfree_skb_any(defrag_skb);
3335	ath12k_dp_rx_frags_cleanup(rx_tid, true);
3336out_unlock:
3337	spin_unlock_bh(&ab->base_lock);
3338	return ret;
3339}
3340
3341static int
3342ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
3343			     struct list_head *used_list,
3344			     bool drop, u32 cookie)
3345{
3346	struct ath12k_base *ab = ar->ab;
3347	struct sk_buff *msdu;
3348	struct ath12k_skb_rxcb *rxcb;
3349	struct hal_rx_desc *rx_desc;
3350	u16 msdu_len;
3351	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
3352	struct ath12k_rx_desc_info *desc_info;
3353	u64 desc_va;
3354
3355	desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
3356		   le32_to_cpu(desc->buf_va_lo));
3357	desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
3358
3359	/* retry manual desc retrieval */
3360	if (!desc_info) {
3361		desc_info = ath12k_dp_get_rx_desc(ab, cookie);
3362		if (!desc_info) {
3363			ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
3364				    cookie);
3365			return -EINVAL;
3366		}
3367	}
3368
3369	if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
3370		ath12k_warn(ab, "RX exception, check HW CC implementation\n");
3371
3372	msdu = desc_info->skb;
3373	desc_info->skb = NULL;
3374
3375	list_add_tail(&desc_info->list, used_list);
3376
3377	rxcb = ATH12K_SKB_RXCB(msdu);
3378	dma_unmap_single(ar->ab->dev, rxcb->paddr,
3379			 msdu->len + skb_tailroom(msdu),
3380			 DMA_FROM_DEVICE);
3381
3382	if (drop) {
3383		dev_kfree_skb_any(msdu);
3384		return 0;
3385	}
3386
3387	rcu_read_lock();
3388	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3389		dev_kfree_skb_any(msdu);
3390		goto exit;
3391	}
3392
3393	if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
3394		dev_kfree_skb_any(msdu);
3395		goto exit;
3396	}
3397
3398	rx_desc = (struct hal_rx_desc *)msdu->data;
3399	msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);
3400	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3401		ath12k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
3402		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
3403				sizeof(*rx_desc));
3404		dev_kfree_skb_any(msdu);
3405		goto exit;
3406	}
3407
3408	skb_put(msdu, hal_rx_desc_sz + msdu_len);
3409
3410	if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
3411		dev_kfree_skb_any(msdu);
3412		ath12k_dp_rx_link_desc_return(ar->ab, desc,
3413					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3414	}
3415exit:
3416	rcu_read_unlock();
3417	return 0;
3418}
3419
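/* Process the REO exception ring: rx fragments (one MSDU per link
 * descriptor) are fed to the defragmentation path, everything else is
 * dropped and its link descriptor returned to the WBM idle list.
 */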
3420int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
3421			     int budget)
3422{
3423	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3424	struct dp_link_desc_bank *link_desc_banks;
3425	enum hal_rx_buf_return_buf_manager rbm;
3426	struct hal_rx_msdu_link *link_desc_va;
3427	int tot_n_bufs_reaped, quota, ret, i;
3428	struct hal_reo_dest_ring *reo_desc;
3429	struct dp_rxdma_ring *rx_ring;
3430	struct dp_srng *reo_except;
3431	LIST_HEAD(rx_desc_used_list);
3432	u32 desc_bank, num_msdus;
3433	struct hal_srng *srng;
3434	struct ath12k_dp *dp;
3435	int mac_id;
3436	struct ath12k *ar;
3437	dma_addr_t paddr;
3438	bool is_frag;
3439	bool drop;
3440	int pdev_id;
3441
3442	tot_n_bufs_reaped = 0;
3443	quota = budget;
3444
3445	dp = &ab->dp;
3446	reo_except = &dp->reo_except_ring;
3447	link_desc_banks = dp->link_desc_banks;
3448
3449	srng = &ab->hal.srng_list[reo_except->ring_id];
3450
3451	spin_lock_bh(&srng->lock);
3452
3453	ath12k_hal_srng_access_begin(ab, srng);
3454
3455	while (budget &&
3456	       (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
3457		drop = false;
3458		ab->soc_stats.err_ring_pkts++;
3459
3460		ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
3461						    &desc_bank);
3462		if (ret) {
3463			ath12k_warn(ab, "failed to parse error reo desc %d\n",
3464				    ret);
3465			continue;
3466		}
3467		link_desc_va = link_desc_banks[desc_bank].vaddr +
3468			       (paddr - link_desc_banks[desc_bank].paddr);
3469		ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3470						 &rbm);
3471		if (rbm != dp->idle_link_rbm &&
3472		    rbm != HAL_RX_BUF_RBM_SW3_BM &&
3473		    rbm != ab->hw_params->hal_params->rx_buf_rbm) {
3474			ab->soc_stats.invalid_rbm++;
3475			ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
3476			ath12k_dp_rx_link_desc_return(ab, reo_desc,
3477						      HAL_WBM_REL_BM_ACT_REL_MSDU);
3478			continue;
3479		}
3480
3481		is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
3482			     RX_MPDU_DESC_INFO0_FRAG_FLAG);
3483
3484		/* Process only rx fragments with one MSDU per link desc below, and drop
3485		 * MSDUs indicated due to error reasons.
3486		 */
3487		if (!is_frag || num_msdus > 1) {
3488			drop = true;
3489			/* Return the link desc back to wbm idle list */
3490			ath12k_dp_rx_link_desc_return(ab, reo_desc,
3491						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3492		}
3493
3494		for (i = 0; i < num_msdus; i++) {
3495			mac_id = le32_get_bits(reo_desc->info0,
3496					       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3497
3498			pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
3499			ar = ab->pdevs[pdev_id].ar;
3500
3501			if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
3502							  &rx_desc_used_list,
3503							  drop,
3504							  msdu_cookies[i]))
3505				tot_n_bufs_reaped++;
3506		}
3507
3508		if (tot_n_bufs_reaped >= quota) {
3509			tot_n_bufs_reaped = quota;
3510			goto exit;
3511		}
3512
3513		budget = quota - tot_n_bufs_reaped;
3514	}
3515
3516exit:
3517	ath12k_hal_srng_access_end(ab, srng);
3518
3519	spin_unlock_bh(&srng->lock);
3520
3521	rx_ring = &dp->rx_refill_buf_ring;
3522
3523	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
3524				    tot_n_bufs_reaped);
3525
3526	return tot_n_bufs_reaped;
3527}
3528
3529static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
3530					     int msdu_len,
3531					     struct sk_buff_head *msdu_list)
3532{
3533	struct sk_buff *skb, *tmp;
3534	struct ath12k_skb_rxcb *rxcb;
3535	int n_buffs;
3536
3537	n_buffs = DIV_ROUND_UP(msdu_len,
3538			       (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz));
3539
3540	skb_queue_walk_safe(msdu_list, skb, tmp) {
3541		rxcb = ATH12K_SKB_RXCB(skb);
3542		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3543		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3544			if (!n_buffs)
3545				break;
3546			__skb_unlink(skb, msdu_list);
3547			dev_kfree_skb_any(skb);
3548			n_buffs--;
3549		}
3550	}
3551}
3552
3553static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
3554				      struct ieee80211_rx_status *status,
3555				      struct sk_buff_head *msdu_list)
3556{
3557	struct ath12k_base *ab = ar->ab;
3558	u16 msdu_len;
3559	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3560	u8 l3pad_bytes;
3561	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3562	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3563
3564	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3565
3566	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3567		/* First buffer will be freed by the caller, so deduct its length */
3568		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3569		ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3570		return -EINVAL;
3571	}
3572
3573	/* Even after cleaning up the sg buffers in the msdu list with the above
3574	 * check, any msdu received with the continuation flag must be dropped as
3575	 * invalid. This protects against a stray error frame with the flag set.
3576	 */
3577	if (rxcb->is_continuation)
3578		return -EINVAL;
3579
3580	if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
3581		ath12k_warn(ar->ab,
3582			    "msdu_done bit not set in null_q_des processing\n");
3583		__skb_queue_purge(msdu_list);
3584		return -EIO;
3585	}
3586
3587	/* Handle NULL queue descriptor violations arising out of a missing
3588	 * REO queue for a given peer or TID. This typically happens if a
3589	 * packet is received on a QoS-enabled TID before the ADDBA
3590	 * negotiation for that TID has set up the TID queue. It may also
3591	 * happen for MC/BC frames if they are not routed to the non-QoS TID
3592	 * queue in the absence of any other default TID queue. This error
3593	 * can show up in both the REO destination and WBM release rings.
3594	 */
3595
3596	if (rxcb->is_frag) {
3597		skb_pull(msdu, hal_rx_desc_sz);
3598	} else {
3599		l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3600
3601		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3602			return -EINVAL;
3603
3604		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3605		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3606	}
3607	ath12k_dp_rx_h_ppdu(ar, desc, status);
3608
3609	ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
3610
3611	rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
3612
3613	/* Note that the caller retains access to the msdu and completes rx
3614	 * with mac80211, so there is no need to clean up amsdu_list here.
3615	 */
3616
3617	return 0;
3618}
3619
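/* Dispatch an MSDU released by REO to the WBM error ring based on its REO
 * error code. Returns true if the MSDU should be dropped.
 */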
3620static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
3621				   struct ieee80211_rx_status *status,
3622				   struct sk_buff_head *msdu_list)
3623{
3624	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3625	bool drop = false;
3626
3627	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3628
3629	switch (rxcb->err_code) {
3630	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3631		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3632			drop = true;
3633		break;
3634	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3635		/* TODO: Do not drop PN failed packets in the driver;
3636		 * instead, it is good to drop such packets in mac80211
3637		 * after incrementing the replay counters.
3638		 */
3639		fallthrough;
3640	default:
3641		/* TODO: Review other errors and process them to mac80211
3642		 * as appropriate.
3643		 */
3644		drop = true;
3645		break;
3646	}
3647
3648	return drop;
3649}
3650
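/* Prepare a frame that failed the TKIP MIC check for delivery to mac80211:
 * trim the rx descriptor and L3 padding, flag the Michael MIC error so
 * mac80211 can run TKIP countermeasures, and undecap the payload.
 */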
3651static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
3652					struct ieee80211_rx_status *status)
3653{
3654	struct ath12k_base *ab = ar->ab;
3655	u16 msdu_len;
3656	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3657	u8 l3pad_bytes;
3658	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3659	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3660
3661	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
3662	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
3663
3664	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3665	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3666	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3667	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3668
3669	ath12k_dp_rx_h_ppdu(ar, desc, status);
3670
3671	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
3672			 RX_FLAG_DECRYPTED);
3673
3674	ath12k_dp_rx_h_undecap(ar, msdu, desc,
3675			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
3676}
3677
3678static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
3679				     struct ieee80211_rx_status *status)
3680{
3681	struct ath12k_base *ab = ar->ab;
3682	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3683	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3684	bool drop = false;
3685	u32 err_bitmap;
3686
3687	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
3688
3689	switch (rxcb->err_code) {
3690	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
3691	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3692		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
3693		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
3694			ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
3695			break;
3696		}
3697		fallthrough;
3698	default:
3699		/* TODO: Review other rxdma error codes to check if anything is
3700		 * worth reporting to mac80211
3701		 */
3702		drop = true;
3703		break;
3704	}
3705
3706	return drop;
3707}
3708
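/* Handle a single MSDU reaped from the WBM error ring, dispatching on
 * whether REO or RXDMA released it. The MSDU is either delivered to
 * mac80211 or freed here.
 */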
3709static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
3710				 struct napi_struct *napi,
3711				 struct sk_buff *msdu,
3712				 struct sk_buff_head *msdu_list)
3713{
3714	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3715	struct ieee80211_rx_status rxs = {0};
3716	bool drop = true;
3717
3718	switch (rxcb->err_rel_src) {
3719	case HAL_WBM_REL_SRC_MODULE_REO:
3720		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
3721		break;
3722	case HAL_WBM_REL_SRC_MODULE_RXDMA:
3723		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
3724		break;
3725	default:
3726		/* msdu will get freed */
3727		break;
3728	}
3729
3730	if (drop) {
3731		dev_kfree_skb_any(msdu);
3732		return;
3733	}
3734
3735	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
3736}
3737
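/* NAPI handler for the WBM error release ring: reap error descriptors until
 * the budget of complete (non-continuation) MSDUs is consumed, stitch
 * scattered MSDUs back together via scatter_msdu_list, replenish the refill
 * ring and then process each MSDU on the pdev it arrived on. Returns the
 * number of buffers reaped.
 */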
3738int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
3739				 struct napi_struct *napi, int budget)
3740{
3741	LIST_HEAD(rx_desc_used_list);
3742	struct ath12k *ar;
3743	struct ath12k_dp *dp = &ab->dp;
3744	struct dp_rxdma_ring *rx_ring;
3745	struct hal_rx_wbm_rel_info err_info;
3746	struct hal_srng *srng;
3747	struct sk_buff *msdu;
3748	struct sk_buff_head msdu_list, scatter_msdu_list;
3749	struct ath12k_skb_rxcb *rxcb;
3750	void *rx_desc;
3751	u8 mac_id;
3752	int num_buffs_reaped = 0;
3753	struct ath12k_rx_desc_info *desc_info;
3754	int ret, pdev_id;
3755	struct hal_rx_desc *msdu_data;
3756
3757	__skb_queue_head_init(&msdu_list);
3758	__skb_queue_head_init(&scatter_msdu_list);
3759
3760	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
3761	rx_ring = &dp->rx_refill_buf_ring;
3762	spin_lock_bh(&srng->lock);
3763
3764	ath12k_hal_srng_access_begin(ab, srng);
3765
3766	while (budget) {
3767		rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
3768		if (!rx_desc)
3769			break;
3770
3771		ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
3772		if (ret) {
3773			ath12k_warn(ab,
3774				    "failed to parse rx error in wbm_rel ring desc %d\n",
3775				    ret);
3776			continue;
3777		}
3778
3779		desc_info = err_info.rx_desc;
3780
3781		/* Retry manual desc retrieval if HW cookie conversion is not done */
3782		if (!desc_info) {
3783			desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
3784			if (!desc_info) {
3785				ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
3786					    err_info.cookie);
3787				continue;
3788			}
3789		}
3790
3791		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
3792			ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
3793
3794		msdu = desc_info->skb;
3795		desc_info->skb = NULL;
3796
3797		list_add_tail(&desc_info->list, &rx_desc_used_list);
3798
3799		rxcb = ATH12K_SKB_RXCB(msdu);
3800		dma_unmap_single(ab->dev, rxcb->paddr,
3801				 msdu->len + skb_tailroom(msdu),
3802				 DMA_FROM_DEVICE);
3803
3804		num_buffs_reaped++;
3805
3806		if (!err_info.continuation)
3807			budget--;
3808
3809		if (err_info.push_reason !=
3810		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
3811			dev_kfree_skb_any(msdu);
3812			continue;
3813		}
3814
3815		msdu_data = (struct hal_rx_desc *)msdu->data;
3816		rxcb->err_rel_src = err_info.err_rel_src;
3817		rxcb->err_code = err_info.err_code;
3818		rxcb->is_first_msdu = err_info.first_msdu;
3819		rxcb->is_last_msdu = err_info.last_msdu;
3820		rxcb->is_continuation = err_info.continuation;
3821		rxcb->rx_desc = msdu_data;
3822
3823		if (err_info.continuation) {
3824			__skb_queue_tail(&scatter_msdu_list, msdu);
3825			continue;
3826		}
3827
3828		mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
3829							msdu_data);
3830		if (mac_id >= MAX_RADIOS) {
3831			dev_kfree_skb_any(msdu);
3832
3833			/* In case the continuation bit was set in the
3834			 * previous record, clean up scatter_msdu_list
3835			 */
3836			ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
3837			continue;
3838		}
3839
3840		if (!skb_queue_empty(&scatter_msdu_list)) {
3841			struct sk_buff *msdu;
3842
3843			skb_queue_walk(&scatter_msdu_list, msdu) {
3844				rxcb = ATH12K_SKB_RXCB(msdu);
3845				rxcb->mac_id = mac_id;
3846			}
3847
3848			skb_queue_splice_tail_init(&scatter_msdu_list,
3849						   &msdu_list);
3850		}
3851
3852		rxcb = ATH12K_SKB_RXCB(msdu);
3853		rxcb->mac_id = mac_id;
3854		__skb_queue_tail(&msdu_list, msdu);
3855	}
3856
3857	/* In case the continuation bit was set in the last
3858	 * record, clean up scatter_msdu_list
3859	 */
3860	ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
3861
3862	ath12k_hal_srng_access_end(ab, srng);
3863
3864	spin_unlock_bh(&srng->lock);
3865
3866	if (!num_buffs_reaped)
3867		goto done;
3868
3869	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
3870				    num_buffs_reaped);
3871
3872	rcu_read_lock();
3873	while ((msdu = __skb_dequeue(&msdu_list))) {
3874		rxcb = ATH12K_SKB_RXCB(msdu);
3875		mac_id = rxcb->mac_id;
3876
3877		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
3878		ar = ab->pdevs[pdev_id].ar;
3879
3880		if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) {
3881			dev_kfree_skb_any(msdu);
3882			continue;
3883		}
3884
3885		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
3886			dev_kfree_skb_any(msdu);
3887			continue;
3888		}
3889		ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
3890	}
3891	rcu_read_unlock();
3892done:
3893	return num_buffs_reaped;
3894}
3895
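/* Drain the REO status ring and complete pending REO commands: each status
 * TLV is parsed into reo_status and matched by cmd_num against the commands
 * queued on dp->reo_cmd_list; a matching command has its handler invoked
 * and is then freed.
 */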
3896void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
3897{
3898	struct ath12k_dp *dp = &ab->dp;
3899	struct hal_tlv_64_hdr *hdr;
3900	struct hal_srng *srng;
3901	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
3902	bool found = false;
3903	u16 tag;
3904	struct hal_reo_status reo_status;
3905
3906	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
3907
3908	memset(&reo_status, 0, sizeof(reo_status));
3909
3910	spin_lock_bh(&srng->lock);
3911
3912	ath12k_hal_srng_access_begin(ab, srng);
3913
3914	while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
3915		tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
3916
3917		switch (tag) {
3918		case HAL_REO_GET_QUEUE_STATS_STATUS:
3919			ath12k_hal_reo_status_queue_stats(ab, hdr,
3920							  &reo_status);
3921			break;
3922		case HAL_REO_FLUSH_QUEUE_STATUS:
3923			ath12k_hal_reo_flush_queue_status(ab, hdr,
3924							  &reo_status);
3925			break;
3926		case HAL_REO_FLUSH_CACHE_STATUS:
3927			ath12k_hal_reo_flush_cache_status(ab, hdr,
3928							  &reo_status);
3929			break;
3930		case HAL_REO_UNBLOCK_CACHE_STATUS:
3931			ath12k_hal_reo_unblk_cache_status(ab, hdr,
3932							  &reo_status);
3933			break;
3934		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
3935			ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
3936								 &reo_status);
3937			break;
3938		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
3939			ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
3940								  &reo_status);
3941			break;
3942		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
3943			ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
3944								  &reo_status);
3945			break;
3946		default:
3947			ath12k_warn(ab, "Unknown reo status type %d\n", tag);
3948			continue;
3949		}
3950
3951		spin_lock_bh(&dp->reo_cmd_lock);
3952		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
3953			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
3954				found = true;
3955				list_del(&cmd->list);
3956				break;
3957			}
3958		}
3959		spin_unlock_bh(&dp->reo_cmd_lock);
3960
3961		if (found) {
3962			cmd->handler(dp, (void *)&cmd->data,
3963				     reo_status.uniform_hdr.cmd_status);
3964			kfree(cmd);
3965		}
3966
3967		found = false;
3968	}
3969
3970	ath12k_hal_srng_access_end(ab, srng);
3971
3972	spin_unlock_bh(&srng->lock);
3973}
3974
3975void ath12k_dp_rx_free(struct ath12k_base *ab)
3976{
3977	struct ath12k_dp *dp = &ab->dp;
3978	int i;
3979
3980	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
3981
3982	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
3983		if (ab->hw_params->rx_mac_buf_ring)
3984			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
3985	}
3986
3987	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
3988		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
3989
3990	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
3991
3992	ath12k_dp_rxdma_buf_free(ab);
3993}
3994
3995void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
3996{
3997	struct ath12k *ar = ab->pdevs[mac_id].ar;
3998
3999	ath12k_dp_rx_pdev_srng_free(ar);
4000}
4001
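/* Program the rx ring selection filter for QCN9274: subscribe the refill
 * ring to the rxdma TLVs, BAR control frames and unicast/multicast/NULL
 * data frames, and supply the mpdu_start/msdu_end TLV offsets (with
 * compacted word masks where the hardware supports them).
 */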
4002int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
4003{
4004	struct ath12k_dp *dp = &ab->dp;
4005	struct htt_rx_ring_tlv_filter tlv_filter = {0};
4006	u32 ring_id;
4007	int ret;
4008	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
4009
4010	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4011
4012	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
4013	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
4014	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
4015					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
4016					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
4017	tlv_filter.offset_valid = true;
4018	tlv_filter.rx_packet_offset = hal_rx_desc_sz;
4019
4020	tlv_filter.rx_mpdu_start_offset =
4021		ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
4022	tlv_filter.rx_msdu_end_offset =
4023		ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
4024
4025	if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
4026		tlv_filter.rx_mpdu_start_wmask =
4027			ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
4028		tlv_filter.rx_msdu_end_wmask =
4029			ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
4030		ath12k_dbg(ab, ATH12K_DBG_DATA,
4031			   "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
4032			   tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
4033	}
4034
4035	ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
4036					       HAL_RXDMA_BUF,
4037					       DP_RXDMA_REFILL_RING_SIZE,
4038					       &tlv_filter);
4039
4040	return ret;
4041}
4042
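/* WCN7850 variant of the rx ring selection config: alongside the TLV
 * offsets it programs the packet header TLV offset and applies the filter
 * to each per-MAC rx buffer ring.
 */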
4043int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
4044{
4045	struct ath12k_dp *dp = &ab->dp;
4046	struct htt_rx_ring_tlv_filter tlv_filter = {0};
4047	u32 ring_id;
4048	int ret = 0;
4049	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
4050	int i;
4051
4052	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4053
4054	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
4055	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
4056	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
4057					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
4058					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
4059	tlv_filter.offset_valid = true;
4060	tlv_filter.rx_packet_offset = hal_rx_desc_sz;
4061
4062	tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
4063
4064	tlv_filter.rx_mpdu_start_offset =
4065		ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
4066	tlv_filter.rx_msdu_end_offset =
4067		ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
4068
4069	/* TODO: Selectively subscribe to the required qwords within msdu_end
4070	 * and mpdu_start, set up the mask in the message below, and modify
4071	 * the rx_desc struct accordingly.
4072	 */
4073
4074	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
4075		ring_id = dp->rx_mac_buf_ring[i].ring_id;
4076		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
4077						       HAL_RXDMA_BUF,
4078						       DP_RXDMA_REFILL_RING_SIZE,
4079						       &tlv_filter);
4080	}
4081
4082	return ret;
4083}
4084
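/* Register the host rx rings with the firmware over HTT: the refill ring,
 * the optional per-MAC buffer rings, the rxdma error destination rings and,
 * when rxdma1 is enabled, the monitor buffer ring, followed by the
 * hw-specific ring selection config.
 */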
4085int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
4086{
4087	struct ath12k_dp *dp = &ab->dp;
4088	u32 ring_id;
4089	int i, ret;
4090
4091	/* TODO: Need to verify the HTT setup for QCN9224 */
4092	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4093	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
4094	if (ret) {
4095		ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4096			    ret);
4097		return ret;
4098	}
4099
4100	if (ab->hw_params->rx_mac_buf_ring) {
4101		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
4102			ring_id = dp->rx_mac_buf_ring[i].ring_id;
4103			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4104							  i, HAL_RXDMA_BUF);
4105			if (ret) {
4106				ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4107					    i, ret);
4108				return ret;
4109			}
4110		}
4111	}
4112
4113	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
4114		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4115		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4116						  i, HAL_RXDMA_DST);
4117		if (ret) {
4118			ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
4119				    i, ret);
4120			return ret;
4121		}
4122	}
4123
4124	if (ab->hw_params->rxdma1_enable) {
4125		ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4126		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4127						  0, HAL_RXDMA_MONITOR_BUF);
4128		if (ret) {
4129			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4130				    ret);
4131			return ret;
4132		}
4133	}
4134
4135	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
4136	if (ret) {
4137		ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
4138		return ret;
4139	}
4140
4141	return 0;
4142}
4143
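/* Allocate the host-side rx srngs (refill, per-MAC buffer, rxdma error
 * destination and monitor buffer rings as applicable) and post initial
 * buffers to them.
 */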
4144int ath12k_dp_rx_alloc(struct ath12k_base *ab)
4145{
4146	struct ath12k_dp *dp = &ab->dp;
4147	int i, ret;
4148
4149	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
4150	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
4151
4152	ret = ath12k_dp_srng_setup(ab,
4153				   &dp->rx_refill_buf_ring.refill_buf_ring,
4154				   HAL_RXDMA_BUF, 0, 0,
4155				   DP_RXDMA_BUF_RING_SIZE);
4156	if (ret) {
4157		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
4158		return ret;
4159	}
4160
4161	if (ab->hw_params->rx_mac_buf_ring) {
4162		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
4163			ret = ath12k_dp_srng_setup(ab,
4164						   &dp->rx_mac_buf_ring[i],
4165						   HAL_RXDMA_BUF, 1,
4166						   i, DP_RX_MAC_BUF_RING_SIZE);
4167			if (ret) {
4168				ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
4169					    i);
4170				return ret;
4171			}
4172		}
4173	}
4174
4175	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
4176		ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
4177					   HAL_RXDMA_DST, 0, i,
4178					   DP_RXDMA_ERR_DST_RING_SIZE);
4179		if (ret) {
4180			ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
4181			return ret;
4182		}
4183	}
4184
4185	if (ab->hw_params->rxdma1_enable) {
4186		ret = ath12k_dp_srng_setup(ab,
4187					   &dp->rxdma_mon_buf_ring.refill_buf_ring,
4188					   HAL_RXDMA_MONITOR_BUF, 0, 0,
4189					   DP_RXDMA_MONITOR_BUF_RING_SIZE);
4190		if (ret) {
4191			ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
4192			return ret;
4193		}
4194	}
4195
4196	ret = ath12k_dp_rxdma_buf_setup(ab);
4197	if (ret) {
4198		ath12k_warn(ab, "failed to setup rxdma ring\n");
4199		return ret;
4200	}
4201
4202	return 0;
4203}
4204
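/* Per-pdev rx setup: a no-op unless rxdma1 is enabled, in which case the
 * pdev srngs are allocated and the monitor destination rings are registered
 * over HTT.
 */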
4205int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
4206{
4207	struct ath12k *ar = ab->pdevs[mac_id].ar;
4208	struct ath12k_pdev_dp *dp = &ar->dp;
4209	u32 ring_id;
4210	int i;
4211	int ret;
4212
4213	if (!ab->hw_params->rxdma1_enable)
4214		goto out;
4215
4216	ret = ath12k_dp_rx_pdev_srng_alloc(ar);
4217	if (ret) {
4218		ath12k_warn(ab, "failed to setup rx srngs\n");
4219		return ret;
4220	}
4221
4222	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
4223		ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
4224		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4225						  mac_id + i,
4226						  HAL_RXDMA_MONITOR_DST);
4227		if (ret) {
4228			ath12k_warn(ab,
4229				    "failed to configure rxdma_mon_dst_ring %d %d\n",
4230				    i, ret);
4231			return ret;
4232		}
4233	}
4234out:
4235	return 0;
4236}
4237
4238static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
4239{
4240	struct ath12k_pdev_dp *dp = &ar->dp;
4241	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;
4242
4243	skb_queue_head_init(&pmon->rx_status_q);
4244
4245	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4246
4247	memset(&pmon->rx_mon_stats, 0,
4248	       sizeof(pmon->rx_mon_stats));
4249	return 0;
4250}
4251
4252int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
4253{
4254	struct ath12k_pdev_dp *dp = &ar->dp;
4255	struct ath12k_mon_data *pmon = &dp->mon_data;
4256	int ret = 0;
4257
4258	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
4259	if (ret) {
4260		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed");
4261		return ret;
4262	}
4263
4264	/* If rxdma1_enable is false, there is no need to set up
4265	 * rxdma_mon_desc_ring.
4266	 */
4267	if (!ar->ab->hw_params->rxdma1_enable)
4268		return 0;
4269
4270	pmon->mon_last_linkdesc_paddr = 0;
4271	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
4272	spin_lock_init(&pmon->mon_lock);
4273
4274	return 0;
4275}