   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
   4 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
   5 */
   6
   7#include <linux/ieee80211.h>
   8#include <linux/kernel.h>
   9#include <linux/skbuff.h>
  10#include <crypto/hash.h>
  11#include "core.h"
  12#include "debug.h"
  13#include "hal_desc.h"
  14#include "hw.h"
  15#include "dp_rx.h"
  16#include "hal_rx.h"
  17#include "dp_tx.h"
  18#include "peer.h"
  19#include "dp_mon.h"
  20
  21#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
  22
  23static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
  24						    struct hal_rx_desc *desc)
  25{
  26	if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))
  27		return HAL_ENCRYPT_TYPE_OPEN;
  28
  29	return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);
  30}
  31
  32u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
  33			     struct hal_rx_desc *desc)
  34{
  35	return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);
  36}
  37
  38static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
  39					  struct hal_rx_desc *desc)
  40{
  41	return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);
  42}
  43
  44static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
  45					  struct hal_rx_desc *desc)
  46{
  47	return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
  48}
  49
  50static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
  51				    struct hal_rx_desc *desc)
  52{
  53	return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);
  54}
  55
  56static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
  57				      struct sk_buff *skb)
  58{
  59	struct ieee80211_hdr *hdr;
  60
  61	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
  62	return ieee80211_has_morefrags(hdr->frame_control);
  63}
  64
  65static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
  66				  struct sk_buff *skb)
  67{
  68	struct ieee80211_hdr *hdr;
  69
  70	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
  71	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
  72}
  73
  74static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
  75				 struct hal_rx_desc *desc)
  76{
  77	return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);
  78}
  79
  80static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
  81				     struct hal_rx_desc *desc)
  82{
  83	return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
  84}
  85
  86static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
  87					 struct hal_rx_desc *desc)
  88{
  89	return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
  90}
  91
  92static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
  93					 struct hal_rx_desc *desc)
  94{
  95	return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
  96}
  97
  98static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
  99					struct hal_rx_desc *desc)
 100{
 101	return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);
 102}
 103
 104u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
 105			    struct hal_rx_desc *desc)
 106{
 107	return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);
 108}
 109
 110static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
 111				   struct hal_rx_desc *desc)
 112{
 113	return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
 114}
 115
 116static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
 117			     struct hal_rx_desc *desc)
 118{
 119	return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
 120}
 121
 122static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
 123				  struct hal_rx_desc *desc)
 124{
 125	return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
 126}
 127
 128static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
 129			       struct hal_rx_desc *desc)
 130{
 131	return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
 132}
 133
 134static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
 135			       struct hal_rx_desc *desc)
 136{
 137	return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
 138}
 139
 140static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
 141				  struct hal_rx_desc *desc)
 142{
 143	return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
 144}
 145
 146static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
 147			     struct hal_rx_desc *desc)
 148{
 149	return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
 150}
 151
 152static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
 153			     struct hal_rx_desc *desc)
 154{
 155	return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);
 156}
 157
 158static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
 159				  struct hal_rx_desc *desc)
 160{
 161	return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);
 162}
 163
 164u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
 165			struct hal_rx_desc *desc)
 166{
 167	return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);
 168}
 169
 170static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
 171				      struct hal_rx_desc *desc)
 172{
 173	return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);
 174}
 175
 176static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
 177				     struct hal_rx_desc *desc)
 178{
 179	return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);
 180}
 181
 182static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
 183					   struct hal_rx_desc *fdesc,
 184					   struct hal_rx_desc *ldesc)
 185{
 186	ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
 187}
 188
 189static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
 190					  struct hal_rx_desc *desc,
 191					  u16 len)
 192{
 193	ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
 194}
 195
 196static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
 197				      struct hal_rx_desc *desc)
 198{
 199	return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
 200		ab->hw_params->hal_ops->rx_desc_is_da_mcbc(desc));
 201}
 202
 203static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
 204					     struct hal_rx_desc *desc)
 205{
 206	return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
 207}
 208
 209static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
 210						 struct hal_rx_desc *desc)
 211{
 212	return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);
 213}
 214
 215static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
 216					    struct hal_rx_desc *desc,
 217					    struct ieee80211_hdr *hdr)
 218{
 219	ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr);
 220}
 221
 222static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
 223						struct hal_rx_desc *desc,
 224						u8 *crypto_hdr,
 225						enum hal_encrypt_type enctype)
 226{
 227	ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
 228}
 229
 230static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
 231						struct hal_rx_desc *desc)
 232{
 233	return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
 234}
 235
 236static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
 237{
 238	int i, reaped = 0;
 239	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
 240
 241	do {
 242		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
 243			reaped += ath12k_dp_mon_process_ring(ab, i, NULL,
 244							     DP_MON_SERVICE_BUDGET,
 245							     ATH12K_DP_RX_MONITOR_MODE);
 246
 247		/* nothing more to reap */
 248		if (reaped < DP_MON_SERVICE_BUDGET)
 249			return 0;
 250
 251	} while (time_before(jiffies, timeout));
 252
 253	ath12k_warn(ab, "dp mon ring purge timeout");
 254
 255	return -ETIMEDOUT;
 256}
 257
 258/* Returns number of Rx buffers replenished */
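/* Allocate skbs aligned to DP_RX_BUFFER_ALIGN_SIZE, DMA-map them, bind each
 * one to an rx descriptor taken from the free list (moved to the used list
 * for later cleanup) and publish the buffer address and cookie on the refill
 * SRNG. When called with req_entries == 0 the ring is only topped up once
 * more than 3/4 of it needs refilling.
 */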
 259int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
 260				struct dp_rxdma_ring *rx_ring,
 261				int req_entries)
 262{
 263	struct ath12k_buffer_addr *desc;
 264	struct hal_srng *srng;
 265	struct sk_buff *skb;
 266	int num_free;
 267	int num_remain;
 268	u32 cookie;
 269	dma_addr_t paddr;
 270	struct ath12k_dp *dp = &ab->dp;
 271	struct ath12k_rx_desc_info *rx_desc;
 272	enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm;
 273
 274	req_entries = min(req_entries, rx_ring->bufs_max);
 275
 276	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
 277
 278	spin_lock_bh(&srng->lock);
 279
 280	ath12k_hal_srng_access_begin(ab, srng);
 281
 282	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
 283	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
 284		req_entries = num_free;
 285
 286	req_entries = min(num_free, req_entries);
 287	num_remain = req_entries;
 288
 289	while (num_remain > 0) {
 290		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
 291				    DP_RX_BUFFER_ALIGN_SIZE);
 292		if (!skb)
 293			break;
 294
 295		if (!IS_ALIGNED((unsigned long)skb->data,
 296				DP_RX_BUFFER_ALIGN_SIZE)) {
 297			skb_pull(skb,
 298				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
 299				 skb->data);
 300		}
 301
 302		paddr = dma_map_single(ab->dev, skb->data,
 303				       skb->len + skb_tailroom(skb),
 304				       DMA_FROM_DEVICE);
 305		if (dma_mapping_error(ab->dev, paddr))
 306			goto fail_free_skb;
 307
 308		spin_lock_bh(&dp->rx_desc_lock);
 309
 310		/* Get desc from free list and store in used list
 311		 * for cleanup purposes
 312		 *
 313		 * TODO: pass the removed descs rather than
 314		 * add/read to optimize
 315		 */
 316		rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
 317						   struct ath12k_rx_desc_info,
 318						   list);
 319		if (!rx_desc) {
 320			spin_unlock_bh(&dp->rx_desc_lock);
 321			goto fail_dma_unmap;
 322		}
 323
 324		rx_desc->skb = skb;
 325		cookie = rx_desc->cookie;
 326		list_del(&rx_desc->list);
 327		list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
 328
 329		spin_unlock_bh(&dp->rx_desc_lock);
 330
 331		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
 332		if (!desc)
 333			goto fail_buf_unassign;
 334
 335		ATH12K_SKB_RXCB(skb)->paddr = paddr;
 336
 337		num_remain--;
 338
 339		ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
 340	}
 341
 342	ath12k_hal_srng_access_end(ab, srng);
 343
 344	spin_unlock_bh(&srng->lock);
 345
 346	return req_entries - num_remain;
 347
 348fail_buf_unassign:
 349	spin_lock_bh(&dp->rx_desc_lock);
 350	list_del(&rx_desc->list);
 351	list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
 352	rx_desc->skb = NULL;
 353	spin_unlock_bh(&dp->rx_desc_lock);
 354fail_dma_unmap:
 355	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
 356			 DMA_FROM_DEVICE);
 357fail_free_skb:
 358	dev_kfree_skb_any(skb);
 359
 360	ath12k_hal_srng_access_end(ab, srng);
 361
 362	spin_unlock_bh(&srng->lock);
 363
 364	return req_entries - num_remain;
 365}
 366
 367static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
 368					     struct dp_rxdma_mon_ring *rx_ring)
 369{
 370	struct sk_buff *skb;
 371	int buf_id;
 372
 373	spin_lock_bh(&rx_ring->idr_lock);
 374	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
 375		idr_remove(&rx_ring->bufs_idr, buf_id);
 376		/* TODO: Understand where internal driver does this dma_unmap
 377		 * of rxdma_buffer.
 378		 */
 379		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
 380				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
 381		dev_kfree_skb_any(skb);
 382	}
 383
 384	idr_destroy(&rx_ring->bufs_idr);
 385	spin_unlock_bh(&rx_ring->idr_lock);
 386
 387	return 0;
 388}
 389
 390static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
 391{
 392	struct ath12k_dp *dp = &ab->dp;
 393
 394	ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
 395
 396	ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->tx_mon_buf_ring);
 397
 398	return 0;
 399}
 400
 401static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
 402					      struct dp_rxdma_mon_ring *rx_ring,
 403					      u32 ringtype)
 404{
 405	int num_entries;
 406
 407	num_entries = rx_ring->refill_buf_ring.size /
 408		ath12k_hal_srng_get_entrysize(ab, ringtype);
 409
 410	rx_ring->bufs_max = num_entries;
 411	ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
 412
 413	return 0;
 414}
 415
 416static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
 417					  struct dp_rxdma_ring *rx_ring)
 418{
 419	int num_entries;
 420
 421	num_entries = rx_ring->refill_buf_ring.size /
 422		ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
 423
 424	rx_ring->bufs_max = num_entries;
 425	ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_entries);
 426
 427	return 0;
 428}
 429
 430static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
 431{
 432	struct ath12k_dp *dp = &ab->dp;
 433	int ret;
 434
 435	ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
 436	if (ret) {
 437		ath12k_warn(ab,
 438			    "failed to setup HAL_RXDMA_BUF\n");
 439		return ret;
 440	}
 441
 442	if (ab->hw_params->rxdma1_enable) {
 443		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
 444							 &dp->rxdma_mon_buf_ring,
 445							 HAL_RXDMA_MONITOR_BUF);
 446		if (ret) {
 447			ath12k_warn(ab,
 448				    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
 449			return ret;
 450		}
 451
 452		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
 453							 &dp->tx_mon_buf_ring,
 454							 HAL_TX_MONITOR_BUF);
 455		if (ret) {
 456			ath12k_warn(ab,
 457				    "failed to setup HAL_TX_MONITOR_BUF\n");
 458			return ret;
 459		}
 460	}
 461
 462	return 0;
 463}
 464
 465static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
 466{
 467	struct ath12k_pdev_dp *dp = &ar->dp;
 468	struct ath12k_base *ab = ar->ab;
 469	int i;
 470
 471	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
 472		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
 473		ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
 474	}
 475}
 476
 477void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
 478{
 479	struct ath12k_dp *dp = &ab->dp;
 480	int i;
 481
 482	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
 483		ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
 484}
 485
 486int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
 487{
 488	struct ath12k_dp *dp = &ab->dp;
 489	int ret;
 490	int i;
 491
 492	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
 493		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
 494					   HAL_REO_DST, i, 0,
 495					   DP_REO_DST_RING_SIZE);
 496		if (ret) {
 497			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
 498			goto err_reo_cleanup;
 499		}
 500	}
 501
 502	return 0;
 503
 504err_reo_cleanup:
 505	ath12k_dp_rx_pdev_reo_cleanup(ab);
 506
 507	return ret;
 508}
 509
 510static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
 511{
 512	struct ath12k_pdev_dp *dp = &ar->dp;
 513	struct ath12k_base *ab = ar->ab;
 514	int i;
 515	int ret;
 516	u32 mac_id = dp->mac_id;
 517
 518	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
 519		ret = ath12k_dp_srng_setup(ar->ab,
 520					   &dp->rxdma_mon_dst_ring[i],
 521					   HAL_RXDMA_MONITOR_DST,
 522					   0, mac_id + i,
 523					   DP_RXDMA_MONITOR_DST_RING_SIZE);
 524		if (ret) {
 525			ath12k_warn(ar->ab,
 526				    "failed to setup HAL_RXDMA_MONITOR_DST\n");
 527			return ret;
 528		}
 529
 530		ret = ath12k_dp_srng_setup(ar->ab,
 531					   &dp->tx_mon_dst_ring[i],
 532					   HAL_TX_MONITOR_DST,
 533					   0, mac_id + i,
 534					   DP_TX_MONITOR_DEST_RING_SIZE);
 535		if (ret) {
 536			ath12k_warn(ar->ab,
 537				    "failed to setup HAL_TX_MONITOR_DST\n");
 538			return ret;
 539		}
 540	}
 541
 542	return 0;
 543}
 544
 545void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
 546{
 547	struct ath12k_dp *dp = &ab->dp;
 548	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
 549	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
 550
 551	spin_lock_bh(&dp->reo_cmd_lock);
 552	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
 553		list_del(&cmd->list);
 554		dma_unmap_single(ab->dev, cmd->data.paddr,
 555				 cmd->data.size, DMA_BIDIRECTIONAL);
 556		kfree(cmd->data.vaddr);
 557		kfree(cmd);
 558	}
 559
 560	list_for_each_entry_safe(cmd_cache, tmp_cache,
 561				 &dp->reo_cmd_cache_flush_list, list) {
 562		list_del(&cmd_cache->list);
 563		dp->reo_cmd_cache_flush_count--;
 564		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
 565				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
 566		kfree(cmd_cache->data.vaddr);
 567		kfree(cmd_cache);
 568	}
 569	spin_unlock_bh(&dp->reo_cmd_lock);
 570}
 571
 572static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
 573				   enum hal_reo_cmd_status status)
 574{
 575	struct ath12k_dp_rx_tid *rx_tid = ctx;
 576
 577	if (status != HAL_REO_CMD_SUCCESS)
 578		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
 579			    rx_tid->tid, status);
 580
 581	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
 582			 DMA_BIDIRECTIONAL);
 583	kfree(rx_tid->vaddr);
 584	rx_tid->vaddr = NULL;
 585}
 586
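/* Post a REO command for the given rx_tid on the REO command ring. When a
 * completion callback is supplied, the returned command number, a copy of
 * the rx_tid and the handler are queued on dp->reo_cmd_list so the later
 * status indication can be matched back to this request.
 */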
 587static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
 588				  enum hal_reo_cmd_type type,
 589				  struct ath12k_hal_reo_cmd *cmd,
 590				  void (*cb)(struct ath12k_dp *dp, void *ctx,
 591					     enum hal_reo_cmd_status status))
 592{
 593	struct ath12k_dp *dp = &ab->dp;
 594	struct ath12k_dp_rx_reo_cmd *dp_cmd;
 595	struct hal_srng *cmd_ring;
 596	int cmd_num;
 597
 598	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 599	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
 600
  601	/* cmd_num should start from 1; on failure return the error code */
 602	if (cmd_num < 0)
 603		return cmd_num;
 604
  605	/* reo cmd ring descriptors have cmd_num starting from 1 */
 606	if (cmd_num == 0)
 607		return -EINVAL;
 608
 609	if (!cb)
 610		return 0;
 611
 612	/* Can this be optimized so that we keep the pending command list only
 613	 * for tid delete command to free up the resource on the command status
 614	 * indication?
 615	 */
 616	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
 617
 618	if (!dp_cmd)
 619		return -ENOMEM;
 620
 621	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
 622	dp_cmd->cmd_num = cmd_num;
 623	dp_cmd->handler = cb;
 624
 625	spin_lock_bh(&dp->reo_cmd_lock);
 626	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
 627	spin_unlock_bh(&dp->reo_cmd_lock);
 628
 629	return 0;
 630}
 631
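/* Flush the HW REO cache for a tid queue that is being torn down. The queue
 * descriptor is flushed in chunks of the non-QoS descriptor size, followed by
 * a final flush with NEED_STATUS whose completion handler
 * (ath12k_dp_reo_cmd_free) unmaps and frees the queue descriptor memory.
 */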
 632static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
 633				      struct ath12k_dp_rx_tid *rx_tid)
 634{
 635	struct ath12k_hal_reo_cmd cmd = {0};
 636	unsigned long tot_desc_sz, desc_sz;
 637	int ret;
 638
 639	tot_desc_sz = rx_tid->size;
 640	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
 641
 642	while (tot_desc_sz > desc_sz) {
 643		tot_desc_sz -= desc_sz;
 644		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
 645		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
 646		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
 647					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
 648					     NULL);
 649		if (ret)
 650			ath12k_warn(ab,
 651				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
 652				    rx_tid->tid, ret);
 653	}
 654
 655	memset(&cmd, 0, sizeof(cmd));
 656	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 657	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
 658	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
 659	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
 660				     HAL_REO_CMD_FLUSH_CACHE,
 661				     &cmd, ath12k_dp_reo_cmd_free);
 662	if (ret) {
 663		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
 664			   rx_tid->tid, ret);
 665		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
 666				 DMA_BIDIRECTIONAL);
 667		kfree(rx_tid->vaddr);
 668		rx_tid->vaddr = NULL;
 669	}
 670}
 671
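/* Completion handler for the HAL_REO_CMD_UPDATE_RX_QUEUE command sent from
 * ath12k_dp_rx_peer_tid_delete(). On success the tid descriptor is parked on
 * dp->reo_cmd_cache_flush_list; entries exceeding the free threshold or the
 * free timeout are then flushed from the HW cache and released. A DRAIN
 * status frees the descriptor immediately.
 */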
 672static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
 673				      enum hal_reo_cmd_status status)
 674{
 675	struct ath12k_base *ab = dp->ab;
 676	struct ath12k_dp_rx_tid *rx_tid = ctx;
 677	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
 678
 679	if (status == HAL_REO_CMD_DRAIN) {
 680		goto free_desc;
 681	} else if (status != HAL_REO_CMD_SUCCESS) {
 682		/* Shouldn't happen! Cleanup in case of other failure? */
 683		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
 684			    rx_tid->tid, status);
 685		return;
 686	}
 687
 688	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
 689	if (!elem)
 690		goto free_desc;
 691
 692	elem->ts = jiffies;
 693	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
 694
 695	spin_lock_bh(&dp->reo_cmd_lock);
 696	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
 697	dp->reo_cmd_cache_flush_count++;
 698
 699	/* Flush and invalidate aged REO desc from HW cache */
 700	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
 701				 list) {
 702		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
 703		    time_after(jiffies, elem->ts +
 704			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
 705			list_del(&elem->list);
 706			dp->reo_cmd_cache_flush_count--;
 707
 708			/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
 709			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
 710			 * is used in only two contexts, one is in this function called
 711			 * from napi and the other in ath12k_dp_free during core destroy.
 712			 * Before dp_free, the irqs would be disabled and would wait to
  713			 * synchronize. Hence there wouldn't be any race against add or
 714			 * delete to this list. Hence unlock-lock is safe here.
 715			 */
 716			spin_unlock_bh(&dp->reo_cmd_lock);
 717
 718			ath12k_dp_reo_cache_flush(ab, &elem->data);
 719			kfree(elem);
 720			spin_lock_bh(&dp->reo_cmd_lock);
 721		}
 722	}
 723	spin_unlock_bh(&dp->reo_cmd_lock);
 724
 725	return;
 726free_desc:
 727	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
 728			 DMA_BIDIRECTIONAL);
 729	kfree(rx_tid->vaddr);
 730	rx_tid->vaddr = NULL;
 731}
 732
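/* Write the physical address of a tid queue into the REO queue LUT entry for
 * (peer_id, tid). Each peer owns IEEE80211_NUM_TIDS + 1 = 17 consecutive
 * entries, so e.g. peer_id 2, tid 3 maps to LUT index 2 * 17 + 3 = 37.
 */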
 733static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
 734					  dma_addr_t paddr)
 735{
 736	struct ath12k_reo_queue_ref *qref;
 737	struct ath12k_dp *dp = &ab->dp;
 738
 739	if (!ab->hw_params->reoq_lut_support)
 740		return;
 741
 742	/* TODO: based on ML peer or not, select the LUT. below assumes non
 743	 * ML peer
 744	 */
 745	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
 746			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
 747
 748	qref->info0 = u32_encode_bits(lower_32_bits(paddr),
 749				      BUFFER_ADDR_INFO0_ADDR);
 750	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
 751				      BUFFER_ADDR_INFO1_ADDR) |
 752		      u32_encode_bits(tid, DP_REO_QREF_NUM);
 753}
 754
 755static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
 756{
 757	struct ath12k_reo_queue_ref *qref;
 758	struct ath12k_dp *dp = &ab->dp;
 759
 760	if (!ab->hw_params->reoq_lut_support)
 761		return;
 762
 763	/* TODO: based on ML peer or not, select the LUT. below assumes non
 764	 * ML peer
 765	 */
 766	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
 767			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
 768
 769	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
 770	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
 771		      u32_encode_bits(tid, DP_REO_QREF_NUM);
 772}
 773
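/* Tear down an active rx tid queue: clear the VLD bit with an
 * UPDATE_RX_QUEUE command (the queue descriptor is freed from
 * ath12k_dp_rx_tid_del_func(), or directly here if the command cannot be
 * sent) and reset the corresponding REO queue LUT entry.
 */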
 774void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
 775				  struct ath12k_peer *peer, u8 tid)
 776{
 777	struct ath12k_hal_reo_cmd cmd = {0};
 778	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
 779	int ret;
 780
 781	if (!rx_tid->active)
 782		return;
 783
 784	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
 785	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 786	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
 787	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
 788	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
 789				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
 790				     ath12k_dp_rx_tid_del_func);
 791	if (ret) {
 792		ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
 793			   tid, ret);
 794		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
 795				 DMA_BIDIRECTIONAL);
 796		kfree(rx_tid->vaddr);
 797		rx_tid->vaddr = NULL;
 798	}
 799
 800	ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
 801
 802	rx_tid->active = false;
 803}
 804
 805/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
 806 * to struct hal_wbm_release_ring, I couldn't figure out the logic behind
 807 * that.
 808 */
 809static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
 810					 struct hal_reo_dest_ring *ring,
 811					 enum hal_wbm_rel_bm_act action)
 812{
 813	struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
 814	struct hal_wbm_release_ring *desc;
 815	struct ath12k_dp *dp = &ab->dp;
 816	struct hal_srng *srng;
 817	int ret = 0;
 818
 819	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
 820
 821	spin_lock_bh(&srng->lock);
 822
 823	ath12k_hal_srng_access_begin(ab, srng);
 824
 825	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
 826	if (!desc) {
 827		ret = -ENOBUFS;
 828		goto exit;
 829	}
 830
 831	ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);
 832
 833exit:
 834	ath12k_hal_srng_access_end(ab, srng);
 835
 836	spin_unlock_bh(&srng->lock);
 837
 838	return ret;
 839}
 840
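/* Drop any partially reassembled fragments for a tid: optionally return the
 * saved destination ring descriptor to the WBM idle list, reset the fragment
 * tracking state and purge the pending fragment queue.
 */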
 841static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
 842				       bool rel_link_desc)
 843{
 844	struct ath12k_base *ab = rx_tid->ab;
 845
 846	lockdep_assert_held(&ab->base_lock);
 847
 848	if (rx_tid->dst_ring_desc) {
 849		if (rel_link_desc)
 850			ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
 851						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
 852		kfree(rx_tid->dst_ring_desc);
 853		rx_tid->dst_ring_desc = NULL;
 854	}
 855
 856	rx_tid->cur_sn = 0;
 857	rx_tid->last_frag_no = 0;
 858	rx_tid->rx_frag_bitmap = 0;
 859	__skb_queue_purge(&rx_tid->rx_frags);
 860}
 861
 862void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
 863{
 864	struct ath12k_dp_rx_tid *rx_tid;
 865	int i;
 866
 867	lockdep_assert_held(&ar->ab->base_lock);
 868
 869	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
 870		rx_tid = &peer->rx_tid[i];
 871
 872		ath12k_dp_rx_peer_tid_delete(ar, peer, i);
 873		ath12k_dp_rx_frags_cleanup(rx_tid, true);
 874
 875		spin_unlock_bh(&ar->ab->base_lock);
 876		del_timer_sync(&rx_tid->frag_timer);
 877		spin_lock_bh(&ar->ab->base_lock);
 878	}
 879}
 880
 881static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
 882					 struct ath12k_peer *peer,
 883					 struct ath12k_dp_rx_tid *rx_tid,
 884					 u32 ba_win_sz, u16 ssn,
 885					 bool update_ssn)
 886{
 887	struct ath12k_hal_reo_cmd cmd = {0};
 888	int ret;
 889
 890	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 891	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
 892	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
 893	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
 894	cmd.ba_window_size = ba_win_sz;
 895
 896	if (update_ssn) {
 897		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
 898		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
 899	}
 900
 901	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
 902				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
 903				     NULL);
 904	if (ret) {
 905		ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
 906			    rx_tid->tid, ret);
 907		return ret;
 908	}
 909
 910	rx_tid->ba_win_sz = ba_win_sz;
 911
 912	return 0;
 913}
 914
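/* Set up (or update) the rx reorder queue for a peer/tid. An already active
 * queue only gets its BA window and SSN updated; otherwise a HAL-aligned REO
 * queue descriptor is allocated, DMA-mapped and handed to HW either through
 * the REO queue LUT (when reoq_lut_support is set) or via the WMI reorder
 * queue setup command.
 */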
 915int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
 916				u8 tid, u32 ba_win_sz, u16 ssn,
 917				enum hal_pn_type pn_type)
 918{
 919	struct ath12k_base *ab = ar->ab;
 920	struct ath12k_dp *dp = &ab->dp;
 921	struct hal_rx_reo_queue *addr_aligned;
 922	struct ath12k_peer *peer;
 923	struct ath12k_dp_rx_tid *rx_tid;
 924	u32 hw_desc_sz;
 925	void *vaddr;
 926	dma_addr_t paddr;
 927	int ret;
 928
 929	spin_lock_bh(&ab->base_lock);
 930
 931	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
 932	if (!peer) {
 933		spin_unlock_bh(&ab->base_lock);
 934		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
 935		return -ENOENT;
 936	}
 937
 938	if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
 939		spin_unlock_bh(&ab->base_lock);
 940		ath12k_warn(ab, "reo qref table is not setup\n");
 941		return -EINVAL;
 942	}
 943
 944	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
 945		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
 946			    peer->peer_id, tid);
 947		spin_unlock_bh(&ab->base_lock);
 948		return -EINVAL;
 949	}
 950
 951	rx_tid = &peer->rx_tid[tid];
  952	/* Update the tid queue if it is already set up */
 953	if (rx_tid->active) {
 954		paddr = rx_tid->paddr;
 955		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
 956						    ba_win_sz, ssn, true);
 957		spin_unlock_bh(&ab->base_lock);
 958		if (ret) {
 959			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
 960			return ret;
 961		}
 962
 963		if (!ab->hw_params->reoq_lut_support) {
 964			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
 965								     peer_mac,
 966								     paddr, tid, 1,
 967								     ba_win_sz);
 968			if (ret) {
  969				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
 970					    tid, ret);
 971				return ret;
 972			}
 973		}
 974
 975		return 0;
 976	}
 977
 978	rx_tid->tid = tid;
 979
 980	rx_tid->ba_win_sz = ba_win_sz;
 981
 982	/* TODO: Optimize the memory allocation for qos tid based on
 983	 * the actual BA window size in REO tid update path.
 984	 */
 985	if (tid == HAL_DESC_REO_NON_QOS_TID)
 986		hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
 987	else
 988		hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
 989
 990	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
 991	if (!vaddr) {
 992		spin_unlock_bh(&ab->base_lock);
 993		return -ENOMEM;
 994	}
 995
 996	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
 997
 998	ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
 999				   ssn, pn_type);
1000
1001	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
1002			       DMA_BIDIRECTIONAL);
1003
1004	ret = dma_mapping_error(ab->dev, paddr);
1005	if (ret) {
1006		spin_unlock_bh(&ab->base_lock);
1007		goto err_mem_free;
1008	}
1009
1010	rx_tid->vaddr = vaddr;
1011	rx_tid->paddr = paddr;
1012	rx_tid->size = hw_desc_sz;
1013	rx_tid->active = true;
1014
1015	if (ab->hw_params->reoq_lut_support) {
1016		/* Update the REO queue LUT at the corresponding peer id
1017		 * and tid with qaddr.
1018		 */
1019		ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
1020		spin_unlock_bh(&ab->base_lock);
1021	} else {
1022		spin_unlock_bh(&ab->base_lock);
1023		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
1024							     paddr, tid, 1, ba_win_sz);
1025	}
1026
1027	return ret;
1028
1029err_mem_free:
1030	kfree(vaddr);
1031
1032	return ret;
1033}
1034
1035int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
1036			     struct ieee80211_ampdu_params *params)
1037{
1038	struct ath12k_base *ab = ar->ab;
1039	struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta);
1040	int vdev_id = arsta->arvif->vdev_id;
1041	int ret;
1042
1043	ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
1044					  params->tid, params->buf_size,
1045					  params->ssn, arsta->pn_type);
1046	if (ret)
 1047		ath12k_warn(ab, "failed to setup rx tid: %d\n", ret);
1048
1049	return ret;
1050}
1051
1052int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
1053			    struct ieee80211_ampdu_params *params)
1054{
1055	struct ath12k_base *ab = ar->ab;
1056	struct ath12k_peer *peer;
1057	struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta);
1058	int vdev_id = arsta->arvif->vdev_id;
1059	bool active;
1060	int ret;
1061
1062	spin_lock_bh(&ab->base_lock);
1063
1064	peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
1065	if (!peer) {
1066		spin_unlock_bh(&ab->base_lock);
1067		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
1068		return -ENOENT;
1069	}
1070
1071	active = peer->rx_tid[params->tid].active;
1072
1073	if (!active) {
1074		spin_unlock_bh(&ab->base_lock);
1075		return 0;
1076	}
1077
1078	ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
1079	spin_unlock_bh(&ab->base_lock);
1080	if (ret) {
1081		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
1082			    params->tid, ret);
1083		return ret;
1084	}
1085
1086	return ret;
1087}
1088
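/* Enable PN/TSC replay check offload in the REO rx queues of a peer. Only
 * pairwise keys are offloaded; for the supported ciphers a 48-bit PN check
 * is programmed on every active tid queue with an UPDATE_RX_QUEUE command.
 */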
1089int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
1090				       const u8 *peer_addr,
1091				       enum set_key_cmd key_cmd,
1092				       struct ieee80211_key_conf *key)
1093{
1094	struct ath12k *ar = arvif->ar;
1095	struct ath12k_base *ab = ar->ab;
1096	struct ath12k_hal_reo_cmd cmd = {0};
1097	struct ath12k_peer *peer;
1098	struct ath12k_dp_rx_tid *rx_tid;
1099	u8 tid;
1100	int ret = 0;
1101
1102	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1103	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1104	 * for now.
1105	 */
1106	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1107		return 0;
1108
1109	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
1110	cmd.upd0 = HAL_REO_CMD_UPD0_PN |
1111		    HAL_REO_CMD_UPD0_PN_SIZE |
1112		    HAL_REO_CMD_UPD0_PN_VALID |
1113		    HAL_REO_CMD_UPD0_PN_CHECK |
1114		    HAL_REO_CMD_UPD0_SVLD;
1115
1116	switch (key->cipher) {
1117	case WLAN_CIPHER_SUITE_TKIP:
1118	case WLAN_CIPHER_SUITE_CCMP:
1119	case WLAN_CIPHER_SUITE_CCMP_256:
1120	case WLAN_CIPHER_SUITE_GCMP:
1121	case WLAN_CIPHER_SUITE_GCMP_256:
1122		if (key_cmd == SET_KEY) {
1123			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
1124			cmd.pn_size = 48;
1125		}
1126		break;
1127	default:
1128		break;
1129	}
1130
1131	spin_lock_bh(&ab->base_lock);
1132
1133	peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
1134	if (!peer) {
1135		spin_unlock_bh(&ab->base_lock);
1136		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
1137			    peer_addr);
1138		return -ENOENT;
1139	}
1140
1141	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
1142		rx_tid = &peer->rx_tid[tid];
1143		if (!rx_tid->active)
1144			continue;
1145		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
1146		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
1147		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
1148					     HAL_REO_CMD_UPDATE_RX_QUEUE,
1149					     &cmd, NULL);
1150		if (ret) {
1151			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
1152				    tid, peer_addr, ret);
1153			break;
1154		}
1155	}
1156
1157	spin_unlock_bh(&ab->base_lock);
1158
1159	return ret;
1160}
1161
1162static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1163				      u16 peer_id)
1164{
1165	int i;
1166
1167	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1168		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1169			if (peer_id == ppdu_stats->user_stats[i].peer_id)
1170				return i;
1171		} else {
1172			return i;
1173		}
1174	}
1175
1176	return -EINVAL;
1177}
1178
1179static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
1180					   u16 tag, u16 len, const void *ptr,
1181					   void *data)
1182{
1183	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
1184	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
1185	const struct htt_ppdu_stats_user_rate *user_rate;
1186	struct htt_ppdu_stats_info *ppdu_info;
1187	struct htt_ppdu_user_stats *user_stats;
1188	int cur_user;
1189	u16 peer_id;
1190
1191	ppdu_info = data;
1192
1193	switch (tag) {
1194	case HTT_PPDU_STATS_TAG_COMMON:
1195		if (len < sizeof(struct htt_ppdu_stats_common)) {
1196			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1197				    len, tag);
1198			return -EINVAL;
1199		}
1200		memcpy(&ppdu_info->ppdu_stats.common, ptr,
1201		       sizeof(struct htt_ppdu_stats_common));
1202		break;
1203	case HTT_PPDU_STATS_TAG_USR_RATE:
1204		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
1205			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1206				    len, tag);
1207			return -EINVAL;
1208		}
1209		user_rate = ptr;
1210		peer_id = le16_to_cpu(user_rate->sw_peer_id);
1211		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1212						      peer_id);
1213		if (cur_user < 0)
1214			return -EINVAL;
1215		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1216		user_stats->peer_id = peer_id;
1217		user_stats->is_valid_peer_id = true;
1218		memcpy(&user_stats->rate, ptr,
1219		       sizeof(struct htt_ppdu_stats_user_rate));
1220		user_stats->tlv_flags |= BIT(tag);
1221		break;
1222	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
1223		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
1224			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1225				    len, tag);
1226			return -EINVAL;
1227		}
1228
1229		cmplt_cmn = ptr;
1230		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
1231		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1232						      peer_id);
1233		if (cur_user < 0)
1234			return -EINVAL;
1235		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1236		user_stats->peer_id = peer_id;
1237		user_stats->is_valid_peer_id = true;
1238		memcpy(&user_stats->cmpltn_cmn, ptr,
1239		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
1240		user_stats->tlv_flags |= BIT(tag);
1241		break;
1242	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
1243		if (len <
1244		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
1245			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1246				    len, tag);
1247			return -EINVAL;
1248		}
1249
1250		ba_status = ptr;
1251		peer_id = le16_to_cpu(ba_status->sw_peer_id);
1252		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1253						      peer_id);
1254		if (cur_user < 0)
1255			return -EINVAL;
1256		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1257		user_stats->peer_id = peer_id;
1258		user_stats->is_valid_peer_id = true;
1259		memcpy(&user_stats->ack_ba, ptr,
1260		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
1261		user_stats->tlv_flags |= BIT(tag);
1262		break;
1263	}
1264	return 0;
1265}
1266
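/* Walk a buffer of HTT TLVs, validating each header and length before
 * passing the value to the iterator callback. Truncated or oversized TLVs
 * fail with -EINVAL; of the callback errors only -ENOMEM aborts the walk.
 */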
1267static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
1268				  int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
1269					      const void *ptr, void *data),
1270				  void *data)
1271{
1272	const struct htt_tlv *tlv;
1273	const void *begin = ptr;
1274	u16 tlv_tag, tlv_len;
1275	int ret = -EINVAL;
1276
1277	while (len > 0) {
1278		if (len < sizeof(*tlv)) {
1279			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
1280				   ptr - begin, len, sizeof(*tlv));
1281			return -EINVAL;
1282		}
1283		tlv = (struct htt_tlv *)ptr;
1284		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
1285		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
1286		ptr += sizeof(*tlv);
1287		len -= sizeof(*tlv);
1288
1289		if (tlv_len > len) {
1290			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
1291				   tlv_tag, ptr - begin, len, tlv_len);
1292			return -EINVAL;
1293		}
1294		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
1295		if (ret == -ENOMEM)
1296			return ret;
1297
1298		ptr += tlv_len;
1299		len -= tlv_len;
1300	}
1301	return 0;
1302}
1303
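/* Translate one user's PPDU stats TLVs (rate, completion and ack/BA status)
 * into the station's reported tx rate (legacy/HT/VHT/HE) and into the
 * per-peer tx stats.
 */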
1304static void
1305ath12k_update_per_peer_tx_stats(struct ath12k *ar,
1306				struct htt_ppdu_stats *ppdu_stats, u8 user)
1307{
1308	struct ath12k_base *ab = ar->ab;
1309	struct ath12k_peer *peer;
1310	struct ieee80211_sta *sta;
1311	struct ath12k_sta *arsta;
1312	struct htt_ppdu_stats_user_rate *user_rate;
1313	struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
1314	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
1315	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
1316	int ret;
1317	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
1318	u32 v, succ_bytes = 0;
1319	u16 tones, rate = 0, succ_pkts = 0;
1320	u32 tx_duration = 0;
1321	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
1322	bool is_ampdu = false;
1323
1324	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
1325		return;
1326
1327	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
1328		is_ampdu =
1329			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
1330
1331	if (usr_stats->tlv_flags &
1332	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
1333		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
1334		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
1335					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
1336		tid = le32_get_bits(usr_stats->ack_ba.info,
1337				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
1338	}
1339
1340	if (common->fes_duration_us)
1341		tx_duration = le32_to_cpu(common->fes_duration_us);
1342
1343	user_rate = &usr_stats->rate;
1344	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
1345	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
1346	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
1347	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
1348	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
1349	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
1350
 1351	/* Note: If the host configured fixed rates, or in some other special
 1352	 * cases, the broadcast/management frames are sent at different rates.
 1353	 * Should firmware rate control be skipped for these?
 1354	 */
1355
1356	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
1357		ath12k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
1358		return;
1359	}
1360
1361	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
1362		ath12k_warn(ab, "Invalid VHT mcs %d peer stats",  mcs);
1363		return;
1364	}
1365
1366	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
1367		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
1368			    mcs, nss);
1369		return;
1370	}
1371
1372	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1373		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
1374							    flags,
1375							    &rate_idx,
1376							    &rate);
1377		if (ret < 0)
1378			return;
1379	}
1380
1381	rcu_read_lock();
1382	spin_lock_bh(&ab->base_lock);
1383	peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);
1384
1385	if (!peer || !peer->sta) {
1386		spin_unlock_bh(&ab->base_lock);
1387		rcu_read_unlock();
1388		return;
1389	}
1390
1391	sta = peer->sta;
1392	arsta = ath12k_sta_to_arsta(sta);
1393
1394	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1395
1396	switch (flags) {
1397	case WMI_RATE_PREAMBLE_OFDM:
1398		arsta->txrate.legacy = rate;
1399		break;
1400	case WMI_RATE_PREAMBLE_CCK:
1401		arsta->txrate.legacy = rate;
1402		break;
1403	case WMI_RATE_PREAMBLE_HT:
1404		arsta->txrate.mcs = mcs + 8 * (nss - 1);
1405		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1406		if (sgi)
1407			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1408		break;
1409	case WMI_RATE_PREAMBLE_VHT:
1410		arsta->txrate.mcs = mcs;
1411		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1412		if (sgi)
1413			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1414		break;
1415	case WMI_RATE_PREAMBLE_HE:
1416		arsta->txrate.mcs = mcs;
1417		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1418		arsta->txrate.he_dcm = dcm;
1419		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
1420		tones = le16_to_cpu(user_rate->ru_end) -
1421			le16_to_cpu(user_rate->ru_start) + 1;
1422		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
1423		arsta->txrate.he_ru_alloc = v;
1424		break;
1425	}
1426
1427	arsta->txrate.nss = nss;
1428	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
1429	arsta->tx_duration += tx_duration;
1430	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1431
 1432	/* PPDU stats reported for mgmt packets don't have valid tx bytes,
 1433	 * so skip the peer stats update for mgmt packets.
 1434	 */
1435	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1436		memset(peer_stats, 0, sizeof(*peer_stats));
1437		peer_stats->succ_pkts = succ_pkts;
1438		peer_stats->succ_bytes = succ_bytes;
1439		peer_stats->is_ampdu = is_ampdu;
1440		peer_stats->duration = tx_duration;
1441		peer_stats->ba_fails =
1442			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1443			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1444	}
1445
1446	spin_unlock_bh(&ab->base_lock);
1447	rcu_read_unlock();
1448}
1449
1450static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
1451					 struct htt_ppdu_stats *ppdu_stats)
1452{
1453	u8 user;
1454
1455	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1456		ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1457}
1458
1459static
1460struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
1461							u32 ppdu_id)
1462{
1463	struct htt_ppdu_stats_info *ppdu_info;
1464
1465	lockdep_assert_held(&ar->data_lock);
1466	if (!list_empty(&ar->ppdu_stats_info)) {
1467		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1468			if (ppdu_info->ppdu_id == ppdu_id)
1469				return ppdu_info;
1470		}
1471
1472		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1473			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1474						     typeof(*ppdu_info), list);
1475			list_del(&ppdu_info->list);
1476			ar->ppdu_stat_list_depth--;
1477			ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1478			kfree(ppdu_info);
1479		}
1480	}
1481
1482	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
1483	if (!ppdu_info)
1484		return NULL;
1485
1486	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1487	ar->ppdu_stat_list_depth++;
1488
1489	return ppdu_info;
1490}
1491
1492static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
1493				       struct htt_ppdu_user_stats *usr_stats)
1494{
1495	peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
1496	peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
1497	peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
1498	peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
1499	peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
1500	peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
1501	peer->ppdu_stats_delayba.resp_rate_flags =
1502		le32_to_cpu(usr_stats->rate.resp_rate_flags);
1503
1504	peer->delayba_flag = true;
1505}
1506
1507static void ath12k_copy_to_bar(struct ath12k_peer *peer,
1508			       struct htt_ppdu_user_stats *usr_stats)
1509{
1510	usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
1511	usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
1512	usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
1513	usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
1514	usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
1515	usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
1516	usr_stats->rate.resp_rate_flags =
1517		cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);
1518
1519	peer->delayba_flag = false;
1520}
1521
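/* Handle an HTT PPDU stats indication: validate the payload length, look up
 * or allocate the ppdu_info entry for this ppdu_id, parse its TLVs, and back
 * up / restore the per-user rate TLVs needed for delayed-BA and MU-BAR PPDUs.
 */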
1522static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
1523				      struct sk_buff *skb)
1524{
1525	struct ath12k_htt_ppdu_stats_msg *msg;
1526	struct htt_ppdu_stats_info *ppdu_info;
1527	struct ath12k_peer *peer = NULL;
1528	struct htt_ppdu_user_stats *usr_stats = NULL;
1529	u32 peer_id = 0;
1530	struct ath12k *ar;
1531	int ret, i;
1532	u8 pdev_id;
1533	u32 ppdu_id, len;
1534
1535	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
1536	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
1537	if (len > (skb->len - struct_size(msg, data, 0))) {
1538		ath12k_warn(ab,
1539			    "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
1540			    len, skb->len);
1541		return -EINVAL;
1542	}
1543
1544	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
1545	ppdu_id = le32_to_cpu(msg->ppdu_id);
1546
1547	rcu_read_lock();
1548	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
1549	if (!ar) {
1550		ret = -EINVAL;
1551		goto exit;
1552	}
1553
1554	spin_lock_bh(&ar->data_lock);
1555	ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1556	if (!ppdu_info) {
1557		spin_unlock_bh(&ar->data_lock);
1558		ret = -EINVAL;
1559		goto exit;
1560	}
1561
1562	ppdu_info->ppdu_id = ppdu_id;
1563	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
1564				     ath12k_htt_tlv_ppdu_stats_parse,
1565				     (void *)ppdu_info);
1566	if (ret) {
1567		spin_unlock_bh(&ar->data_lock);
1568		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
1569		goto exit;
1570	}
1571
1572	if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
1573		spin_unlock_bh(&ar->data_lock);
1574		ath12k_warn(ab,
1575			    "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
1576			    ppdu_info->ppdu_stats.common.num_users,
1577			    HTT_PPDU_STATS_MAX_USERS);
1578		ret = -EINVAL;
1579		goto exit;
1580	}
1581
1582	/* back up data rate tlv for all peers */
1583	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
1584	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
1585	    ppdu_info->delay_ba) {
1586		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
1587			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
1588			spin_lock_bh(&ab->base_lock);
1589			peer = ath12k_peer_find_by_id(ab, peer_id);
1590			if (!peer) {
1591				spin_unlock_bh(&ab->base_lock);
1592				continue;
1593			}
1594
1595			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
1596			if (usr_stats->delay_ba)
1597				ath12k_copy_to_delay_stats(peer, usr_stats);
1598			spin_unlock_bh(&ab->base_lock);
1599		}
1600	}
1601
1602	/* restore all peers' data rate tlv to mu-bar tlv */
1603	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
1604	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
1605		for (i = 0; i < ppdu_info->bar_num_users; i++) {
1606			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
1607			spin_lock_bh(&ab->base_lock);
1608			peer = ath12k_peer_find_by_id(ab, peer_id);
1609			if (!peer) {
1610				spin_unlock_bh(&ab->base_lock);
1611				continue;
1612			}
1613
1614			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
1615			if (peer->delayba_flag)
1616				ath12k_copy_to_bar(peer, usr_stats);
1617			spin_unlock_bh(&ab->base_lock);
1618		}
1619	}
1620
1621	spin_unlock_bh(&ar->data_lock);
1622
1623exit:
1624	rcu_read_unlock();
1625
1626	return ret;
1627}
1628
1629static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
1630						struct sk_buff *skb)
1631{
1632	struct ath12k_htt_mlo_offset_msg *msg;
1633	struct ath12k_pdev *pdev;
1634	struct ath12k *ar;
1635	u8 pdev_id;
1636
1637	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
1638	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
1639			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
1640
1641	rcu_read_lock();
1642	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
1643	if (!ar) {
1644		ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
1645		goto exit;
1646	}
1647
1648	spin_lock_bh(&ar->data_lock);
1649	pdev = ar->pdev;
1650
1651	pdev->timestamp.info = __le32_to_cpu(msg->info);
1652	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
1653	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
1654	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
1655	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
1656	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
1657	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
1658	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
1659
1660	spin_unlock_bh(&ar->data_lock);
1661exit:
1662	rcu_read_unlock();
1663}
1664
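/* Dispatch target-to-host HTT messages: version confirmation, the peer
 * map/unmap variants, PPDU stats indications and MLO timestamp offset
 * events. The skb is always consumed here.
 */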
1665void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
1666				       struct sk_buff *skb)
1667{
1668	struct ath12k_dp *dp = &ab->dp;
1669	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1670	enum htt_t2h_msg_type type;
1671	u16 peer_id;
1672	u8 vdev_id;
1673	u8 mac_addr[ETH_ALEN];
1674	u16 peer_mac_h16;
1675	u16 ast_hash = 0;
1676	u16 hw_peer_id;
1677
1678	type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);
1679
1680	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
1681
1682	switch (type) {
1683	case HTT_T2H_MSG_TYPE_VERSION_CONF:
1684		dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
1685						      HTT_T2H_VERSION_CONF_MAJOR);
1686		dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
1687						      HTT_T2H_VERSION_CONF_MINOR);
1688		complete(&dp->htt_tgt_version_received);
1689		break;
1690	/* TODO: remove unused peer map versions after testing */
1691	case HTT_T2H_MSG_TYPE_PEER_MAP:
1692		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1693					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1694		peer_id = le32_get_bits(resp->peer_map_ev.info,
1695					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1696		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1697					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1698		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1699				       peer_mac_h16, mac_addr);
1700		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1701		break;
1702	case HTT_T2H_MSG_TYPE_PEER_MAP2:
1703		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1704					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1705		peer_id = le32_get_bits(resp->peer_map_ev.info,
1706					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1707		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1708					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1709		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1710				       peer_mac_h16, mac_addr);
1711		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
1712					 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
1713		hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
1714					   HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
1715		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1716				      hw_peer_id);
1717		break;
1718	case HTT_T2H_MSG_TYPE_PEER_MAP3:
1719		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1720					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1721		peer_id = le32_get_bits(resp->peer_map_ev.info,
1722					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1723		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1724					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1725		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1726				       peer_mac_h16, mac_addr);
1727		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1728				      peer_id);
1729		break;
1730	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1731	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1732		peer_id = le32_get_bits(resp->peer_unmap_ev.info,
1733					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
1734		ath12k_peer_unmap_event(ab, peer_id);
1735		break;
1736	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1737		ath12k_htt_pull_ppdu_stats(ab, skb);
1738		break;
1739	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1740		break;
1741	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
1742		ath12k_htt_mlo_offset_event_handler(ab, skb);
1743		break;
1744	default:
1745		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
1746			   type);
1747		break;
1748	}
1749
1750	dev_kfree_skb_any(skb);
1751}
1752
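/* An MSDU that exceeds DP_RX_BUFFER_SIZE is delivered across several rx
 * buffers. Coalesce it into 'first': copy the MSDU_END TLVs from the last
 * buffer, grow the first skb if its tailroom cannot hold the remainder, then
 * append and free each continuation buffer.
 */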
1753static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
1754				      struct sk_buff_head *msdu_list,
1755				      struct sk_buff *first, struct sk_buff *last,
1756				      u8 l3pad_bytes, int msdu_len)
1757{
1758	struct ath12k_base *ab = ar->ab;
1759	struct sk_buff *skb;
1760	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1761	int buf_first_hdr_len, buf_first_len;
1762	struct hal_rx_desc *ldesc;
1763	int space_extra, rem_len, buf_len;
1764	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
1765
1766	/* As the msdu is spread across multiple rx buffers,
1767	 * find the offset to the start of msdu for computing
1768	 * the length of the msdu in the first buffer.
1769	 */
1770	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1771	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1772
1773	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1774		skb_put(first, buf_first_hdr_len + msdu_len);
1775		skb_pull(first, buf_first_hdr_len);
1776		return 0;
1777	}
1778
1779	ldesc = (struct hal_rx_desc *)last->data;
1780	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
1781	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
1782
1783	/* MSDU spans over multiple buffers because the length of the MSDU
1784	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
1785	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1786	 */
1787	skb_put(first, DP_RX_BUFFER_SIZE);
1788	skb_pull(first, buf_first_hdr_len);
1789
1790	/* When an MSDU is spread over multiple buffers, the MSDU_END
1791	 * TLVs are valid only in the last buffer. Copy those TLVs.
1792	 */
1793	ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1794
1795	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1796	if (space_extra > 0 &&
1797	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1798		/* Free up all buffers of the MSDU */
1799		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1800			rxcb = ATH12K_SKB_RXCB(skb);
1801			if (!rxcb->is_continuation) {
1802				dev_kfree_skb_any(skb);
1803				break;
1804			}
1805			dev_kfree_skb_any(skb);
1806		}
1807		return -ENOMEM;
1808	}
1809
1810	rem_len = msdu_len - buf_first_len;
1811	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1812		rxcb = ATH12K_SKB_RXCB(skb);
1813		if (rxcb->is_continuation)
1814			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1815		else
1816			buf_len = rem_len;
1817
1818		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1819			WARN_ON_ONCE(1);
1820			dev_kfree_skb_any(skb);
1821			return -EINVAL;
1822		}
1823
1824		skb_put(skb, buf_len + hal_rx_desc_sz);
1825		skb_pull(skb, hal_rx_desc_sz);
1826		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1827					  buf_len);
1828		dev_kfree_skb_any(skb);
1829
1830		rem_len -= buf_len;
1831		if (!rxcb->is_continuation)
1832			break;
1833	}
1834
1835	return 0;
1836}
1837
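/* Return the buffer that holds the valid MSDU_END TLVs for this MSDU.
 * If the first buffer is not marked as a continuation it is also the
 * last one; otherwise the first non-continuation entry in msdu_list
 * terminates the MSDU. Returns NULL if no such buffer is queued.
 */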
1838static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1839						      struct sk_buff *first)
1840{
1841	struct sk_buff *skb;
1842	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1843
1844	if (!rxcb->is_continuation)
1845		return first;
1846
1847	skb_queue_walk(msdu_list, skb) {
1848		rxcb = ATH12K_SKB_RXCB(skb);
1849		if (!rxcb->is_continuation)
1850			return skb;
1851	}
1852
1853	return NULL;
1854}
1855
1856static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
1857{
1858	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1859	struct ath12k_base *ab = ar->ab;
1860	bool ip_csum_fail, l4_csum_fail;
1861
1862	ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
1863	l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
1864
1865	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1866			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1867}
1868
1869static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
1870				       enum hal_encrypt_type enctype)
1871{
1872	switch (enctype) {
1873	case HAL_ENCRYPT_TYPE_OPEN:
1874	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1875	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1876		return 0;
1877	case HAL_ENCRYPT_TYPE_CCMP_128:
1878		return IEEE80211_CCMP_MIC_LEN;
1879	case HAL_ENCRYPT_TYPE_CCMP_256:
1880		return IEEE80211_CCMP_256_MIC_LEN;
1881	case HAL_ENCRYPT_TYPE_GCMP_128:
1882	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1883		return IEEE80211_GCMP_MIC_LEN;
1884	case HAL_ENCRYPT_TYPE_WEP_40:
1885	case HAL_ENCRYPT_TYPE_WEP_104:
1886	case HAL_ENCRYPT_TYPE_WEP_128:
1887	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1888	case HAL_ENCRYPT_TYPE_WAPI:
1889		break;
1890	}
1891
1892	ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1893	return 0;
1894}
1895
1896static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
1897					 enum hal_encrypt_type enctype)
1898{
1899	switch (enctype) {
1900	case HAL_ENCRYPT_TYPE_OPEN:
1901		return 0;
1902	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1903	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1904		return IEEE80211_TKIP_IV_LEN;
1905	case HAL_ENCRYPT_TYPE_CCMP_128:
1906		return IEEE80211_CCMP_HDR_LEN;
1907	case HAL_ENCRYPT_TYPE_CCMP_256:
1908		return IEEE80211_CCMP_256_HDR_LEN;
1909	case HAL_ENCRYPT_TYPE_GCMP_128:
1910	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1911		return IEEE80211_GCMP_HDR_LEN;
1912	case HAL_ENCRYPT_TYPE_WEP_40:
1913	case HAL_ENCRYPT_TYPE_WEP_104:
1914	case HAL_ENCRYPT_TYPE_WEP_128:
1915	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1916	case HAL_ENCRYPT_TYPE_WAPI:
1917		break;
1918	}
1919
1920	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1921	return 0;
1922}
1923
1924static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
1925				       enum hal_encrypt_type enctype)
1926{
1927	switch (enctype) {
1928	case HAL_ENCRYPT_TYPE_OPEN:
1929	case HAL_ENCRYPT_TYPE_CCMP_128:
1930	case HAL_ENCRYPT_TYPE_CCMP_256:
1931	case HAL_ENCRYPT_TYPE_GCMP_128:
1932	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1933		return 0;
1934	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1935	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1936		return IEEE80211_TKIP_ICV_LEN;
1937	case HAL_ENCRYPT_TYPE_WEP_40:
1938	case HAL_ENCRYPT_TYPE_WEP_104:
1939	case HAL_ENCRYPT_TYPE_WEP_128:
1940	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1941	case HAL_ENCRYPT_TYPE_WAPI:
1942		break;
1943	}
1944
1945	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1946	return 0;
1947}
1948
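/* Undecap a native-wifi frame: the header delivered by hardware lacks the
 * QoS control (and HT control) fields, so rebuild a QoS data header,
 * restore the crypto header when the IV was not stripped, and hand
 * mac80211 a well-formed 802.11 frame.
 */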
1949static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
1950					 struct sk_buff *msdu,
1951					 enum hal_encrypt_type enctype,
1952					 struct ieee80211_rx_status *status)
1953{
1954	struct ath12k_base *ab = ar->ab;
1955	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1956	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1957	struct ieee80211_hdr *hdr;
1958	size_t hdr_len;
1959	u8 *crypto_hdr;
1960	u16 qos_ctl;
1961
1962	/* pull decapped header */
1963	hdr = (struct ieee80211_hdr *)msdu->data;
1964	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1965	skb_pull(msdu, hdr_len);
1966
1967	/* Rebuild QoS header */
1968	hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1969
1970	/* Reset the order bit as the HT_Control header is stripped */
1971	hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
1972
1973	qos_ctl = rxcb->tid;
1974
1975	if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
1976		qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
1977
1978	/* TODO: Add other QoS ctl fields when required */
1979
1980	/* copy decap header before overwriting for reuse below */
1981	memcpy(decap_hdr, hdr, hdr_len);
1982
1983	/* Rebuild crypto header for mac80211 use */
1984	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1985		crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
1986		ath12k_dp_rx_desc_get_crypto_header(ar->ab,
1987						    rxcb->rx_desc, crypto_hdr,
1988						    enctype);
1989	}
1990
1991	memcpy(skb_push(msdu,
1992			IEEE80211_QOS_CTL_LEN), &qos_ctl,
1993			IEEE80211_QOS_CTL_LEN);
1994	memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
1995}
1996
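/* Undecap a raw (fully 802.11) frame: trim the FCS and, for frames
 * decrypted by hardware, remove the MIC/ICV trailer and the IV from the
 * head according to the RX_FLAG_*_STRIPPED status flags.
 */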
1997static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
1998				       enum hal_encrypt_type enctype,
1999				       struct ieee80211_rx_status *status,
2000				       bool decrypted)
2001{
2002	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2003	struct ieee80211_hdr *hdr;
2004	size_t hdr_len;
2005	size_t crypto_len;
2006
2007	if (!rxcb->is_first_msdu ||
2008	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
2009		WARN_ON_ONCE(1);
2010		return;
2011	}
2012
2013	skb_trim(msdu, msdu->len - FCS_LEN);
2014
2015	if (!decrypted)
2016		return;
2017
2018	hdr = (void *)msdu->data;
2019
2020	/* Tail */
2021	if (status->flag & RX_FLAG_IV_STRIPPED) {
2022		skb_trim(msdu, msdu->len -
2023			 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2024
2025		skb_trim(msdu, msdu->len -
2026			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2027	} else {
2028		/* MIC */
2029		if (status->flag & RX_FLAG_MIC_STRIPPED)
2030			skb_trim(msdu, msdu->len -
2031				 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2032
2033		/* ICV */
2034		if (status->flag & RX_FLAG_ICV_STRIPPED)
2035			skb_trim(msdu, msdu->len -
2036				 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2037	}
2038
2039	/* MMIC */
2040	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2041	    !ieee80211_has_morefrags(hdr->frame_control) &&
2042	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2043		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2044
2045	/* Head */
2046	if (status->flag & RX_FLAG_IV_STRIPPED) {
2047		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2048		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2049
2050		memmove(msdu->data + crypto_len, msdu->data, hdr_len);
2051		skb_pull(msdu, crypto_len);
2052	}
2053}
2054
2055static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
2056					      struct sk_buff *msdu,
2057					      struct ath12k_skb_rxcb *rxcb,
2058					      struct ieee80211_rx_status *status,
2059					      enum hal_encrypt_type enctype)
2060{
2061	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2062	struct ath12k_base *ab = ar->ab;
2063	size_t hdr_len, crypto_len;
2064	struct ieee80211_hdr *hdr;
2065	u16 qos_ctl;
2066	__le16 fc;
2067	u8 *crypto_hdr;
2068
2069	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2070		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2071		crypto_hdr = skb_push(msdu, crypto_len);
2072		ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
2073	}
2074
2075	fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
2076	hdr_len = ieee80211_hdrlen(fc);
2077	skb_push(msdu, hdr_len);
2078	hdr = (struct ieee80211_hdr *)msdu->data;
2079	hdr->frame_control = fc;
2080
2081	/* Get wifi header from rx_desc */
2082	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
2083
2084	if (rxcb->is_mcbc)
2085		status->flag &= ~RX_FLAG_PN_VALIDATED;
2086
2087	/* Add QOS header */
2088	if (ieee80211_is_data_qos(hdr->frame_control)) {
2089		qos_ctl = rxcb->tid;
2090		if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
2091			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2092
2093		/* TODO: Add other QoS ctl fields when required */
2094		memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
2095		       &qos_ctl, IEEE80211_QOS_CTL_LEN);
2096	}
2097}
2098
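/* Undecap an Ethernet (DIX) frame: save DA/SA and the ethertype, replace
 * the Ethernet header with an RFC 1042 SNAP header, rebuild the 802.11
 * header from the rx descriptor and restore the original DA/SA.
 */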
2099static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
2100				       struct sk_buff *msdu,
2101				       enum hal_encrypt_type enctype,
2102				       struct ieee80211_rx_status *status)
2103{
2104	struct ieee80211_hdr *hdr;
2105	struct ethhdr *eth;
2106	u8 da[ETH_ALEN];
2107	u8 sa[ETH_ALEN];
2108	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2109	struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};
2110
2111	eth = (struct ethhdr *)msdu->data;
2112	ether_addr_copy(da, eth->h_dest);
2113	ether_addr_copy(sa, eth->h_source);
2114	rfc.snap_type = eth->h_proto;
2115	skb_pull(msdu, sizeof(*eth));
2116	memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
2117	       sizeof(rfc));
2118	ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);
2119
2120	/* The original 802.11 header has a different DA and, in
2121	 * the case of 4addr frames, may also have a different SA.
2122	 */
2123	hdr = (struct ieee80211_hdr *)msdu->data;
2124	ether_addr_copy(ieee80211_get_DA(hdr), da);
2125	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2126}
2127
2128static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
2129				   struct hal_rx_desc *rx_desc,
2130				   enum hal_encrypt_type enctype,
2131				   struct ieee80211_rx_status *status,
2132				   bool decrypted)
2133{
2134	struct ath12k_base *ab = ar->ab;
2135	u8 decap;
2136	struct ethhdr *ehdr;
2137
2138	decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);
2139
2140	switch (decap) {
2141	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2142		ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
2143		break;
2144	case DP_RX_DECAP_TYPE_RAW:
2145		ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2146					   decrypted);
2147		break;
2148	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2149		ehdr = (struct ethhdr *)msdu->data;
2150
2151		/* mac80211 allows fast path only for authorized STA */
2152		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2153			ATH12K_SKB_RXCB(msdu)->is_eapol = true;
2154			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2155			break;
2156		}
2157
2158		/* PN for mcast packets will be validated in mac80211;
2159		 * remove eth header and add 802.11 header.
2160		 */
2161		if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2162			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2163		break;
2164	case DP_RX_DECAP_TYPE_8023:
2165		/* TODO: Handle undecap for these formats */
2166		break;
2167	}
2168}
2169
2170struct ath12k_peer *
2171ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
2172{
2173	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2174	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2175	struct ath12k_peer *peer = NULL;
2176
2177	lockdep_assert_held(&ab->base_lock);
2178
2179	if (rxcb->peer_id)
2180		peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);
2181
2182	if (peer)
2183		return peer;
2184
2185	if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2186		return NULL;
2187
2188	peer = ath12k_peer_find_by_addr(ab,
2189					ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
2190									      rx_desc));
2191	return peer;
2192}
2193
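/* Per-MPDU rx handling: look up the peer to determine the cipher in use,
 * derive decryption and error status from the rx descriptor, update the
 * mac80211 rx flags accordingly, apply checksum offload results, undecap
 * the frame and clear the protected bit where the crypto material has
 * been stripped.
 */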
2194static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
2195				struct sk_buff *msdu,
2196				struct hal_rx_desc *rx_desc,
2197				struct ieee80211_rx_status *rx_status)
2198{
2199	bool  fill_crypto_hdr;
2200	struct ath12k_base *ab = ar->ab;
2201	struct ath12k_skb_rxcb *rxcb;
2202	enum hal_encrypt_type enctype;
2203	bool is_decrypted = false;
2204	struct ieee80211_hdr *hdr;
2205	struct ath12k_peer *peer;
2206	u32 err_bitmap;
2207
2208	/* PN for multicast packets will be checked in mac80211 */
2209	rxcb = ATH12K_SKB_RXCB(msdu);
2210	fill_crypto_hdr = ath12k_dp_rx_h_is_da_mcbc(ar->ab, rx_desc);
2211	rxcb->is_mcbc = fill_crypto_hdr;
2212
2213	if (rxcb->is_mcbc)
2214		rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
2215
2216	spin_lock_bh(&ar->ab->base_lock);
2217	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
2218	if (peer) {
2219		if (rxcb->is_mcbc)
2220			enctype = peer->sec_type_grp;
2221		else
2222			enctype = peer->sec_type;
2223	} else {
2224		enctype = HAL_ENCRYPT_TYPE_OPEN;
2225	}
2226	spin_unlock_bh(&ar->ab->base_lock);
2227
2228	err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
2229	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2230		is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
2231
2232	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
2233	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2234			     RX_FLAG_MMIC_ERROR |
2235			     RX_FLAG_DECRYPTED |
2236			     RX_FLAG_IV_STRIPPED |
2237			     RX_FLAG_MMIC_STRIPPED);
2238
2239	if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
2240		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2241	if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
2242		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2243
2244	if (is_decrypted) {
2245		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2246
2247		if (fill_crypto_hdr)
2248			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2249					RX_FLAG_ICV_STRIPPED;
2250		else
2251			rx_status->flag |= RX_FLAG_IV_STRIPPED |
2252					   RX_FLAG_PN_VALIDATED;
2253	}
2254
2255	ath12k_dp_rx_h_csum_offload(ar, msdu);
2256	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
2257			       enctype, rx_status, is_decrypted);
2258
2259	if (!is_decrypted || fill_crypto_hdr)
2260		return;
2261
2262	if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
2263	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2264		hdr = (void *)msdu->data;
2265		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2266	}
2267}
2268
2269static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
2270				struct ieee80211_rx_status *rx_status)
2271{
2272	struct ath12k_base *ab = ar->ab;
2273	struct ieee80211_supported_band *sband;
2274	enum rx_msdu_start_pkt_type pkt_type;
2275	u8 bw;
2276	u8 rate_mcs, nss;
2277	u8 sgi;
2278	bool is_cck;
2279
2280	pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
2281	bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
2282	rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
2283	nss = ath12k_dp_rx_h_nss(ab, rx_desc);
2284	sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
2285
2286	switch (pkt_type) {
2287	case RX_MSDU_START_PKT_TYPE_11A:
2288	case RX_MSDU_START_PKT_TYPE_11B:
2289		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2290		sband = &ar->mac.sbands[rx_status->band];
2291		rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
2292								is_cck);
2293		break;
2294	case RX_MSDU_START_PKT_TYPE_11N:
2295		rx_status->encoding = RX_ENC_HT;
2296		if (rate_mcs > ATH12K_HT_MCS_MAX) {
2297			ath12k_warn(ar->ab,
2298				    "Received with invalid mcs in HT mode %d\n",
2299				     rate_mcs);
2300			break;
2301		}
2302		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2303		if (sgi)
2304			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2305		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2306		break;
2307	case RX_MSDU_START_PKT_TYPE_11AC:
2308		rx_status->encoding = RX_ENC_VHT;
2309		rx_status->rate_idx = rate_mcs;
2310		if (rate_mcs > ATH12K_VHT_MCS_MAX) {
2311			ath12k_warn(ar->ab,
2312				    "Received with invalid mcs in VHT mode %d\n",
2313				     rate_mcs);
2314			break;
2315		}
2316		rx_status->nss = nss;
2317		if (sgi)
2318			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2319		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2320		break;
2321	case RX_MSDU_START_PKT_TYPE_11AX:
2322		rx_status->rate_idx = rate_mcs;
2323		if (rate_mcs > ATH12K_HE_MCS_MAX) {
2324			ath12k_warn(ar->ab,
2325				    "Received with invalid mcs in HE mode %d\n",
2326				    rate_mcs);
2327			break;
2328		}
2329		rx_status->encoding = RX_ENC_HE;
2330		rx_status->nss = nss;
2331		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
2332		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2333		break;
2334	}
2335}
2336
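/* Fill the per-PPDU fields of the rx status (band, frequency, rate, NSS,
 * GI and bandwidth) from the rx descriptor. If the reported channel does
 * not map to a known band, fall back to the current rx channel.
 */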
2337void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
2338			 struct ieee80211_rx_status *rx_status)
2339{
2340	struct ath12k_base *ab = ar->ab;
2341	u8 channel_num;
2342	u32 center_freq, meta_data;
2343	struct ieee80211_channel *channel;
2344
2345	rx_status->freq = 0;
2346	rx_status->rate_idx = 0;
2347	rx_status->nss = 0;
2348	rx_status->encoding = RX_ENC_LEGACY;
2349	rx_status->bw = RATE_INFO_BW_20;
2350	rx_status->enc_flags = 0;
2351
2352	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2353
2354	meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
2355	channel_num = meta_data;
2356	center_freq = meta_data >> 16;
2357
2358	if (center_freq >= 5935 && center_freq <= 7105) {
2359		rx_status->band = NL80211_BAND_6GHZ;
2360	} else if (channel_num >= 1 && channel_num <= 14) {
2361		rx_status->band = NL80211_BAND_2GHZ;
2362	} else if (channel_num >= 36 && channel_num <= 173) {
2363		rx_status->band = NL80211_BAND_5GHZ;
2364	} else {
2365		spin_lock_bh(&ar->data_lock);
2366		channel = ar->rx_channel;
2367		if (channel) {
2368			rx_status->band = channel->band;
2369			channel_num =
2370				ieee80211_frequency_to_channel(channel->center_freq);
2371		}
2372		spin_unlock_bh(&ar->data_lock);
2373		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
2374				rx_desc, sizeof(*rx_desc));
2375	}
2376
2377	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2378							 rx_status->band);
2379
2380	ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
2381}
2382
2383static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
2384				      struct sk_buff *msdu,
2385				      struct ieee80211_rx_status *status)
2386{
2387	struct ath12k_base *ab = ar->ab;
2388	static const struct ieee80211_radiotap_he known = {
2389		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2390				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2391		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2392	};
2393	struct ieee80211_radiotap_he *he;
2394	struct ieee80211_rx_status *rx_status;
2395	struct ieee80211_sta *pubsta;
2396	struct ath12k_peer *peer;
2397	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2398	u8 decap = DP_RX_DECAP_TYPE_RAW;
2399	bool is_mcbc = rxcb->is_mcbc;
2400	bool is_eapol = rxcb->is_eapol;
2401
2402	if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2403	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2404		he = skb_push(msdu, sizeof(known));
2405		memcpy(he, &known, sizeof(known));
2406		status->flag |= RX_FLAG_RADIOTAP_HE;
2407	}
2408
2409	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2410		decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
2411
2412	spin_lock_bh(&ab->base_lock);
2413	peer = ath12k_dp_rx_h_find_peer(ab, msdu);
2414
2415	pubsta = peer ? peer->sta : NULL;
2416
2417	spin_unlock_bh(&ab->base_lock);
2418
2419	ath12k_dbg(ab, ATH12K_DBG_DATA,
2420		   "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2421		   msdu,
2422		   msdu->len,
2423		   peer ? peer->addr : NULL,
2424		   rxcb->tid,
2425		   is_mcbc ? "mcast" : "ucast",
2426		   ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
2427		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2428		   (status->encoding == RX_ENC_HT) ? "ht" : "",
2429		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
2430		   (status->encoding == RX_ENC_HE) ? "he" : "",
2431		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
2432		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
2433		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
2434		   (status->bw == RATE_INFO_BW_320) ? "320" : "",
2435		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2436		   status->rate_idx,
2437		   status->nss,
2438		   status->freq,
2439		   status->band, status->flag,
2440		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2441		   !!(status->flag & RX_FLAG_MMIC_ERROR),
2442		   !!(status->flag & RX_FLAG_AMSDU_MORE));
2443
2444	ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
2445			msdu->data, msdu->len);
2446
2447	rx_status = IEEE80211_SKB_RXCB(msdu);
2448	*rx_status = *status;
2449
2450	/* TODO: trace rx packet */
2451
2452	/* PN for multicast packets is not validated in HW,
2453	 * so skip the 802.3 rx path.
2454	 * Also, fast_rx expects the STA to be authorized, hence
2455	 * EAPOL packets are sent via the slow path.
2456	 */
2457	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2458	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2459		rx_status->flag |= RX_FLAG_8023;
2460
2461	ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
2462}
2463
2464static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
2465				     struct sk_buff *msdu,
2466				     struct sk_buff_head *msdu_list,
2467				     struct ieee80211_rx_status *rx_status)
2468{
2469	struct ath12k_base *ab = ar->ab;
2470	struct hal_rx_desc *rx_desc, *lrx_desc;
2471	struct ath12k_skb_rxcb *rxcb;
2472	struct sk_buff *last_buf;
2473	u8 l3_pad_bytes;
2474	u16 msdu_len;
2475	int ret;
2476	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2477
2478	last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2479	if (!last_buf) {
2480		ath12k_warn(ab,
2481			    "No valid Rx buffer to access MSDU_END tlv\n");
2482		ret = -EIO;
2483		goto free_out;
2484	}
2485
2486	rx_desc = (struct hal_rx_desc *)msdu->data;
2487	lrx_desc = (struct hal_rx_desc *)last_buf->data;
2488	if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
2489		ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
2490		ret = -EIO;
2491		goto free_out;
2492	}
2493
2494	rxcb = ATH12K_SKB_RXCB(msdu);
2495	rxcb->rx_desc = rx_desc;
2496	msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
2497	l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);
2498
2499	if (rxcb->is_frag) {
2500		skb_pull(msdu, hal_rx_desc_sz);
2501	} else if (!rxcb->is_continuation) {
2502		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2503			ret = -EINVAL;
2504			ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
2505			ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
2506					sizeof(*rx_desc));
2507			goto free_out;
2508		}
2509		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2510		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2511	} else {
2512		ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
2513						 msdu, last_buf,
2514						 l3_pad_bytes, msdu_len);
2515		if (ret) {
2516			ath12k_warn(ab,
2517				    "failed to coalesce msdu rx buffer %d\n", ret);
2518			goto free_out;
2519		}
2520	}
2521
2522	ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2523	ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2524
2525	rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2526
2527	return 0;
2528
2529free_out:
2530	return ret;
2531}
2532
2533static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
2534						  struct napi_struct *napi,
2535						  struct sk_buff_head *msdu_list,
2536						  int ring_id)
2537{
2538	struct ieee80211_rx_status rx_status = {0};
2539	struct ath12k_skb_rxcb *rxcb;
2540	struct sk_buff *msdu;
2541	struct ath12k *ar;
2542	u8 mac_id, pdev_id;
2543	int ret;
2544
2545	if (skb_queue_empty(msdu_list))
2546		return;
2547
2548	rcu_read_lock();
2549
2550	while ((msdu = __skb_dequeue(msdu_list))) {
2551		rxcb = ATH12K_SKB_RXCB(msdu);
2552		mac_id = rxcb->mac_id;
2553		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
2554		ar = ab->pdevs[pdev_id].ar;
2555		if (!rcu_dereference(ab->pdevs_active[pdev_id])) {
2556			dev_kfree_skb_any(msdu);
2557			continue;
2558		}
2559
2560		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
2561			dev_kfree_skb_any(msdu);
2562			continue;
2563		}
2564
2565		ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2566		if (ret) {
2567			ath12k_dbg(ab, ATH12K_DBG_DATA,
2568				   "Unable to process msdu %d\n", ret);
2569			dev_kfree_skb_any(msdu);
2570			continue;
2571		}
2572
2573		ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2574	}
2575
2576	rcu_read_unlock();
2577}
2578
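/* NAPI handler for a REO destination ring: reap up to 'budget' completed
 * MSDUs, unmap their buffers and queue them on a local list, replenish
 * the rx refill ring and then process/deliver the reaped MSDUs.
 * Returns the number of MSDUs reaped.
 */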
2579int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
2580			 struct napi_struct *napi, int budget)
2581{
2582	struct ath12k_rx_desc_info *desc_info;
2583	struct ath12k_dp *dp = &ab->dp;
2584	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
2585	struct hal_reo_dest_ring *desc;
2586	int num_buffs_reaped = 0;
2587	struct sk_buff_head msdu_list;
2588	struct ath12k_skb_rxcb *rxcb;
2589	int total_msdu_reaped = 0;
2590	struct hal_srng *srng;
2591	struct sk_buff *msdu;
2592	bool done = false;
2593	int mac_id;
2594	u64 desc_va;
2595
2596	__skb_queue_head_init(&msdu_list);
2597
2598	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2599
2600	spin_lock_bh(&srng->lock);
2601
2602try_again:
2603	ath12k_hal_srng_access_begin(ab, srng);
2604
2605	while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
2606		enum hal_reo_dest_ring_push_reason push_reason;
2607		u32 cookie;
2608
2609		cookie = le32_get_bits(desc->buf_addr_info.info1,
2610				       BUFFER_ADDR_INFO1_SW_COOKIE);
2611
2612		mac_id = le32_get_bits(desc->info0,
2613				       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
2614
2615		desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
2616			   le32_to_cpu(desc->buf_va_lo));
2617		desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
2618
2619		/* retry manual desc retrieval */
2620		if (!desc_info) {
2621			desc_info = ath12k_dp_get_rx_desc(ab, cookie);
2622			if (!desc_info) {
2623				ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
2624				continue;
2625			}
2626		}
2627
2628		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
2629			ath12k_warn(ab, "Check HW CC implementation");
2630
2631		msdu = desc_info->skb;
2632		desc_info->skb = NULL;
2633
2634		spin_lock_bh(&dp->rx_desc_lock);
2635		list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
2636		spin_unlock_bh(&dp->rx_desc_lock);
2637
2638		rxcb = ATH12K_SKB_RXCB(msdu);
2639		dma_unmap_single(ab->dev, rxcb->paddr,
2640				 msdu->len + skb_tailroom(msdu),
2641				 DMA_FROM_DEVICE);
2642
2643		num_buffs_reaped++;
2644
2645		push_reason = le32_get_bits(desc->info0,
2646					    HAL_REO_DEST_RING_INFO0_PUSH_REASON);
2647		if (push_reason !=
2648		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
2649			dev_kfree_skb_any(msdu);
2650			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
2651			continue;
2652		}
2653
2654		rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
2655					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2656		rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
2657					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2658		rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
2659					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2660		rxcb->mac_id = mac_id;
2661		rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
2662					      RX_MPDU_DESC_META_DATA_PEER_ID);
2663		rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
2664					  RX_MPDU_DESC_INFO0_TID);
2665
2666		__skb_queue_tail(&msdu_list, msdu);
2667
2668		if (!rxcb->is_continuation) {
2669			total_msdu_reaped++;
2670			done = true;
2671		} else {
2672			done = false;
2673		}
2674
2675		if (total_msdu_reaped >= budget)
2676			break;
2677	}
2678
2679	/* Hw might have updated the head pointer after we cached it.
2680	 * In this case, even though there are entries in the ring we'll
2681	 * get rx_desc NULL. Give the read another try with updated cached
2682	 * head pointer so that we can reap complete MPDU in the current
2683	 * rx processing.
2684	 */
2685	if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
2686		ath12k_hal_srng_access_end(ab, srng);
2687		goto try_again;
2688	}
2689
2690	ath12k_hal_srng_access_end(ab, srng);
2691
2692	spin_unlock_bh(&srng->lock);
2693
2694	if (!total_msdu_reaped)
2695		goto exit;
2696
2697	ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
2698
2699	ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
2700					      ring_id);
2701
2702exit:
2703	return total_msdu_reaped;
2704}
2705
2706static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
2707{
2708	struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
2709
2710	spin_lock_bh(&rx_tid->ab->base_lock);
2711	if (rx_tid->last_frag_no &&
2712	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
2713		spin_unlock_bh(&rx_tid->ab->base_lock);
2714		return;
2715	}
2716	ath12k_dp_rx_frags_cleanup(rx_tid, true);
2717	spin_unlock_bh(&rx_tid->ab->base_lock);
2718}
2719
2720int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
2721{
2722	struct ath12k_base *ab = ar->ab;
2723	struct crypto_shash *tfm;
2724	struct ath12k_peer *peer;
2725	struct ath12k_dp_rx_tid *rx_tid;
2726	int i;
2727
2728	tfm = crypto_alloc_shash("michael_mic", 0, 0);
2729	if (IS_ERR(tfm))
2730		return PTR_ERR(tfm);
2731
2732	spin_lock_bh(&ab->base_lock);
2733
2734	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
2735	if (!peer) {
2736		spin_unlock_bh(&ab->base_lock);
2737		ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
2738		return -ENOENT;
2739	}
2740
2741	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
2742		rx_tid = &peer->rx_tid[i];
2743		rx_tid->ab = ab;
2744		timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
2745		skb_queue_head_init(&rx_tid->rx_frags);
2746	}
2747
2748	peer->tfm_mmic = tfm;
2749	peer->dp_setup_done = true;
2750	spin_unlock_bh(&ab->base_lock);
2751
2752	return 0;
2753}
2754
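/* Compute the TKIP Michael MIC over the DA, SA, priority (TID) header
 * and the MSDU payload using the michael_mic shash allocated at peer
 * fragment setup time.
 */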
2755static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
2756				      struct ieee80211_hdr *hdr, u8 *data,
2757				      size_t data_len, u8 *mic)
2758{
2759	SHASH_DESC_ON_STACK(desc, tfm);
2760	u8 mic_hdr[16] = {0};
2761	u8 tid = 0;
2762	int ret;
2763
2764	if (!tfm)
2765		return -EINVAL;
2766
2767	desc->tfm = tfm;
2768
2769	ret = crypto_shash_setkey(tfm, key, 8);
2770	if (ret)
2771		goto out;
2772
2773	ret = crypto_shash_init(desc);
2774	if (ret)
2775		goto out;
2776
2777	/* TKIP MIC header */
2778	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
2779	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
2780	if (ieee80211_is_data_qos(hdr->frame_control))
2781		tid = ieee80211_get_tid(hdr);
2782	mic_hdr[12] = tid;
2783
2784	ret = crypto_shash_update(desc, mic_hdr, 16);
2785	if (ret)
2786		goto out;
2787	ret = crypto_shash_update(desc, data, data_len);
2788	if (ret)
2789		goto out;
2790	ret = crypto_shash_final(desc, mic);
2791out:
2792	shash_desc_zero(desc);
2793	return ret;
2794}
2795
2796static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer,
2797					  struct sk_buff *msdu)
2798{
2799	struct ath12k_base *ab = ar->ab;
2800	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
2801	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
2802	struct ieee80211_key_conf *key_conf;
2803	struct ieee80211_hdr *hdr;
2804	u8 mic[IEEE80211_CCMP_MIC_LEN];
2805	int head_len, tail_len, ret;
2806	size_t data_len;
2807	u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2808	u8 *key, *data;
2809	u8 key_idx;
2810
2811	if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
2812		return 0;
2813
2814	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
2815	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2816	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
2817	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
2818
2819	if (!is_multicast_ether_addr(hdr->addr1))
2820		key_idx = peer->ucast_keyidx;
2821	else
2822		key_idx = peer->mcast_keyidx;
2823
2824	key_conf = peer->keys[key_idx];
2825
2826	data = msdu->data + head_len;
2827	data_len = msdu->len - head_len - tail_len;
2828	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
2829
2830	ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
2831	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
2832		goto mic_fail;
2833
2834	return 0;
2835
2836mic_fail:
2837	(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
2838	(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
2839
2840	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
2841		    RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
2842	skb_pull(msdu, hal_rx_desc_sz);
2843
2844	ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
2845	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
2846			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
2847	ieee80211_rx(ar->hw, msdu);
2848	return -EINVAL;
2849}
2850
2851static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
2852					enum hal_encrypt_type enctype, u32 flags)
2853{
2854	struct ieee80211_hdr *hdr;
2855	size_t hdr_len;
2856	size_t crypto_len;
2857	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2858
2859	if (!flags)
2860		return;
2861
2862	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
2863
2864	if (flags & RX_FLAG_MIC_STRIPPED)
2865		skb_trim(msdu, msdu->len -
2866			 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2867
2868	if (flags & RX_FLAG_ICV_STRIPPED)
2869		skb_trim(msdu, msdu->len -
2870			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2871
2872	if (flags & RX_FLAG_IV_STRIPPED) {
2873		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2874		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2875
2876		memmove(msdu->data + hal_rx_desc_sz + crypto_len,
2877			msdu->data + hal_rx_desc_sz, hdr_len);
2878		skb_pull(msdu, crypto_len);
2879	}
2880}
2881
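/* Reassemble the fragments queued on rx_tid into a single MPDU: strip
 * per-fragment FCS/MIC/ICV/IV as needed, drop the 802.11 header of all
 * but the first fragment, linearize the payload into the first skb,
 * clear the more-fragments bit and verify the TKIP MIC when applicable.
 */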
2882static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
2883				 struct ath12k_peer *peer,
2884				 struct ath12k_dp_rx_tid *rx_tid,
2885				 struct sk_buff **defrag_skb)
2886{
2887	struct ath12k_base *ab = ar->ab;
2888	struct hal_rx_desc *rx_desc;
2889	struct sk_buff *skb, *first_frag, *last_frag;
2890	struct ieee80211_hdr *hdr;
2891	enum hal_encrypt_type enctype;
2892	bool is_decrypted = false;
2893	int msdu_len = 0;
2894	int extra_space;
2895	u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2896
2897	first_frag = skb_peek(&rx_tid->rx_frags);
2898	last_frag = skb_peek_tail(&rx_tid->rx_frags);
2899
2900	skb_queue_walk(&rx_tid->rx_frags, skb) {
2901		flags = 0;
2902		rx_desc = (struct hal_rx_desc *)skb->data;
2903		hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
2904
2905		enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
2906		if (enctype != HAL_ENCRYPT_TYPE_OPEN)
2907			is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
2908								   rx_desc);
2909
2910		if (is_decrypted) {
2911			if (skb != first_frag)
2912				flags |= RX_FLAG_IV_STRIPPED;
2913			if (skb != last_frag)
2914				flags |= RX_FLAG_ICV_STRIPPED |
2915					 RX_FLAG_MIC_STRIPPED;
2916		}
2917
2918		/* RX fragments are always raw packets */
2919		if (skb != last_frag)
2920			skb_trim(skb, skb->len - FCS_LEN);
2921		ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
2922
2923		if (skb != first_frag)
2924			skb_pull(skb, hal_rx_desc_sz +
2925				      ieee80211_hdrlen(hdr->frame_control));
2926		msdu_len += skb->len;
2927	}
2928
2929	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
2930	if (extra_space > 0 &&
2931	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
2932		return -ENOMEM;
2933
2934	__skb_unlink(first_frag, &rx_tid->rx_frags);
2935	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
2936		skb_put_data(first_frag, skb->data, skb->len);
2937		dev_kfree_skb_any(skb);
2938	}
2939
2940	hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
2941	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
2942	ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
2943
2944	if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
2945		first_frag = NULL;
2946
2947	*defrag_skb = first_frag;
2948	return 0;
2949}
2950
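/* Reinject a defragmented MPDU back to hardware: rebuild the MSDU link
 * descriptor for the new buffer, fill a REO entrance ring descriptor
 * with the MPDU details and queue it so the frame is delivered through
 * the normal REO path.
 */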
2951static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
2952					      struct ath12k_dp_rx_tid *rx_tid,
2953					      struct sk_buff *defrag_skb)
2954{
2955	struct ath12k_base *ab = ar->ab;
2956	struct ath12k_dp *dp = &ab->dp;
2957	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
2958	struct hal_reo_entrance_ring *reo_ent_ring;
2959	struct hal_reo_dest_ring *reo_dest_ring;
2960	struct dp_link_desc_bank *link_desc_banks;
2961	struct hal_rx_msdu_link *msdu_link;
2962	struct hal_rx_msdu_details *msdu0;
2963	struct hal_srng *srng;
2964	dma_addr_t link_paddr, buf_paddr;
2965	u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
2966	u32 cookie, hal_rx_desc_sz, dest_ring_info0;
2967	int ret;
2968	struct ath12k_rx_desc_info *desc_info;
2969	u8 dst_ind;
2970
2971	hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
2972	link_desc_banks = dp->link_desc_banks;
2973	reo_dest_ring = rx_tid->dst_ring_desc;
2974
2975	ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
2976					&link_paddr, &cookie);
2977	desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
2978
2979	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
2980			(link_paddr - link_desc_banks[desc_bank].paddr));
2981	msdu0 = &msdu_link->msdu_link[0];
2982	msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
2983	dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
2984
2985	memset(msdu0, 0, sizeof(*msdu0));
2986
2987	msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
2988		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
2989		    u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
2990		    u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
2991				    RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
2992		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
2993		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
2994	msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
2995	msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
2996
2997	/* change msdu len in hal rx desc */
2998	ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
2999
3000	buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
3001				   defrag_skb->len + skb_tailroom(defrag_skb),
3002				   DMA_FROM_DEVICE);
3003	if (dma_mapping_error(ab->dev, buf_paddr))
3004		return -ENOMEM;
3005
3006	spin_lock_bh(&dp->rx_desc_lock);
3007	desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
3008					     struct ath12k_rx_desc_info,
3009					     list);
3010	if (!desc_info) {
3011		spin_unlock_bh(&dp->rx_desc_lock);
3012		ath12k_warn(ab, "failed to find rx desc for reinject\n");
3013		ret = -ENOMEM;
3014		goto err_unmap_dma;
3015	}
3016
3017	desc_info->skb = defrag_skb;
3018
3019	list_del(&desc_info->list);
3020	list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
3021	spin_unlock_bh(&dp->rx_desc_lock);
3022
3023	ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
3024
3025	ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
3026					desc_info->cookie,
3027					HAL_RX_BUF_RBM_SW3_BM);
3028
3029	/* Fill mpdu details into reo entrance ring */
3030	srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
3031
3032	spin_lock_bh(&srng->lock);
3033	ath12k_hal_srng_access_begin(ab, srng);
3034
3035	reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
3036	if (!reo_ent_ring) {
3037		ath12k_hal_srng_access_end(ab, srng);
3038		spin_unlock_bh(&srng->lock);
3039		ret = -ENOSPC;
3040		goto err_free_desc;
3041	}
3042	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3043
3044	ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
3045					cookie,
3046					HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
3047
3048	mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
3049		    u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
3050		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
3051		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
3052		    u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
3053
3054	reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
3055	reo_ent_ring->rx_mpdu_info.peer_meta_data =
3056		reo_dest_ring->rx_mpdu_info.peer_meta_data;
3057
3058	/* Firmware expects the physical address to be filled in queue_addr_lo
3059	 * in the MLO scenario, whereas in the non-MLO case the peer meta data
3060	 * needs to be filled.
3061	 * TODO: Need to handle the MLO scenario.
3062	 */
3063	reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
3064	reo_ent_ring->info0 = le32_encode_bits(dst_ind,
3065					       HAL_REO_ENTR_RING_INFO0_DEST_IND);
3066
3067	reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
3068					       HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
3069	dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
3070					HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3071	reo_ent_ring->info2 =
3072		cpu_to_le32(u32_get_bits(dest_ring_info0,
3073					 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));
3074
3075	ath12k_hal_srng_access_end(ab, srng);
3076	spin_unlock_bh(&srng->lock);
3077
3078	return 0;
3079
3080err_free_desc:
3081	spin_lock_bh(&dp->rx_desc_lock);
3082	list_del(&desc_info->list);
3083	list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
3084	desc_info->skb = NULL;
3085	spin_unlock_bh(&dp->rx_desc_lock);
3086err_unmap_dma:
3087	dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3088			 DMA_FROM_DEVICE);
3089	return ret;
3090}
3091
3092static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
3093				    struct sk_buff *a, struct sk_buff *b)
3094{
3095	int frag1, frag2;
3096
3097	frag1 = ath12k_dp_rx_h_frag_no(ab, a);
3098	frag2 = ath12k_dp_rx_h_frag_no(ab, b);
3099
3100	return frag1 - frag2;
3101}
3102
3103static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
3104				      struct sk_buff_head *frag_list,
3105				      struct sk_buff *cur_frag)
3106{
3107	struct sk_buff *skb;
3108	int cmp;
3109
3110	skb_queue_walk(frag_list, skb) {
3111		cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
3112		if (cmp < 0)
3113			continue;
3114		__skb_queue_before(frag_list, skb, cur_frag);
3115		return;
3116	}
3117	__skb_queue_tail(frag_list, cur_frag);
3118}
3119
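/* Extract the 48-bit packet number from the CCMP/GCMP IV that follows
 * the 802.11 header. PN0/PN1 are in bytes 0 and 1 of the IV, PN2..PN5
 * in bytes 4..7 (bytes 2 and 3 carry the reserved/key ID octets).
 */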
3120static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
3121{
3122	struct ieee80211_hdr *hdr;
3123	u64 pn = 0;
3124	u8 *ehdr;
3125	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
3126
3127	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3128	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3129
3130	pn = ehdr[0];
3131	pn |= (u64)ehdr[1] << 8;
3132	pn |= (u64)ehdr[4] << 16;
3133	pn |= (u64)ehdr[5] << 24;
3134	pn |= (u64)ehdr[6] << 32;
3135	pn |= (u64)ehdr[7] << 40;
3136
3137	return pn;
3138}
3139
3140static bool
3141ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)
3142{
3143	struct ath12k_base *ab = ar->ab;
3144	enum hal_encrypt_type encrypt_type;
3145	struct sk_buff *first_frag, *skb;
3146	struct hal_rx_desc *desc;
3147	u64 last_pn;
3148	u64 cur_pn;
3149
3150	first_frag = skb_peek(&rx_tid->rx_frags);
3151	desc = (struct hal_rx_desc *)first_frag->data;
3152
3153	encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);
3154	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3155	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3156	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3157	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3158		return true;
3159
3160	last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
3161	skb_queue_walk(&rx_tid->rx_frags, skb) {
3162		if (skb == first_frag)
3163			continue;
3164
3165		cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);
3166		if (cur_pn != last_pn + 1)
3167			return false;
3168		last_pn = cur_pn;
3169	}
3170	return true;
3171}
3172
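/* Handle one rx fragment from the REO exception path: validate the frame
 * and its TID, queue the fragment in order on the peer's per-TID fragment
 * list, and once all fragments up to the last one have arrived verify PN
 * continuity, defragment and reinject the reassembled MPDU. A timer
 * flushes incomplete sequences.
 */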
3173static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
3174				    struct sk_buff *msdu,
3175				    struct hal_reo_dest_ring *ring_desc)
3176{
3177	struct ath12k_base *ab = ar->ab;
3178	struct hal_rx_desc *rx_desc;
3179	struct ath12k_peer *peer;
3180	struct ath12k_dp_rx_tid *rx_tid;
3181	struct sk_buff *defrag_skb = NULL;
3182	u32 peer_id;
3183	u16 seqno, frag_no;
3184	u8 tid;
3185	int ret = 0;
3186	bool more_frags;
3187
3188	rx_desc = (struct hal_rx_desc *)msdu->data;
3189	peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
3190	tid = ath12k_dp_rx_h_tid(ab, rx_desc);
3191	seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);
3192	frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
3193	more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);
3194
3195	if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||
3196	    !ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||
3197	    tid > IEEE80211_NUM_TIDS)
3198		return -EINVAL;
3199
3200	/* Received an unfragmented packet in the REO
3201	 * exception ring; this shouldn't happen,
3202	 * as these packets typically come from
3203	 * the reo2sw srngs.
3204	 */
3205	if (WARN_ON_ONCE(!frag_no && !more_frags))
3206		return -EINVAL;
3207
3208	spin_lock_bh(&ab->base_lock);
3209	peer = ath12k_peer_find_by_id(ab, peer_id);
3210	if (!peer) {
3211		ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3212			    peer_id);
3213		ret = -ENOENT;
3214		goto out_unlock;
3215	}
3216
3217	if (!peer->dp_setup_done) {
3218		ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
3219			    peer->addr, peer_id);
3220		ret = -ENOENT;
3221		goto out_unlock;
3222	}
3223
3224	rx_tid = &peer->rx_tid[tid];
3225
3226	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3227	    skb_queue_empty(&rx_tid->rx_frags)) {
3228		/* Flush stored fragments and start a new sequence */
3229		ath12k_dp_rx_frags_cleanup(rx_tid, true);
3230		rx_tid->cur_sn = seqno;
3231	}
3232
3233	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3234		/* Fragment already present */
3235		ret = -EINVAL;
3236		goto out_unlock;
3237	}
3238
3239	if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap)))
3240		__skb_queue_tail(&rx_tid->rx_frags, msdu);
3241	else
3242		ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
3243
3244	rx_tid->rx_frag_bitmap |= BIT(frag_no);
3245	if (!more_frags)
3246		rx_tid->last_frag_no = frag_no;
3247
3248	if (frag_no == 0) {
3249		rx_tid->dst_ring_desc = kmemdup(ring_desc,
3250						sizeof(*rx_tid->dst_ring_desc),
3251						GFP_ATOMIC);
3252		if (!rx_tid->dst_ring_desc) {
3253			ret = -ENOMEM;
3254			goto out_unlock;
3255		}
3256	} else {
3257		ath12k_dp_rx_link_desc_return(ab, ring_desc,
3258					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3259	}
3260
3261	if (!rx_tid->last_frag_no ||
3262	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3263		mod_timer(&rx_tid->frag_timer, jiffies +
3264					       ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
3265		goto out_unlock;
3266	}
3267
3268	spin_unlock_bh(&ab->base_lock);
3269	del_timer_sync(&rx_tid->frag_timer);
3270	spin_lock_bh(&ab->base_lock);
3271
3272	peer = ath12k_peer_find_by_id(ab, peer_id);
3273	if (!peer)
3274		goto err_frags_cleanup;
3275
3276	if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3277		goto err_frags_cleanup;
3278
3279	if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3280		goto err_frags_cleanup;
3281
3282	if (!defrag_skb)
3283		goto err_frags_cleanup;
3284
3285	if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3286		goto err_frags_cleanup;
3287
3288	ath12k_dp_rx_frags_cleanup(rx_tid, false);
3289	goto out_unlock;
3290
3291err_frags_cleanup:
3292	dev_kfree_skb_any(defrag_skb);
3293	ath12k_dp_rx_frags_cleanup(rx_tid, true);
3294out_unlock:
3295	spin_unlock_bh(&ab->base_lock);
3296	return ret;
3297}
3298
3299static int
3300ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
3301			     bool drop, u32 cookie)
3302{
3303	struct ath12k_base *ab = ar->ab;
3304	struct sk_buff *msdu;
3305	struct ath12k_skb_rxcb *rxcb;
3306	struct hal_rx_desc *rx_desc;
3307	u16 msdu_len;
3308	u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
3309	struct ath12k_rx_desc_info *desc_info;
3310	u64 desc_va;
3311
3312	desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
3313		   le32_to_cpu(desc->buf_va_lo));
3314	desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
3315
3316	/* retry manual desc retrieval */
3317	if (!desc_info) {
3318		desc_info = ath12k_dp_get_rx_desc(ab, cookie);
3319		if (!desc_info) {
3320			ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
3321			return -EINVAL;
3322		}
3323	}
3324
3325	if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
3326		ath12k_warn(ab, "RX Exception, Check HW CC implementation");
3327
3328	msdu = desc_info->skb;
3329	desc_info->skb = NULL;
3330	spin_lock_bh(&ab->dp.rx_desc_lock);
3331	list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
3332	spin_unlock_bh(&ab->dp.rx_desc_lock);
3333
3334	rxcb = ATH12K_SKB_RXCB(msdu);
3335	dma_unmap_single(ar->ab->dev, rxcb->paddr,
3336			 msdu->len + skb_tailroom(msdu),
3337			 DMA_FROM_DEVICE);
3338
3339	if (drop) {
3340		dev_kfree_skb_any(msdu);
3341		return 0;
3342	}
3343
3344	rcu_read_lock();
3345	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3346		dev_kfree_skb_any(msdu);
3347		goto exit;
3348	}
3349
3350	if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
3351		dev_kfree_skb_any(msdu);
3352		goto exit;
3353	}
3354
3355	rx_desc = (struct hal_rx_desc *)msdu->data;
3356	msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);
3357	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3358		ath12k_warn(ar->ab, "invalid msdu len %u", msdu_len);
3359		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
3360				sizeof(*rx_desc));
3361		dev_kfree_skb_any(msdu);
3362		goto exit;
3363	}
3364
3365	skb_put(msdu, hal_rx_desc_sz + msdu_len);
3366
3367	if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
3368		dev_kfree_skb_any(msdu);
3369		ath12k_dp_rx_link_desc_return(ar->ab, desc,
3370					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3371	}
3372exit:
3373	rcu_read_unlock();
3374	return 0;
3375}
3376
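/* Process the REO exception ring within the given NAPI budget: parse each
 * error descriptor, validate the return buffer manager, and either drop
 * the MSDUs or hand rx fragments to the defragmentation path. Reaped
 * buffers are replenished before returning.
 */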
3377int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
3378			     int budget)
3379{
3380	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3381	struct dp_link_desc_bank *link_desc_banks;
3382	enum hal_rx_buf_return_buf_manager rbm;
3383	struct hal_rx_msdu_link *link_desc_va;
3384	int tot_n_bufs_reaped, quota, ret, i;
3385	struct hal_reo_dest_ring *reo_desc;
3386	struct dp_rxdma_ring *rx_ring;
3387	struct dp_srng *reo_except;
3388	u32 desc_bank, num_msdus;
3389	struct hal_srng *srng;
3390	struct ath12k_dp *dp;
3391	int mac_id;
3392	struct ath12k *ar;
3393	dma_addr_t paddr;
3394	bool is_frag;
3395	bool drop = false;
3396	int pdev_id;
3397
3398	tot_n_bufs_reaped = 0;
3399	quota = budget;
3400
3401	dp = &ab->dp;
3402	reo_except = &dp->reo_except_ring;
3403	link_desc_banks = dp->link_desc_banks;
3404
3405	srng = &ab->hal.srng_list[reo_except->ring_id];
3406
3407	spin_lock_bh(&srng->lock);
3408
3409	ath12k_hal_srng_access_begin(ab, srng);
3410
3411	while (budget &&
3412	       (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
3413		ab->soc_stats.err_ring_pkts++;
3414		ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
3415						    &desc_bank);
3416		if (ret) {
3417			ath12k_warn(ab, "failed to parse error reo desc %d\n",
3418				    ret);
3419			continue;
3420		}
3421		link_desc_va = link_desc_banks[desc_bank].vaddr +
3422			       (paddr - link_desc_banks[desc_bank].paddr);
3423		ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3424						 &rbm);
3425		if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
3426		    rbm != HAL_RX_BUF_RBM_SW3_BM &&
3427		    rbm != ab->hw_params->hal_params->rx_buf_rbm) {
3428			ab->soc_stats.invalid_rbm++;
3429			ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
3430			ath12k_dp_rx_link_desc_return(ab, reo_desc,
3431						      HAL_WBM_REL_BM_ACT_REL_MSDU);
3432			continue;
3433		}
3434
3435		is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
3436			     RX_MPDU_DESC_INFO0_FRAG_FLAG);
3437
3438		/* Process only rx fragments with one msdu per link desc below, and drop
3439		 * msdus indicated due to error reasons.
3440		 */
3441		if (!is_frag || num_msdus > 1) {
3442			drop = true;
3443			/* Return the link desc back to wbm idle list */
3444			ath12k_dp_rx_link_desc_return(ab, reo_desc,
3445						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3446		}
3447
3448		for (i = 0; i < num_msdus; i++) {
3449			mac_id = le32_get_bits(reo_desc->info0,
3450					       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3451
3452			pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
3453			ar = ab->pdevs[pdev_id].ar;
3454
3455			if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
3456							  msdu_cookies[i]))
3457				tot_n_bufs_reaped++;
3458		}
3459
3460		if (tot_n_bufs_reaped >= quota) {
3461			tot_n_bufs_reaped = quota;
3462			goto exit;
3463		}
3464
3465		budget = quota - tot_n_bufs_reaped;
3466	}
3467
3468exit:
3469	ath12k_hal_srng_access_end(ab, srng);
3470
3471	spin_unlock_bh(&srng->lock);
3472
3473	rx_ring = &dp->rx_refill_buf_ring;
3474
3475	ath12k_dp_rx_bufs_replenish(ab, rx_ring, tot_n_bufs_reaped);
3476
3477	return tot_n_bufs_reaped;
3478}
3479
3480static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
3481					     int msdu_len,
3482					     struct sk_buff_head *msdu_list)
3483{
3484	struct sk_buff *skb, *tmp;
3485	struct ath12k_skb_rxcb *rxcb;
3486	int n_buffs;
3487
3488	n_buffs = DIV_ROUND_UP(msdu_len,
3489			       (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));
3490
3491	skb_queue_walk_safe(msdu_list, skb, tmp) {
3492		rxcb = ATH12K_SKB_RXCB(skb);
3493		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3494		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3495			if (!n_buffs)
3496				break;
3497			__skb_unlink(skb, msdu_list);
3498			dev_kfree_skb_any(skb);
3499			n_buffs--;
3500		}
3501	}
3502}
3503
3504static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
3505				      struct ieee80211_rx_status *status,
3506				      struct sk_buff_head *msdu_list)
3507{
3508	struct ath12k_base *ab = ar->ab;
3509	u16 msdu_len;
3510	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3511	u8 l3pad_bytes;
3512	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3513	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
3514
3515	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3516
3517	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3518		/* First buffer will be freed by the caller, so deduct its length */
3519		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3520		ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3521		return -EINVAL;
3522	}
3523
3524	/* Even after cleaning up the sg buffers in the msdu list with the above
3525	 * check, any msdu received with the continuation flag must be dropped as
3526	 * invalid. This protects against a random error frame with the flag set.
3527	 */
3528	if (rxcb->is_continuation)
3529		return -EINVAL;
3530
3531	if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
3532		ath12k_warn(ar->ab,
3533			    "msdu_done bit not set in null_q_desc processing\n");
3534		__skb_queue_purge(msdu_list);
3535		return -EIO;
3536	}
3537
3538	/* Handle NULL queue descriptor violations arising out of a missing
3539	 * REO queue for a given peer or a given TID. This typically
3540	 * happens if a packet is received on a QoS enabled TID before the
3541	 * ADDBA negotiation for that TID has set up the TID queue. It
3542	 * may also happen for MC/BC frames if they are not routed to the
3543	 * non-QoS TID queue in the absence of any other default TID queue.
3544	 * This error can show up both in a REO destination and a WBM release ring.
3545	 */
3546
3547	if (rxcb->is_frag) {
3548		skb_pull(msdu, hal_rx_desc_sz);
3549	} else {
3550		l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3551
3552		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3553			return -EINVAL;
3554
3555		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3556		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3557	}
3558	ath12k_dp_rx_h_ppdu(ar, desc, status);
3559
3560	ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
3561
3562	rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
3563
3564	/* Note that the caller still has access to the msdu and will complete
3565	 * rx with mac80211, so there is no need to clean up msdu_list here.
3566	 */
3567
3568	return 0;
3569}
3570
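/* Dispatch an msdu received on the WBM release ring with a REO error code.
 * Returns true if the msdu must be dropped, false if it can be delivered.
 */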
3571static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
3572				   struct ieee80211_rx_status *status,
3573				   struct sk_buff_head *msdu_list)
3574{
3575	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3576	bool drop = false;
3577
3578	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3579
3580	switch (rxcb->err_code) {
3581	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3582		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3583			drop = true;
3584		break;
3585	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3586		/* TODO: Do not drop PN failed packets in the driver;
3587		 * instead, it is good to drop such packets in mac80211
3588		 * after incrementing the replay counters.
3589		 */
3590		fallthrough;
3591	default:
3592		/* TODO: Review other errors and process them to mac80211
3593		 * as appropriate.
3594		 */
3595		drop = true;
3596		break;
3597	}
3598
3599	return drop;
3600}
3601
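/* Prepare an msdu that failed the TKIP Michael MIC check for delivery to
 * mac80211: strip the rx descriptor and L3 padding, mark the frame as
 * decrypted with the MIC stripped and in error, and undecap it so mac80211
 * can account for the MIC failure (e.g. TKIP countermeasures).
 */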
3602static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
3603					struct ieee80211_rx_status *status)
3604{
3605	struct ath12k_base *ab = ar->ab;
3606	u16 msdu_len;
3607	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3608	u8 l3pad_bytes;
3609	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3610	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
3611
3612	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
3613	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
3614
3615	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3616	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3617	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3618	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3619
3620	ath12k_dp_rx_h_ppdu(ar, desc, status);
3621
3622	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
3623			 RX_FLAG_DECRYPTED);
3624
3625	ath12k_dp_rx_h_undecap(ar, msdu, desc,
3626			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
3627}
3628
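/* Dispatch an msdu received on the WBM release ring with an RXDMA error
 * code. TKIP MIC failures are passed up to mac80211; everything else is
 * dropped. Returns true if the msdu must be dropped.
 */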
3629static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
3630				     struct ieee80211_rx_status *status)
3631{
3632	struct ath12k_base *ab = ar->ab;
3633	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3634	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3635	bool drop = false;
3636	u32 err_bitmap;
3637
3638	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
3639
3640	switch (rxcb->err_code) {
3641	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
3642	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3643		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
3644		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
3645			ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
3646			break;
3647		}
3648		fallthrough;
3649	default:
3650		/* TODO: Review other rxdma error codes to check if anything is
3651		 * worth reporting to mac80211.
3652		 */
3653		drop = true;
3654		break;
3655	}
3656
3657	return drop;
3658}
3659
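/* Route an error msdu from the WBM release ring to the REO or RXDMA error
 * handler depending on its release source, and deliver it to mac80211
 * unless the handler asks for it to be dropped.
 */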
3660static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
3661				 struct napi_struct *napi,
3662				 struct sk_buff *msdu,
3663				 struct sk_buff_head *msdu_list)
3664{
3665	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3666	struct ieee80211_rx_status rxs = {0};
3667	bool drop = true;
3668
3669	switch (rxcb->err_rel_src) {
3670	case HAL_WBM_REL_SRC_MODULE_REO:
3671		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
3672		break;
3673	case HAL_WBM_REL_SRC_MODULE_RXDMA:
3674		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
3675		break;
3676	default:
3677		/* msdu will get freed */
3678		break;
3679	}
3680
3681	if (drop) {
3682		dev_kfree_skb_any(msdu);
3683		return;
3684	}
3685
3686	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
3687}
3688
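/* NAPI handler for the WBM error release ring: reap up to budget error
 * descriptors, replenish the rx refill ring and hand the collected msdus
 * to per-pdev error processing. Returns the number of buffers reaped.
 */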
3689int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
3690				 struct napi_struct *napi, int budget)
3691{
3692	struct ath12k *ar;
3693	struct ath12k_dp *dp = &ab->dp;
3694	struct dp_rxdma_ring *rx_ring;
3695	struct hal_rx_wbm_rel_info err_info;
3696	struct hal_srng *srng;
3697	struct sk_buff *msdu;
3698	struct sk_buff_head msdu_list[MAX_RADIOS];
3699	struct ath12k_skb_rxcb *rxcb;
3700	void *rx_desc;
3701	int mac_id;
3702	int num_buffs_reaped = 0;
3703	struct ath12k_rx_desc_info *desc_info;
3704	int ret, i;
3705
3706	for (i = 0; i < ab->num_radios; i++)
3707		__skb_queue_head_init(&msdu_list[i]);
3708
3709	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
3710	rx_ring = &dp->rx_refill_buf_ring;
3711
3712	spin_lock_bh(&srng->lock);
3713
3714	ath12k_hal_srng_access_begin(ab, srng);
3715
3716	while (budget) {
3717		rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
3718		if (!rx_desc)
3719			break;
3720
3721		ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
3722		if (ret) {
3723			ath12k_warn(ab,
3724				    "failed to parse rx error in wbm_rel ring desc %d\n",
3725				    ret);
3726			continue;
3727		}
3728
3729		desc_info = err_info.rx_desc;
3730
3731		/* retry manual desc retrieval if HW cookie conversion is not done */
3732		if (!desc_info) {
3733			desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
3734			if (!desc_info) {
3735				ath12k_warn(ab, "invalid cookie in manual desc retrieval\n");
3736				continue;
3737			}
3738		}
3739
3740		/* FIXME: Extract the mac id correctly. Since descs are not tied
3741		 * to a mac, it can be extracted from the vdev id in the ring desc.
3742		 */
3743		mac_id = 0;
3744
3745		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
3746			ath12k_warn(ab, "WBM RX err, check HW CC implementation\n");
3747
3748		msdu = desc_info->skb;
3749		desc_info->skb = NULL;
3750
3751		spin_lock_bh(&dp->rx_desc_lock);
3752		list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
3753		spin_unlock_bh(&dp->rx_desc_lock);
3754
3755		rxcb = ATH12K_SKB_RXCB(msdu);
3756		dma_unmap_single(ab->dev, rxcb->paddr,
3757				 msdu->len + skb_tailroom(msdu),
3758				 DMA_FROM_DEVICE);
3759
3760		num_buffs_reaped++;
3761
3762		if (!err_info.continuation)
3763			budget--;
3764
3765		if (err_info.push_reason !=
3766		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
3767			dev_kfree_skb_any(msdu);
3768			continue;
3769		}
3770
3771		rxcb->err_rel_src = err_info.err_rel_src;
3772		rxcb->err_code = err_info.err_code;
3773		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
3774		__skb_queue_tail(&msdu_list[mac_id], msdu);
3775
3776		rxcb->is_first_msdu = err_info.first_msdu;
3777		rxcb->is_last_msdu = err_info.last_msdu;
3778		rxcb->is_continuation = err_info.continuation;
3779	}
3780
3781	ath12k_hal_srng_access_end(ab, srng);
3782
3783	spin_unlock_bh(&srng->lock);
3784
3785	if (!num_buffs_reaped)
3786		goto done;
3787
3788	ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
3789
3790	rcu_read_lock();
3791	for (i = 0; i < ab->num_radios; i++) {
3792		if (!rcu_dereference(ab->pdevs_active[i])) {
3793			__skb_queue_purge(&msdu_list[i]);
3794			continue;
3795		}
3796
3797		ar = ab->pdevs[i].ar;
3798
3799		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
3800			__skb_queue_purge(&msdu_list[i]);
3801			continue;
3802		}
3803
3804		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
3805			ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
3806	}
3807	rcu_read_unlock();
3808done:
3809	return num_buffs_reaped;
3810}
3811
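/* Drain the REO status ring: parse each status TLV and complete the
 * matching command queued on dp->reo_cmd_list by invoking its handler.
 */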
3812void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
3813{
3814	struct ath12k_dp *dp = &ab->dp;
3815	struct hal_tlv_64_hdr *hdr;
3816	struct hal_srng *srng;
3817	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
3818	bool found = false;
3819	u16 tag;
3820	struct hal_reo_status reo_status;
3821
3822	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
3823
3824	memset(&reo_status, 0, sizeof(reo_status));
3825
3826	spin_lock_bh(&srng->lock);
3827
3828	ath12k_hal_srng_access_begin(ab, srng);
3829
3830	while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
3831		tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
3832
3833		switch (tag) {
3834		case HAL_REO_GET_QUEUE_STATS_STATUS:
3835			ath12k_hal_reo_status_queue_stats(ab, hdr,
3836							  &reo_status);
3837			break;
3838		case HAL_REO_FLUSH_QUEUE_STATUS:
3839			ath12k_hal_reo_flush_queue_status(ab, hdr,
3840							  &reo_status);
3841			break;
3842		case HAL_REO_FLUSH_CACHE_STATUS:
3843			ath12k_hal_reo_flush_cache_status(ab, hdr,
3844							  &reo_status);
3845			break;
3846		case HAL_REO_UNBLOCK_CACHE_STATUS:
3847			ath12k_hal_reo_unblk_cache_status(ab, hdr,
3848							  &reo_status);
3849			break;
3850		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
3851			ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
3852								 &reo_status);
3853			break;
3854		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
3855			ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
3856								  &reo_status);
3857			break;
3858		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
3859			ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
3860								  &reo_status);
3861			break;
3862		default:
3863			ath12k_warn(ab, "Unknown reo status type %d\n", tag);
3864			continue;
3865		}
3866
3867		spin_lock_bh(&dp->reo_cmd_lock);
3868		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
3869			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
3870				found = true;
3871				list_del(&cmd->list);
3872				break;
3873			}
3874		}
3875		spin_unlock_bh(&dp->reo_cmd_lock);
3876
3877		if (found) {
3878			cmd->handler(dp, (void *)&cmd->data,
3879				     reo_status.uniform_hdr.cmd_status);
3880			kfree(cmd);
3881		}
3882
3883		found = false;
3884	}
3885
3886	ath12k_hal_srng_access_end(ab, srng);
3887
3888	spin_unlock_bh(&srng->lock);
3889}
3890
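/* Tear down the SoC-level rx rings (refill, per-MAC buf, rxdma error
 * destination and monitor refill rings) and free the rxdma buffers.
 */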
3891void ath12k_dp_rx_free(struct ath12k_base *ab)
3892{
3893	struct ath12k_dp *dp = &ab->dp;
3894	int i;
3895
3896	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
3897
3898	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
3899		if (ab->hw_params->rx_mac_buf_ring)
3900			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
3901	}
3902
3903	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
3904		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
3905
3906	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
3907	ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
3908
3909	ath12k_dp_rxdma_buf_free(ab);
3910}
3911
3912void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
3913{
3914	struct ath12k *ar = ab->pdevs[mac_id].ar;
3915
3916	ath12k_dp_rx_pdev_srng_free(ar);
3917}
3918
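/* Program the HTT rx ring selection filter for QCN9274: subscribe the
 * refill ring only to the rxdma TLVs the driver uses and set the offsets
 * at which mpdu_start and msdu_end are placed in the rx descriptor.
 */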
3919int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
3920{
3921	struct ath12k_dp *dp = &ab->dp;
3922	struct htt_rx_ring_tlv_filter tlv_filter = {0};
3923	u32 ring_id;
3924	int ret;
3925	u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
3926
3927	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
3928
3929	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
3930	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
3931	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
3932					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
3933					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
3934	tlv_filter.offset_valid = true;
3935	tlv_filter.rx_packet_offset = hal_rx_desc_sz;
3936
3937	tlv_filter.rx_mpdu_start_offset =
3938			ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
3939	tlv_filter.rx_msdu_end_offset =
3940		ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
3941
3942	/* TODO: Selectively subscribe to the required qwords within msdu_end
3943	 * and mpdu_start, set up the mask in the message below
3944	 * and modify the rx_desc struct accordingly.
3945	 */
3946	ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
3947					       HAL_RXDMA_BUF,
3948					       DP_RXDMA_REFILL_RING_SIZE,
3949					       &tlv_filter);
3950
3951	return ret;
3952}
3953
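/* Program the HTT rx ring selection filter for WCN7850: same TLV
 * subscription as above, but applied to each per-MAC rx buf ring and
 * including the packet header TLV offset.
 */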
3954int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
3955{
3956	struct ath12k_dp *dp = &ab->dp;
3957	struct htt_rx_ring_tlv_filter tlv_filter = {0};
3958	u32 ring_id;
3959	int ret;
3960	u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
3961	int i;
3962
3963	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
3964
3965	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
3966	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
3967	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
3968					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
3969					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
3970	tlv_filter.offset_valid = true;
3971	tlv_filter.rx_packet_offset = hal_rx_desc_sz;
3972
3973	tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
3974
3975	tlv_filter.rx_mpdu_start_offset =
3976			ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
3977	tlv_filter.rx_msdu_end_offset =
3978		ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
3979
3980	/* TODO: Selectively subscribe to the required qwords within msdu_end
3981	 * and mpdu_start, set up the mask in the message below
3982	 * and modify the rx_desc struct accordingly.
3983	 */
3984
3985	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
3986		ring_id = dp->rx_mac_buf_ring[i].ring_id;
3987		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
3988						       HAL_RXDMA_BUF,
3989						       DP_RXDMA_REFILL_RING_SIZE,
3990						       &tlv_filter);
3991	}
3992
3993	return ret;
3994}
3995
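/* Register the rx related SRNGs (refill, per-MAC buf, rxdma error dst and
 * monitor rings) with the firmware via HTT and apply the hw-specific
 * rxdma ring selection config.
 */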
3996int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
3997{
3998	struct ath12k_dp *dp = &ab->dp;
3999	u32 ring_id;
4000	int i, ret;
4001
4002	/* TODO: Need to verify the HTT setup for QCN9224 */
4003	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4004	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
4005	if (ret) {
4006		ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4007			    ret);
4008		return ret;
4009	}
4010
4011	if (ab->hw_params->rx_mac_buf_ring) {
4012		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4013			ring_id = dp->rx_mac_buf_ring[i].ring_id;
4014			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4015							  i, HAL_RXDMA_BUF);
4016			if (ret) {
4017				ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4018					    i, ret);
4019				return ret;
4020			}
4021		}
4022	}
4023
4024	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
4025		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4026		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4027						  i, HAL_RXDMA_DST);
4028		if (ret) {
4029			ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
4030				    i, ret);
4031			return ret;
4032		}
4033	}
4034
4035	if (ab->hw_params->rxdma1_enable) {
4036		ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4037		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4038						  0, HAL_RXDMA_MONITOR_BUF);
4039		if (ret) {
4040			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4041				    ret);
4042			return ret;
4043		}
4044
4045		ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
4046		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4047						  0, HAL_TX_MONITOR_BUF);
4048		if (ret) {
4049			ath12k_warn(ab, "failed to configure tx_mon_buf_ring %d\n",
4050				    ret);
4051			return ret;
4052		}
4053	}
4054
4055	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
4056	if (ret) {
4057		ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
4058		return ret;
4059	}
4060
4061	return 0;
4062}
4063
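/* Allocate the SoC-level rx SRNGs (refill, per-MAC buf, rxdma error dst
 * and monitor refill rings) and fill them with rx buffers.
 */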
4064int ath12k_dp_rx_alloc(struct ath12k_base *ab)
4065{
4066	struct ath12k_dp *dp = &ab->dp;
4067	int i, ret;
4068
4069	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
4070	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
4071
4072	idr_init(&dp->tx_mon_buf_ring.bufs_idr);
4073	spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
4074
4075	ret = ath12k_dp_srng_setup(ab,
4076				   &dp->rx_refill_buf_ring.refill_buf_ring,
4077				   HAL_RXDMA_BUF, 0, 0,
4078				   DP_RXDMA_BUF_RING_SIZE);
4079	if (ret) {
4080		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
4081		return ret;
4082	}
4083
4084	if (ab->hw_params->rx_mac_buf_ring) {
4085		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4086			ret = ath12k_dp_srng_setup(ab,
4087						   &dp->rx_mac_buf_ring[i],
4088						   HAL_RXDMA_BUF, 1,
4089						   i, 1024);
4090			if (ret) {
4091				ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
4092					    i);
4093				return ret;
4094			}
4095		}
4096	}
4097
4098	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
4099		ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
4100					   HAL_RXDMA_DST, 0, i,
4101					   DP_RXDMA_ERR_DST_RING_SIZE);
4102		if (ret) {
4103			ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
4104			return ret;
4105		}
4106	}
4107
4108	if (ab->hw_params->rxdma1_enable) {
4109		ret = ath12k_dp_srng_setup(ab,
4110					   &dp->rxdma_mon_buf_ring.refill_buf_ring,
4111					   HAL_RXDMA_MONITOR_BUF, 0, 0,
4112					   DP_RXDMA_MONITOR_BUF_RING_SIZE);
4113		if (ret) {
4114			ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
4115			return ret;
4116		}
4117
4118		ret = ath12k_dp_srng_setup(ab,
4119					   &dp->tx_mon_buf_ring.refill_buf_ring,
4120					   HAL_TX_MONITOR_BUF, 0, 0,
4121					   DP_TX_MONITOR_BUF_RING_SIZE);
4122		if (ret) {
4123		ath12k_warn(ab, "failed to setup HAL_TX_MONITOR_BUF\n");
4124			return ret;
4125		}
4126	}
4127
4128	ret = ath12k_dp_rxdma_buf_setup(ab);
4129	if (ret) {
4130		ath12k_warn(ab, "failed to setup rxdma ring\n");
4131		return ret;
4132	}
4133
4134	return 0;
4135}
4136
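/* Per-pdev rx setup: when rxdma1 is enabled, allocate the pdev monitor
 * SRNGs and register the rx/tx monitor destination rings via HTT.
 */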
4137int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
4138{
4139	struct ath12k *ar = ab->pdevs[mac_id].ar;
4140	struct ath12k_pdev_dp *dp = &ar->dp;
4141	u32 ring_id;
4142	int i;
4143	int ret;
4144
4145	if (!ab->hw_params->rxdma1_enable)
4146		goto out;
4147
4148	ret = ath12k_dp_rx_pdev_srng_alloc(ar);
4149	if (ret) {
4150		ath12k_warn(ab, "failed to setup rx srngs\n");
4151		return ret;
4152	}
4153
4154	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4155		ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
4156		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4157						  mac_id + i,
4158						  HAL_RXDMA_MONITOR_DST);
4159		if (ret) {
4160			ath12k_warn(ab,
4161				    "failed to configure rxdma_mon_dst_ring %d %d\n",
4162				    i, ret);
4163			return ret;
4164		}
4165
4166		ring_id = dp->tx_mon_dst_ring[i].ring_id;
4167		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4168						  mac_id + i,
4169						  HAL_TX_MONITOR_DST);
4170		if (ret) {
4171			ath12k_warn(ab,
4172				    "failed to configure tx_mon_dst_ring %d %d\n",
4173				    i, ret);
4174			return ret;
4175		}
4176	}
4177out:
4178	return 0;
4179}
4180
4181static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
4182{
4183	struct ath12k_pdev_dp *dp = &ar->dp;
4184	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;
4185
4186	skb_queue_head_init(&pmon->rx_status_q);
4187
4188	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4189
4190	memset(&pmon->rx_mon_stats, 0,
4191	       sizeof(pmon->rx_mon_stats));
4192	return 0;
4193}
4194
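/* Initialise per-pdev monitor state: the rx status queue and ppdu status
 * machine, plus the monitor lock and cookie tracking when rxdma1 is used.
 */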
4195int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
4196{
4197	struct ath12k_pdev_dp *dp = &ar->dp;
4198	struct ath12k_mon_data *pmon = &dp->mon_data;
4199	int ret = 0;
4200
4201	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
4202	if (ret) {
4203		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
4204		return ret;
4205	}
4206
4207	/* if rxdma1_enable is false, there is no need to set up
4208	 * rxdma_mon_desc_ring.
4209	 */
4210	if (!ar->ab->hw_params->rxdma1_enable)
4211		return 0;
4212
4213	pmon->mon_last_linkdesc_paddr = 0;
4214	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
4215	spin_lock_init(&pmon->mon_lock);
4216
4217	return 0;
4218}
4219
4220int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
4221{
4222	/* start reap timer */
4223	mod_timer(&ab->mon_reap_timer,
4224		  jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
4225
4226	return 0;
4227}
4228
4229int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
4230{
4231	int ret;
4232
4233	if (stop_timer)
4234		del_timer_sync(&ab->mon_reap_timer);
4235
4236	/* reap all the monitor related rings */
4237	ret = ath12k_dp_purge_mon_ring(ab);
4238	if (ret) {
4239		ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
4240		return ret;
4241	}
4242
4243	return 0;
4244}