v6.8 (drivers/net/wireless/ath/ath11k/dp.c)
   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
   4 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
   5 */
   6
   7#include <crypto/hash.h>
   8#include "core.h"
   9#include "dp_tx.h"
  10#include "hal_tx.h"
  11#include "hif.h"
  12#include "debug.h"
  13#include "dp_rx.h"
  14#include "peer.h"
  15
  16static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
  17					  struct sk_buff *skb)
  18{
  19	dev_kfree_skb_any(skb);
  20}
  21
  22void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
  23{
  24	struct ath11k_base *ab = ar->ab;
  25	struct ath11k_peer *peer;
  26
  27	/* TODO: Any other peer specific DP cleanup */
  28
  29	spin_lock_bh(&ab->base_lock);
  30	peer = ath11k_peer_find(ab, vdev_id, addr);
  31	if (!peer) {
  32		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
  33			    addr, vdev_id);
  34		spin_unlock_bh(&ab->base_lock);
  35		return;
  36	}
  37
  38	ath11k_peer_rx_tid_cleanup(ar, peer);
  39	peer->dp_setup_done = false;
  40	crypto_free_shash(peer->tfm_mmic);
  41	spin_unlock_bh(&ab->base_lock);
  42}
  43
  44int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
  45{
  46	struct ath11k_base *ab = ar->ab;
  47	struct ath11k_peer *peer;
  48	u32 reo_dest;
  49	int ret = 0, tid;
  50
  51	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
  52	reo_dest = ar->dp.mac_id + 1;
  53	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
  54					WMI_PEER_SET_DEFAULT_ROUTING,
  55					DP_RX_HASH_ENABLE | (reo_dest << 1));
  56
  57	if (ret) {
  58		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
  59			    ret, addr, vdev_id);
  60		return ret;
  61	}
  62
  63	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
  64		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
  65					       HAL_PN_TYPE_NONE);
  66		if (ret) {
  67			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
  68				    tid, ret);
  69			goto peer_clean;
  70		}
  71	}
  72
  73	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
  74	if (ret) {
  75		ath11k_warn(ab, "failed to setup rx defrag context\n");
  76		tid--;
  77		goto peer_clean;
  78	}
  79
  80	/* TODO: Setup other peer specific resource used in data path */
  81
  82	return 0;
  83
  84peer_clean:
  85	spin_lock_bh(&ab->base_lock);
  86
  87	peer = ath11k_peer_find(ab, vdev_id, addr);
  88	if (!peer) {
  89		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
  90		spin_unlock_bh(&ab->base_lock);
  91		return -ENOENT;
  92	}
  93
  94	for (; tid >= 0; tid--)
  95		ath11k_peer_rx_tid_delete(ar, peer, tid);
  96
  97	spin_unlock_bh(&ab->base_lock);
  98
  99	return ret;
 100}
 101
 102void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
 103{
 104	if (!ring->vaddr_unaligned)
 105		return;
 106
 107	if (ring->cached)
 108		kfree(ring->vaddr_unaligned);
 109	else
 110		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
 111				  ring->paddr_unaligned);
 112
 113	ring->vaddr_unaligned = NULL;
 114}
 115
 116static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
 117{
 118	int ext_group_num;
 119	u8 mask = 1 << ring_num;
 120
 121	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
 122	     ext_group_num++) {
 123		if (mask & grp_mask[ext_group_num])
 124			return ext_group_num;
 125	}
 126
 127	return -ENOENT;
 128}
 129
 130static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
 131					      enum hal_ring_type type, int ring_num)
 132{
 133	const u8 *grp_mask;
 134
 135	switch (type) {
 136	case HAL_WBM2SW_RELEASE:
 137		if (ring_num == DP_RX_RELEASE_RING_NUM) {
 138			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
 139			ring_num = 0;
 140		} else {
 141			grp_mask = &ab->hw_params.ring_mask->tx[0];
 142		}
 143		break;
 144	case HAL_REO_EXCEPTION:
 145		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
 146		break;
 147	case HAL_REO_DST:
 148		grp_mask = &ab->hw_params.ring_mask->rx[0];
 149		break;
 150	case HAL_REO_STATUS:
 151		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
 152		break;
 153	case HAL_RXDMA_MONITOR_STATUS:
 154	case HAL_RXDMA_MONITOR_DST:
 155		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
 156		break;
 157	case HAL_RXDMA_DST:
 158		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
 159		break;
 160	case HAL_RXDMA_BUF:
 161		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
 162		break;
 163	case HAL_RXDMA_MONITOR_BUF:
 164	case HAL_TCL_DATA:
 165	case HAL_TCL_CMD:
 166	case HAL_REO_CMD:
 167	case HAL_SW2WBM_RELEASE:
 168	case HAL_WBM_IDLE_LINK:
 169	case HAL_TCL_STATUS:
 170	case HAL_REO_REINJECT:
 171	case HAL_CE_SRC:
 172	case HAL_CE_DST:
 173	case HAL_CE_DST_STATUS:
 174	default:
 175		return -ENOENT;
 176	}
 177
 178	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
 179}
 180
 181static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
 182				     struct hal_srng_params *ring_params,
 183				     enum hal_ring_type type, int ring_num)
 184{
 185	int msi_group_number, msi_data_count;
 186	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
 187	int ret;
 188
 189	ret = ath11k_get_user_msi_vector(ab, "DP",
 190					 &msi_data_count, &msi_data_start,
 191					 &msi_irq_start);
 192	if (ret)
 193		return;
 194
 195	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
 196							      ring_num);
 197	if (msi_group_number < 0) {
 198		ath11k_dbg(ab, ATH11K_DBG_PCI,
 199			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
 200			   type, ring_num);
 201		ring_params->msi_addr = 0;
 202		ring_params->msi_data = 0;
 203		return;
 204	}
 205
 206	if (msi_group_number > msi_data_count) {
 207		ath11k_dbg(ab, ATH11K_DBG_PCI,
 208			   "multiple msi_groups share one msi, msi_group_num %d",
 209			   msi_group_number);
 210	}
 211
 212	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
 213
 214	ring_params->msi_addr = addr_lo;
 215	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
 216	ring_params->msi_data = (msi_group_number % msi_data_count)
 217		+ msi_data_start;
 218	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
 219}
 220
 221int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
 222			 enum hal_ring_type type, int ring_num,
 223			 int mac_id, int num_entries)
 224{
 225	struct hal_srng_params params = { 0 };
 226	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
 227	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
 228	int ret;
 229	bool cached = false;
 230
 231	if (max_entries < 0 || entry_sz < 0)
 232		return -EINVAL;
 233
 234	if (num_entries > max_entries)
 235		num_entries = max_entries;
 236
 237	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
 238
 239	if (ab->hw_params.alloc_cacheable_memory) {
 240		/* Allocate the reo dst and tx completion rings from cacheable memory */
 241		switch (type) {
 242		case HAL_REO_DST:
 243		case HAL_WBM2SW_RELEASE:
 244			cached = true;
 245			break;
 246		default:
 247			cached = false;
 248		}
 249
 250		if (cached) {
 251			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
 252			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
 253		}
 254	}
 255
 256	if (!cached)
 257		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
 258							   &ring->paddr_unaligned,
 259							   GFP_KERNEL);
 260
 261	if (!ring->vaddr_unaligned)
 262		return -ENOMEM;
 263
 264	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
 265	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
 266		      (unsigned long)ring->vaddr_unaligned);
 267
 268	params.ring_base_vaddr = ring->vaddr;
 269	params.ring_base_paddr = ring->paddr;
 270	params.num_entries = num_entries;
 271	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
 272
 273	switch (type) {
 274	case HAL_REO_DST:
 275		params.intr_batch_cntr_thres_entries =
 276					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
 277		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 278		break;
 279	case HAL_RXDMA_BUF:
 280	case HAL_RXDMA_MONITOR_BUF:
 281	case HAL_RXDMA_MONITOR_STATUS:
 282		params.low_threshold = num_entries >> 3;
 283		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
 284		params.intr_batch_cntr_thres_entries = 0;
 285		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 286		break;
 287	case HAL_WBM2SW_RELEASE:
 288		if (ring_num < 3) {
 289			params.intr_batch_cntr_thres_entries =
 290					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
 291			params.intr_timer_thres_us =
 292					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
 293			break;
 294		}
 295		/* follow through when ring_num >= 3 */
 296		fallthrough;
 297	case HAL_REO_EXCEPTION:
 298	case HAL_REO_REINJECT:
 299	case HAL_REO_CMD:
 300	case HAL_REO_STATUS:
 301	case HAL_TCL_DATA:
 302	case HAL_TCL_CMD:
 303	case HAL_TCL_STATUS:
 304	case HAL_WBM_IDLE_LINK:
 305	case HAL_SW2WBM_RELEASE:
 306	case HAL_RXDMA_DST:
 307	case HAL_RXDMA_MONITOR_DST:
 308	case HAL_RXDMA_MONITOR_DESC:
 309		params.intr_batch_cntr_thres_entries =
 310					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
 311		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
 312		break;
 313	case HAL_RXDMA_DIR_BUF:
 314		break;
 315	default:
 316		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
 317		return -EINVAL;
 318	}
 319
 320	if (cached) {
 321		params.flags |= HAL_SRNG_FLAGS_CACHED;
 322		ring->cached = 1;
 323	}
 324
 325	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
 326	if (ret < 0) {
 327		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
 328			    ret, ring_num);
 329		return ret;
 330	}
 331
 332	ring->ring_id = ret;
 333
 334	return 0;
 335}
 336
 337void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
 338{
 339	int i;
 340
 341	if (!ab->hw_params.supports_shadow_regs)
 342		return;
 343
 344	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
 345		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
 346
 347	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
 348}
 349
 350static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
 351{
 352	struct ath11k_dp *dp = &ab->dp;
 353	int i;
 354
 355	ath11k_dp_stop_shadow_timers(ab);
 356	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
 357	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
 358	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
 359	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 360		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
 361		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
 362	}
 363	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
 364	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
 365	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
 366	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
 367	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
 368}
 369
 370static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
 371{
 372	struct ath11k_dp *dp = &ab->dp;
 373	struct hal_srng *srng;
 374	int i, ret;
 375	u8 tcl_num, wbm_num;
 376
 377	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
 378				   HAL_SW2WBM_RELEASE, 0, 0,
 379				   DP_WBM_RELEASE_RING_SIZE);
 380	if (ret) {
 381		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
 382			    ret);
 383		goto err;
 384	}
 385
 386	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
 387				   DP_TCL_CMD_RING_SIZE);
 388	if (ret) {
 389		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
 390		goto err;
 391	}
 392
 393	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
 394				   0, 0, DP_TCL_STATUS_RING_SIZE);
 395	if (ret) {
 396		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
 397		goto err;
 398	}
 399
 400	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 401		tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
 402		wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
 403
 404		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
 405					   HAL_TCL_DATA, tcl_num, 0,
 406					   ab->hw_params.tx_ring_size);
 407		if (ret) {
 408			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
 409				    i, ret);
 410			goto err;
 411		}
 412
 413		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
 414					   HAL_WBM2SW_RELEASE, wbm_num, 0,
 415					   DP_TX_COMP_RING_SIZE);
 416		if (ret) {
 417			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
 418				    i, ret);
 419			goto err;
 420		}
 421
 422		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
 423		ath11k_hal_tx_init_data_ring(ab, srng);
 424
 425		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
 426					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
 427					    dp->tx_ring[i].tcl_data_ring.ring_id);
 428	}
 429
 430	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
 431				   0, 0, DP_REO_REINJECT_RING_SIZE);
 432	if (ret) {
 433		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
 434			    ret);
 435		goto err;
 436	}
 437
 438	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
 439				   DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
 440	if (ret) {
 441		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
 442		goto err;
 443	}
 444
 445	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
 446				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
 447	if (ret) {
 448		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
 449			    ret);
 450		goto err;
 451	}
 452
 453	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
 454				   0, 0, DP_REO_CMD_RING_SIZE);
 455	if (ret) {
 456		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
 457		goto err;
 458	}
 459
 460	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 461	ath11k_hal_reo_init_cmd_ring(ab, srng);
 462
 463	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
 464				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
 465				    dp->reo_cmd_ring.ring_id);
 466
 467	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
 468				   0, 0, DP_REO_STATUS_RING_SIZE);
 469	if (ret) {
 470		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
 471		goto err;
 472	}
 473
 474	/* When hash based routing of rx packet is enabled, 32 entries to map
 475	 * the hash values to the ring will be configured.
 476	 */
 477	ab->hw_params.hw_ops->reo_setup(ab);
 478
 479	return 0;
 480
 481err:
 482	ath11k_dp_srng_common_cleanup(ab);
 483
 484	return ret;
 485}
 486
 487static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
 488{
 489	struct ath11k_dp *dp = &ab->dp;
 490	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 491	int i;
 492
 493	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
 494		if (!slist[i].vaddr)
 495			continue;
 496
 497		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 498				  slist[i].vaddr, slist[i].paddr);
 499		slist[i].vaddr = NULL;
 500	}
 501}
 502
 503static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
 504						  int size,
 505						  u32 n_link_desc_bank,
 506						  u32 n_link_desc,
 507						  u32 last_bank_sz)
 508{
 509	struct ath11k_dp *dp = &ab->dp;
 510	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
 511	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 512	u32 n_entries_per_buf;
 513	int num_scatter_buf, scatter_idx;
 514	struct hal_wbm_link_desc *scatter_buf;
 515	int align_bytes, n_entries;
 516	dma_addr_t paddr;
 517	int rem_entries;
 518	int i;
 519	int ret = 0;
 520	u32 end_offset;
 521
 522	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
 523		ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
 524	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
 525
 526	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
 527		return -EINVAL;
 528
 529	for (i = 0; i < num_scatter_buf; i++) {
 530		slist[i].vaddr = dma_alloc_coherent(ab->dev,
 531						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 532						    &slist[i].paddr, GFP_KERNEL);
 533		if (!slist[i].vaddr) {
 534			ret = -ENOMEM;
 535			goto err;
 536		}
 537	}
 538
 539	scatter_idx = 0;
 540	scatter_buf = slist[scatter_idx].vaddr;
 541	rem_entries = n_entries_per_buf;
 542
 543	for (i = 0; i < n_link_desc_bank; i++) {
 544		align_bytes = link_desc_banks[i].vaddr -
 545			      link_desc_banks[i].vaddr_unaligned;
 546		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
 547			     HAL_LINK_DESC_SIZE;
 548		paddr = link_desc_banks[i].paddr;
 549		while (n_entries) {
 550			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
 551			n_entries--;
 552			paddr += HAL_LINK_DESC_SIZE;
 553			if (rem_entries) {
 554				rem_entries--;
 555				scatter_buf++;
 556				continue;
 557			}
 558
 559			rem_entries = n_entries_per_buf;
 560			scatter_idx++;
 561			scatter_buf = slist[scatter_idx].vaddr;
 562		}
 563	}
 564
 565	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
 566		     sizeof(struct hal_wbm_link_desc);
 567	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
 568					n_link_desc, end_offset);
 569
 570	return 0;
 571
 572err:
 573	ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 574
 575	return ret;
 576}
 577
 578static void
 579ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
 580			      struct dp_link_desc_bank *link_desc_banks)
 581{
 582	int i;
 583
 584	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
 585		if (link_desc_banks[i].vaddr_unaligned) {
 586			dma_free_coherent(ab->dev,
 587					  link_desc_banks[i].size,
 588					  link_desc_banks[i].vaddr_unaligned,
 589					  link_desc_banks[i].paddr_unaligned);
 590			link_desc_banks[i].vaddr_unaligned = NULL;
 591		}
 592	}
 593}
 594
 595static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
 596					  struct dp_link_desc_bank *desc_bank,
 597					  int n_link_desc_bank,
 598					  int last_bank_sz)
 599{
 600	struct ath11k_dp *dp = &ab->dp;
 601	int i;
 602	int ret = 0;
 603	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
 604
 605	for (i = 0; i < n_link_desc_bank; i++) {
 606		if (i == (n_link_desc_bank - 1) && last_bank_sz)
 607			desc_sz = last_bank_sz;
 608
 609		desc_bank[i].vaddr_unaligned =
 610					dma_alloc_coherent(ab->dev, desc_sz,
 611							   &desc_bank[i].paddr_unaligned,
 612							   GFP_KERNEL);
 613		if (!desc_bank[i].vaddr_unaligned) {
 614			ret = -ENOMEM;
 615			goto err;
 616		}
 617
 618		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
 619					       HAL_LINK_DESC_ALIGN);
 620		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
 621				     ((unsigned long)desc_bank[i].vaddr -
 622				      (unsigned long)desc_bank[i].vaddr_unaligned);
 623		desc_bank[i].size = desc_sz;
 624	}
 625
 626	return 0;
 627
 628err:
 629	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
 630
 631	return ret;
 632}
 633
 634void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
 635				 struct dp_link_desc_bank *desc_bank,
 636				 u32 ring_type, struct dp_srng *ring)
 637{
 638	ath11k_dp_link_desc_bank_free(ab, desc_bank);
 639
 640	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
 641		ath11k_dp_srng_cleanup(ab, ring);
 642		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 643	}
 644}
 645
 646static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
 647{
 648	struct ath11k_dp *dp = &ab->dp;
 649	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
 650	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
 651	int ret = 0;
 652
 653	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
 654			   HAL_NUM_MPDUS_PER_LINK_DESC;
 655
 656	n_mpdu_queue_desc = n_mpdu_link_desc /
 657			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
 658
 659	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
 660			       DP_AVG_MSDUS_PER_FLOW) /
 661			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;
 662
 663	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
 664			       DP_AVG_MSDUS_PER_MPDU) /
 665			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;
 666
 667	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
 668		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;
 669
 670	if (*n_link_desc & (*n_link_desc - 1))
 671		*n_link_desc = 1 << fls(*n_link_desc);
 672
 673	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
 674				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
 675	if (ret) {
 676		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
 677		return ret;
 678	}
 679	return ret;
 680}
 681
 682int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
 683			      struct dp_link_desc_bank *link_desc_banks,
 684			      u32 ring_type, struct hal_srng *srng,
 685			      u32 n_link_desc)
 686{
 687	u32 tot_mem_sz;
 688	u32 n_link_desc_bank, last_bank_sz;
 689	u32 entry_sz, align_bytes, n_entries;
 690	u32 paddr;
 691	u32 *desc;
 692	int i, ret;
 693
 694	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
 695	tot_mem_sz += HAL_LINK_DESC_ALIGN;
 696
 697	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
 698		n_link_desc_bank = 1;
 699		last_bank_sz = tot_mem_sz;
 700	} else {
 701		n_link_desc_bank = tot_mem_sz /
 702				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 703				    HAL_LINK_DESC_ALIGN);
 704		last_bank_sz = tot_mem_sz %
 705			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 706				HAL_LINK_DESC_ALIGN);
 707
 708		if (last_bank_sz)
 709			n_link_desc_bank += 1;
 710	}
 711
 712	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
 713		return -EINVAL;
 714
 715	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
 716					     n_link_desc_bank, last_bank_sz);
 717	if (ret)
 718		return ret;
 719
 720	/* Setup link desc idle list for HW internal usage */
 721	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
 722	tot_mem_sz = entry_sz * n_link_desc;
 723
 724	/* Setup scatter desc list when the total memory requirement is more */
 725	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
 726	    ring_type != HAL_RXDMA_MONITOR_DESC) {
 727		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
 728							     n_link_desc_bank,
 729							     n_link_desc,
 730							     last_bank_sz);
 731		if (ret) {
 732			ath11k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
 733				    ret);
 734			goto fail_desc_bank_free;
 735		}
 736
 737		return 0;
 738	}
 739
 740	spin_lock_bh(&srng->lock);
 741
 742	ath11k_hal_srng_access_begin(ab, srng);
 743
 744	for (i = 0; i < n_link_desc_bank; i++) {
 745		align_bytes = link_desc_banks[i].vaddr -
 746			      link_desc_banks[i].vaddr_unaligned;
 747		n_entries = (link_desc_banks[i].size - align_bytes) /
 748			    HAL_LINK_DESC_SIZE;
 749		paddr = link_desc_banks[i].paddr;
 750		while (n_entries &&
 751		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
 752			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
 753						      i, paddr);
 754			n_entries--;
 755			paddr += HAL_LINK_DESC_SIZE;
 756		}
 757	}
 758
 759	ath11k_hal_srng_access_end(ab, srng);
 760
 761	spin_unlock_bh(&srng->lock);
 762
 763	return 0;
 764
 765fail_desc_bank_free:
 766	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
 767
 768	return ret;
 769}
 770
 771int ath11k_dp_service_srng(struct ath11k_base *ab,
 772			   struct ath11k_ext_irq_grp *irq_grp,
 773			   int budget)
 774{
 775	struct napi_struct *napi = &irq_grp->napi;
 776	const struct ath11k_hw_hal_params *hal_params;
 777	int grp_id = irq_grp->grp_id;
 778	int work_done = 0;
 779	int i, j;
 780	int tot_work_done = 0;
 781
 782	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 783		if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
 784		    ab->hw_params.ring_mask->tx[grp_id])
 785			ath11k_dp_tx_completion_handler(ab, i);
 786	}
 787
 788	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
 789		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
 790		budget -= work_done;
 791		tot_work_done += work_done;
 792		if (budget <= 0)
 793			goto done;
 794	}
 795
 796	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
 797		work_done = ath11k_dp_rx_process_wbm_err(ab,
 798							 napi,
 799							 budget);
 800		budget -= work_done;
 801		tot_work_done += work_done;
 802
 803		if (budget <= 0)
 804			goto done;
 805	}
 806
 807	if (ab->hw_params.ring_mask->rx[grp_id]) {
 808		i =  fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
 809		work_done = ath11k_dp_process_rx(ab, i, napi,
 810						 budget);
 811		budget -= work_done;
 812		tot_work_done += work_done;
 813		if (budget <= 0)
 814			goto done;
 815	}
 816
 817	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
 818		for (i = 0; i < ab->num_radios; i++) {
 819			for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 820				int id = i * ab->hw_params.num_rxmda_per_pdev + j;
 821
 822				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
 823					BIT(id)) {
 824					work_done =
 825					ath11k_dp_rx_process_mon_rings(ab,
 826								       id,
 827								       napi, budget);
 828					budget -= work_done;
 829					tot_work_done += work_done;
 830
 831					if (budget <= 0)
 832						goto done;
 833				}
 834			}
 835		}
 836	}
 837
 838	if (ab->hw_params.ring_mask->reo_status[grp_id])
 839		ath11k_dp_process_reo_status(ab);
 840
 841	for (i = 0; i < ab->num_radios; i++) {
 842		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 843			int id = i * ab->hw_params.num_rxmda_per_pdev + j;
 844
 845			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
 846				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
 847				budget -= work_done;
 848				tot_work_done += work_done;
 849			}
 850
 851			if (budget <= 0)
 852				goto done;
 853
 854			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
 855				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
 856				struct ath11k_pdev_dp *dp = &ar->dp;
 857				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
 858
 859				hal_params = ab->hw_params.hal_params;
 860				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
 861							   hal_params->rx_buf_rbm);
 862			}
 863		}
 864	}
 865	/* TODO: Implement handler for other interrupts */
 866
 867done:
 868	return tot_work_done;
 869}
 870EXPORT_SYMBOL(ath11k_dp_service_srng);
 871
 872void ath11k_dp_pdev_free(struct ath11k_base *ab)
 873{
 874	struct ath11k *ar;
 875	int i;
 876
 877	del_timer_sync(&ab->mon_reap_timer);
 878
 879	for (i = 0; i < ab->num_radios; i++) {
 880		ar = ab->pdevs[i].ar;
 881		ath11k_dp_rx_pdev_free(ab, i);
 882		ath11k_debugfs_unregister(ar);
 883		ath11k_dp_rx_pdev_mon_detach(ar);
 884	}
 885}
 886
 887void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
 888{
 889	struct ath11k *ar;
 890	struct ath11k_pdev_dp *dp;
 891	int i;
 892	int j;
 893
 894	for (i = 0; i <  ab->num_radios; i++) {
 895		ar = ab->pdevs[i].ar;
 896		dp = &ar->dp;
 897		dp->mac_id = i;
 898		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
 899		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
 900		atomic_set(&dp->num_tx_pending, 0);
 901		init_waitqueue_head(&dp->tx_empty_waitq);
 902		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 903			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
 904			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
 905		}
 906		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
 907		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
 908	}
 909}
 910
 911int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
 912{
 913	struct ath11k *ar;
 914	int ret;
 915	int i;
 916
 917	/* TODO:Per-pdev rx ring unlike tx ring which is mapped to different AC's */
 918	for (i = 0; i < ab->num_radios; i++) {
 919		ar = ab->pdevs[i].ar;
 920		ret = ath11k_dp_rx_pdev_alloc(ab, i);
 921		if (ret) {
 922			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
 923				    i);
 924			goto err;
 925		}
 926		ret = ath11k_dp_rx_pdev_mon_attach(ar);
 927		if (ret) {
 928			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
 929				    i);
 930			goto err;
 931		}
 932	}
 933
 934	return 0;
 935
 936err:
 937	ath11k_dp_pdev_free(ab);
 938
 939	return ret;
 940}
 941
 942int ath11k_dp_htt_connect(struct ath11k_dp *dp)
 943{
 944	struct ath11k_htc_svc_conn_req conn_req;
 945	struct ath11k_htc_svc_conn_resp conn_resp;
 946	int status;
 947
 948	memset(&conn_req, 0, sizeof(conn_req));
 949	memset(&conn_resp, 0, sizeof(conn_resp));
 950
 951	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
 952	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
 953
 954	/* connect to control service */
 955	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
 956
 957	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
 958					    &conn_resp);
 959
 960	if (status)
 961		return status;
 962
 963	dp->eid = conn_resp.eid;
 964
 965	return 0;
 966}
 967
 968static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
 969{
 970	 /* When v2_map_support is true:for STA mode, enable address
 971	  * search index, tcl uses ast_hash value in the descriptor.
 972	  * When v2_map_support is false: for STA mode, don't enable
 973	  * address search index.
 974	  */
 975	switch (arvif->vdev_type) {
 976	case WMI_VDEV_TYPE_STA:
 977		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
 978			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 979			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
 980		} else {
 981			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
 982			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
 983		}
 984		break;
 985	case WMI_VDEV_TYPE_AP:
 986	case WMI_VDEV_TYPE_IBSS:
 987		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 988		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
 989		break;
 990	case WMI_VDEV_TYPE_MONITOR:
 991	default:
 992		return;
 993	}
 994}
 995
 996void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
 997{
 998	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
 999			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
1000					  arvif->vdev_id) |
1001			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
1002					  ar->pdev->pdev_id);
1003
1004	/* set HTT extension valid bit to 0 by default */
1005	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
1006
1007	ath11k_dp_update_vdev_search(arvif);
1008}
1009
1010static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
1011{
1012	struct ath11k_base *ab = ctx;
1013	struct sk_buff *msdu = skb;
1014
1015	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
1016			 DMA_TO_DEVICE);
1017
1018	dev_kfree_skb_any(msdu);
1019
1020	return 0;
1021}
1022
1023void ath11k_dp_free(struct ath11k_base *ab)
1024{
1025	struct ath11k_dp *dp = &ab->dp;
1026	int i;
1027
1028	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1029				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1030
1031	ath11k_dp_srng_common_cleanup(ab);
1032
1033	ath11k_dp_reo_cmd_list_cleanup(ab);
1034
1035	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
1036		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
1037		idr_for_each(&dp->tx_ring[i].txbuf_idr,
1038			     ath11k_dp_tx_pending_cleanup, ab);
1039		idr_destroy(&dp->tx_ring[i].txbuf_idr);
1040		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
1041		kfree(dp->tx_ring[i].tx_status);
1042	}
1043
1044	/* Deinit any SOC level resource */
1045}
1046
1047int ath11k_dp_alloc(struct ath11k_base *ab)
1048{
1049	struct ath11k_dp *dp = &ab->dp;
1050	struct hal_srng *srng = NULL;
1051	size_t size = 0;
1052	u32 n_link_desc = 0;
1053	int ret;
1054	int i;
1055
1056	dp->ab = ab;
1057
1058	INIT_LIST_HEAD(&dp->reo_cmd_list);
1059	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
1060	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
1061	spin_lock_init(&dp->reo_cmd_lock);
1062
1063	dp->reo_cmd_cache_flush_count = 0;
1064
1065	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
1066	if (ret) {
1067		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
1068		return ret;
1069	}
1070
1071	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
1072
1073	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
1074					HAL_WBM_IDLE_LINK, srng, n_link_desc);
1075	if (ret) {
1076		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
1077		return ret;
1078	}
1079
1080	ret = ath11k_dp_srng_common_setup(ab);
1081	if (ret)
1082		goto fail_link_desc_cleanup;
1083
1084	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
1085
1086	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
1087		idr_init(&dp->tx_ring[i].txbuf_idr);
1088		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
1089		dp->tx_ring[i].tcl_data_ring_id = i;
1090
1091		dp->tx_ring[i].tx_status_head = 0;
1092		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
1093		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
1094		if (!dp->tx_ring[i].tx_status) {
1095			ret = -ENOMEM;
1096			goto fail_cmn_srng_cleanup;
1097		}
1098	}
1099
1100	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
1101		ath11k_hal_tx_set_dscp_tid_map(ab, i);
1102
1103	/* Init any SOC level resource for DP */
1104
1105	return 0;
1106
1107fail_cmn_srng_cleanup:
1108	ath11k_dp_srng_common_cleanup(ab);
1109
1110fail_link_desc_cleanup:
1111	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1112				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1113
1114	return ret;
1115}
1116
1117static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
1118{
1119	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
1120								 t, timer);
1121	struct ath11k_base *ab = update_timer->ab;
1122	struct hal_srng	*srng = &ab->hal.srng_list[update_timer->ring_id];
1123
1124	spin_lock_bh(&srng->lock);
1125
1126	/* when the timer is fired, the handler checks whether there
1127	 * are new TX happened. The handler updates HP only when there
1128	 * are no TX operations during the timeout interval, and stop
1129	 * the timer. Timer will be started again when TX happens again.
1130	 */
1131	if (update_timer->timer_tx_num != update_timer->tx_num) {
1132		update_timer->timer_tx_num = update_timer->tx_num;
1133		mod_timer(&update_timer->timer, jiffies +
1134		  msecs_to_jiffies(update_timer->interval));
1135	} else {
1136		update_timer->started = false;
1137		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
1138	}
1139
1140	spin_unlock_bh(&srng->lock);
1141}
1142
1143void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
1144				  struct hal_srng *srng,
1145				  struct ath11k_hp_update_timer *update_timer)
1146{
1147	lockdep_assert_held(&srng->lock);
1148
1149	if (!ab->hw_params.supports_shadow_regs)
1150		return;
1151
1152	update_timer->tx_num++;
1153
1154	if (update_timer->started)
1155		return;
1156
1157	update_timer->started = true;
1158	update_timer->timer_tx_num = update_timer->tx_num;
1159	mod_timer(&update_timer->timer, jiffies +
1160		  msecs_to_jiffies(update_timer->interval));
1161}
1162
1163void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
1164				 struct ath11k_hp_update_timer *update_timer)
1165{
1166	if (!ab->hw_params.supports_shadow_regs)
1167		return;
1168
1169	if (!update_timer->init)
1170		return;
1171
1172	del_timer_sync(&update_timer->timer);
1173}
1174
1175void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
1176				 struct ath11k_hp_update_timer *update_timer,
1177				 u32 interval, u32 ring_id)
1178{
1179	if (!ab->hw_params.supports_shadow_regs)
1180		return;
1181
1182	update_timer->tx_num = 0;
1183	update_timer->timer_tx_num = 0;
1184	update_timer->ab = ab;
1185	update_timer->ring_id = ring_id;
1186	update_timer->interval = interval;
1187	update_timer->init = true;
1188	timer_setup(&update_timer->timer,
1189		    ath11k_dp_shadow_timer_handler, 0);
1190}
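
The same file follows as of v6.13.7. Apart from the updated copyright year, the visible change in this excerpt is how rings allocated from cacheable memory are handled: ath11k_dp_srng_setup() now maps the kzalloc()'d buffer with dma_map_single() and checks the result with dma_mapping_error() instead of deriving the bus address via virt_to_phys(), and ath11k_dp_srng_cleanup() unmaps the buffer before kfree(). A condensed sketch of that pattern, restating the code from the listing below (not a drop-in replacement):

	/* setup: kzalloc + explicit streaming DMA mapping */
	ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->paddr_unaligned = dma_map_single(ab->dev, ring->vaddr_unaligned,
					       ring->size, DMA_FROM_DEVICE);
	if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
		kfree(ring->vaddr_unaligned);
		ring->vaddr_unaligned = NULL;
		return -ENOMEM;
	}

	/* cleanup: unmap before freeing */
	dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
			 DMA_FROM_DEVICE);
	kfree(ring->vaddr_unaligned);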
v6.13.7 (drivers/net/wireless/ath/ath11k/dp.c)
   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
   4 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
   5 */
   6
   7#include <crypto/hash.h>
   8#include "core.h"
   9#include "dp_tx.h"
  10#include "hal_tx.h"
  11#include "hif.h"
  12#include "debug.h"
  13#include "dp_rx.h"
  14#include "peer.h"
  15
  16static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
  17					  struct sk_buff *skb)
  18{
  19	dev_kfree_skb_any(skb);
  20}
  21
  22void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
  23{
  24	struct ath11k_base *ab = ar->ab;
  25	struct ath11k_peer *peer;
  26
  27	/* TODO: Any other peer specific DP cleanup */
  28
  29	spin_lock_bh(&ab->base_lock);
  30	peer = ath11k_peer_find(ab, vdev_id, addr);
  31	if (!peer) {
  32		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
  33			    addr, vdev_id);
  34		spin_unlock_bh(&ab->base_lock);
  35		return;
  36	}
  37
  38	ath11k_peer_rx_tid_cleanup(ar, peer);
  39	peer->dp_setup_done = false;
  40	crypto_free_shash(peer->tfm_mmic);
  41	spin_unlock_bh(&ab->base_lock);
  42}
  43
  44int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
  45{
  46	struct ath11k_base *ab = ar->ab;
  47	struct ath11k_peer *peer;
  48	u32 reo_dest;
  49	int ret = 0, tid;
  50
  51	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
  52	reo_dest = ar->dp.mac_id + 1;
  53	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
  54					WMI_PEER_SET_DEFAULT_ROUTING,
  55					DP_RX_HASH_ENABLE | (reo_dest << 1));
  56
  57	if (ret) {
  58		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
  59			    ret, addr, vdev_id);
  60		return ret;
  61	}
  62
  63	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
  64		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
  65					       HAL_PN_TYPE_NONE);
  66		if (ret) {
  67			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
  68				    tid, ret);
  69			goto peer_clean;
  70		}
  71	}
  72
  73	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
  74	if (ret) {
  75		ath11k_warn(ab, "failed to setup rx defrag context\n");
  76		tid--;
  77		goto peer_clean;
  78	}
  79
  80	/* TODO: Setup other peer specific resource used in data path */
  81
  82	return 0;
  83
  84peer_clean:
  85	spin_lock_bh(&ab->base_lock);
  86
  87	peer = ath11k_peer_find(ab, vdev_id, addr);
  88	if (!peer) {
  89		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
  90		spin_unlock_bh(&ab->base_lock);
  91		return -ENOENT;
  92	}
  93
  94	for (; tid >= 0; tid--)
  95		ath11k_peer_rx_tid_delete(ar, peer, tid);
  96
  97	spin_unlock_bh(&ab->base_lock);
  98
  99	return ret;
 100}
 101
 102void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
 103{
 104	if (!ring->vaddr_unaligned)
 105		return;
 106
 107	if (ring->cached) {
 108		dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
 109				 DMA_FROM_DEVICE);
 110		kfree(ring->vaddr_unaligned);
 111	} else {
 112		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
 113				  ring->paddr_unaligned);
 114	}
 115
 116	ring->vaddr_unaligned = NULL;
 117}
 118
 119static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
 120{
 121	int ext_group_num;
 122	u8 mask = 1 << ring_num;
 123
 124	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
 125	     ext_group_num++) {
 126		if (mask & grp_mask[ext_group_num])
 127			return ext_group_num;
 128	}
 129
 130	return -ENOENT;
 131}
 132
 133static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
 134					      enum hal_ring_type type, int ring_num)
 135{
 136	const u8 *grp_mask;
 137
 138	switch (type) {
 139	case HAL_WBM2SW_RELEASE:
 140		if (ring_num == DP_RX_RELEASE_RING_NUM) {
 141			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
 142			ring_num = 0;
 143		} else {
 144			grp_mask = &ab->hw_params.ring_mask->tx[0];
 145		}
 146		break;
 147	case HAL_REO_EXCEPTION:
 148		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
 149		break;
 150	case HAL_REO_DST:
 151		grp_mask = &ab->hw_params.ring_mask->rx[0];
 152		break;
 153	case HAL_REO_STATUS:
 154		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
 155		break;
 156	case HAL_RXDMA_MONITOR_STATUS:
 157	case HAL_RXDMA_MONITOR_DST:
 158		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
 159		break;
 160	case HAL_RXDMA_DST:
 161		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
 162		break;
 163	case HAL_RXDMA_BUF:
 164		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
 165		break;
 166	case HAL_RXDMA_MONITOR_BUF:
 167	case HAL_TCL_DATA:
 168	case HAL_TCL_CMD:
 169	case HAL_REO_CMD:
 170	case HAL_SW2WBM_RELEASE:
 171	case HAL_WBM_IDLE_LINK:
 172	case HAL_TCL_STATUS:
 173	case HAL_REO_REINJECT:
 174	case HAL_CE_SRC:
 175	case HAL_CE_DST:
 176	case HAL_CE_DST_STATUS:
 177	default:
 178		return -ENOENT;
 179	}
 180
 181	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
 182}
 183
 184static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
 185				     struct hal_srng_params *ring_params,
 186				     enum hal_ring_type type, int ring_num)
 187{
 188	int msi_group_number, msi_data_count;
 189	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
 190	int ret;
 191
 192	ret = ath11k_get_user_msi_vector(ab, "DP",
 193					 &msi_data_count, &msi_data_start,
 194					 &msi_irq_start);
 195	if (ret)
 196		return;
 197
 198	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
 199							      ring_num);
 200	if (msi_group_number < 0) {
 201		ath11k_dbg(ab, ATH11K_DBG_PCI,
 202			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
 203			   type, ring_num);
 204		ring_params->msi_addr = 0;
 205		ring_params->msi_data = 0;
 206		return;
 207	}
 208
 209	if (msi_group_number > msi_data_count) {
 210		ath11k_dbg(ab, ATH11K_DBG_PCI,
 211			   "multiple msi_groups share one msi, msi_group_num %d",
 212			   msi_group_number);
 213	}
 214
 215	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
 216
 217	ring_params->msi_addr = addr_lo;
 218	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
 219	ring_params->msi_data = (msi_group_number % msi_data_count)
 220		+ msi_data_start;
 221	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
 222}
 223
 224int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
 225			 enum hal_ring_type type, int ring_num,
 226			 int mac_id, int num_entries)
 227{
 228	struct hal_srng_params params = { 0 };
 229	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
 230	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
 231	int ret;
 232	bool cached = false;
 233
 234	if (max_entries < 0 || entry_sz < 0)
 235		return -EINVAL;
 236
 237	if (num_entries > max_entries)
 238		num_entries = max_entries;
 239
 240	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
 241
 242	if (ab->hw_params.alloc_cacheable_memory) {
 243		/* Allocate the reo dst and tx completion rings from cacheable memory */
 244		switch (type) {
 245		case HAL_REO_DST:
 246		case HAL_WBM2SW_RELEASE:
 247			cached = true;
 248			break;
 249		default:
 250			cached = false;
 251		}
 252
 253		if (cached) {
 254			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
 255			if (!ring->vaddr_unaligned)
 256				return -ENOMEM;
 257
 258			ring->paddr_unaligned = dma_map_single(ab->dev,
 259							       ring->vaddr_unaligned,
 260							       ring->size,
 261							       DMA_FROM_DEVICE);
 262			if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
 263				kfree(ring->vaddr_unaligned);
 264				ring->vaddr_unaligned = NULL;
 265				return -ENOMEM;
 266			}
 267		}
 268	}
 269
 270	if (!cached)
 271		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
 272							   &ring->paddr_unaligned,
 273							   GFP_KERNEL);
 274
 275	if (!ring->vaddr_unaligned)
 276		return -ENOMEM;
 277
 278	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
 279	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
 280		      (unsigned long)ring->vaddr_unaligned);
 281
 282	params.ring_base_vaddr = ring->vaddr;
 283	params.ring_base_paddr = ring->paddr;
 284	params.num_entries = num_entries;
 285	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
 286
 287	switch (type) {
 288	case HAL_REO_DST:
 289		params.intr_batch_cntr_thres_entries =
 290					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
 291		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 292		break;
 293	case HAL_RXDMA_BUF:
 294	case HAL_RXDMA_MONITOR_BUF:
 295	case HAL_RXDMA_MONITOR_STATUS:
 296		params.low_threshold = num_entries >> 3;
 297		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
 298		params.intr_batch_cntr_thres_entries = 0;
 299		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 300		break;
 301	case HAL_WBM2SW_RELEASE:
 302		if (ring_num < 3) {
 303			params.intr_batch_cntr_thres_entries =
 304					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
 305			params.intr_timer_thres_us =
 306					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
 307			break;
 308		}
 309		/* follow through when ring_num >= 3 */
 310		fallthrough;
 311	case HAL_REO_EXCEPTION:
 312	case HAL_REO_REINJECT:
 313	case HAL_REO_CMD:
 314	case HAL_REO_STATUS:
 315	case HAL_TCL_DATA:
 316	case HAL_TCL_CMD:
 317	case HAL_TCL_STATUS:
 318	case HAL_WBM_IDLE_LINK:
 319	case HAL_SW2WBM_RELEASE:
 320	case HAL_RXDMA_DST:
 321	case HAL_RXDMA_MONITOR_DST:
 322	case HAL_RXDMA_MONITOR_DESC:
 323		params.intr_batch_cntr_thres_entries =
 324					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
 325		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
 326		break;
 327	case HAL_RXDMA_DIR_BUF:
 328		break;
 329	default:
 330		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
 331		return -EINVAL;
 332	}
 333
 334	if (cached) {
 335		params.flags |= HAL_SRNG_FLAGS_CACHED;
 336		ring->cached = 1;
 337	}
 338
 339	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
 340	if (ret < 0) {
 341		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
 342			    ret, ring_num);
 343		return ret;
 344	}
 345
 346	ring->ring_id = ret;
 347
 348	return 0;
 349}
 350
 351void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
 352{
 353	int i;
 354
 355	if (!ab->hw_params.supports_shadow_regs)
 356		return;
 357
 358	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
 359		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
 360
 361	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
 362}
 363
 364static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
 365{
 366	struct ath11k_dp *dp = &ab->dp;
 367	int i;
 368
 369	ath11k_dp_stop_shadow_timers(ab);
 370	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
 371	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
 372	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
 373	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 374		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
 375		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
 376	}
 377	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
 378	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
 379	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
 380	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
 381	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
 382}
 383
 384static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
 385{
 386	struct ath11k_dp *dp = &ab->dp;
 387	struct hal_srng *srng;
 388	int i, ret;
 389	u8 tcl_num, wbm_num;
 390
 391	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
 392				   HAL_SW2WBM_RELEASE, 0, 0,
 393				   DP_WBM_RELEASE_RING_SIZE);
 394	if (ret) {
 395		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
 396			    ret);
 397		goto err;
 398	}
 399
 400	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
 401				   DP_TCL_CMD_RING_SIZE);
 402	if (ret) {
 403		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
 404		goto err;
 405	}
 406
 407	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
 408				   0, 0, DP_TCL_STATUS_RING_SIZE);
 409	if (ret) {
 410		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
 411		goto err;
 412	}
 413
 414	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 415		tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
 416		wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
 417
 418		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
 419					   HAL_TCL_DATA, tcl_num, 0,
 420					   ab->hw_params.tx_ring_size);
 421		if (ret) {
 422			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
 423				    i, ret);
 424			goto err;
 425		}
 426
 427		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
 428					   HAL_WBM2SW_RELEASE, wbm_num, 0,
 429					   DP_TX_COMP_RING_SIZE);
 430		if (ret) {
 431			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
 432				    i, ret);
 433			goto err;
 434		}
 435
 436		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
 437		ath11k_hal_tx_init_data_ring(ab, srng);
 438
 439		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
 440					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
 441					    dp->tx_ring[i].tcl_data_ring.ring_id);
 442	}
 443
 444	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
 445				   0, 0, DP_REO_REINJECT_RING_SIZE);
 446	if (ret) {
 447		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
 448			    ret);
 449		goto err;
 450	}
 451
 452	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
 453				   DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
 454	if (ret) {
 455		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
 456		goto err;
 457	}
 458
 459	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
 460				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
 461	if (ret) {
 462		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
 463			    ret);
 464		goto err;
 465	}
 466
 467	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
 468				   0, 0, DP_REO_CMD_RING_SIZE);
 469	if (ret) {
 470		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
 471		goto err;
 472	}
 473
 474	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 475	ath11k_hal_reo_init_cmd_ring(ab, srng);
 476
 477	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
 478				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
 479				    dp->reo_cmd_ring.ring_id);
 480
 481	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
 482				   0, 0, DP_REO_STATUS_RING_SIZE);
 483	if (ret) {
 484		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
 485		goto err;
 486	}
 487
 488	/* When hash based routing of rx packet is enabled, 32 entries to map
 489	 * the hash values to the ring will be configured.
 490	 */
 491	ab->hw_params.hw_ops->reo_setup(ab);
 492
 493	return 0;
 494
 495err:
 496	ath11k_dp_srng_common_cleanup(ab);
 497
 498	return ret;
 499}
 500
 501static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
 502{
 503	struct ath11k_dp *dp = &ab->dp;
 504	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 505	int i;
 506
 507	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
 508		if (!slist[i].vaddr)
 509			continue;
 510
 511		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 512				  slist[i].vaddr, slist[i].paddr);
 513		slist[i].vaddr = NULL;
 514	}
 515}
 516
 517static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
 518						  int size,
 519						  u32 n_link_desc_bank,
 520						  u32 n_link_desc,
 521						  u32 last_bank_sz)
 522{
 523	struct ath11k_dp *dp = &ab->dp;
 524	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
 525	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 526	u32 n_entries_per_buf;
 527	int num_scatter_buf, scatter_idx;
 528	struct hal_wbm_link_desc *scatter_buf;
 529	int align_bytes, n_entries;
 530	dma_addr_t paddr;
 531	int rem_entries;
 532	int i;
 533	int ret = 0;
 534	u32 end_offset;
 535
 536	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
 537		ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
 538	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
 539
 540	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
 541		return -EINVAL;
 542
 543	for (i = 0; i < num_scatter_buf; i++) {
 544		slist[i].vaddr = dma_alloc_coherent(ab->dev,
 545						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 546						    &slist[i].paddr, GFP_KERNEL);
 547		if (!slist[i].vaddr) {
 548			ret = -ENOMEM;
 549			goto err;
 550		}
 551	}
 552
 553	scatter_idx = 0;
 554	scatter_buf = slist[scatter_idx].vaddr;
 555	rem_entries = n_entries_per_buf;
 556
 557	for (i = 0; i < n_link_desc_bank; i++) {
 558		align_bytes = link_desc_banks[i].vaddr -
 559			      link_desc_banks[i].vaddr_unaligned;
 560		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
 561			     HAL_LINK_DESC_SIZE;
 562		paddr = link_desc_banks[i].paddr;
 563		while (n_entries) {
 564			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
 565			n_entries--;
 566			paddr += HAL_LINK_DESC_SIZE;
 567			if (rem_entries) {
 568				rem_entries--;
 569				scatter_buf++;
 570				continue;
 571			}
 572
 573			rem_entries = n_entries_per_buf;
 574			scatter_idx++;
 575			scatter_buf = slist[scatter_idx].vaddr;
 576		}
 577	}
 578
 579	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
 580		     sizeof(struct hal_wbm_link_desc);
 581	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
 582					n_link_desc, end_offset);
 583
 584	return 0;
 585
 586err:
 587	ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 588
 589	return ret;
 590}
 591
 592static void
 593ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
 594			      struct dp_link_desc_bank *link_desc_banks)
 595{
 596	int i;
 597
 598	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
 599		if (link_desc_banks[i].vaddr_unaligned) {
 600			dma_free_coherent(ab->dev,
 601					  link_desc_banks[i].size,
 602					  link_desc_banks[i].vaddr_unaligned,
 603					  link_desc_banks[i].paddr_unaligned);
 604			link_desc_banks[i].vaddr_unaligned = NULL;
 605		}
 606	}
 607}
 608
 609static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
 610					  struct dp_link_desc_bank *desc_bank,
 611					  int n_link_desc_bank,
 612					  int last_bank_sz)
 613{
 614	struct ath11k_dp *dp = &ab->dp;
 615	int i;
 616	int ret = 0;
 617	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
 618
 619	for (i = 0; i < n_link_desc_bank; i++) {
 620		if (i == (n_link_desc_bank - 1) && last_bank_sz)
 621			desc_sz = last_bank_sz;
 622
 623		desc_bank[i].vaddr_unaligned =
 624					dma_alloc_coherent(ab->dev, desc_sz,
 625							   &desc_bank[i].paddr_unaligned,
 626							   GFP_KERNEL);
 627		if (!desc_bank[i].vaddr_unaligned) {
 628			ret = -ENOMEM;
 629			goto err;
 630		}
 631
 632		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
 633					       HAL_LINK_DESC_ALIGN);
 634		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
 635				     ((unsigned long)desc_bank[i].vaddr -
 636				      (unsigned long)desc_bank[i].vaddr_unaligned);
 637		desc_bank[i].size = desc_sz;
 638	}
 639
 640	return 0;
 641
 642err:
 643	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
 644
 645	return ret;
 646}
 647
 648void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
 649				 struct dp_link_desc_bank *desc_bank,
 650				 u32 ring_type, struct dp_srng *ring)
 651{
 652	ath11k_dp_link_desc_bank_free(ab, desc_bank);
 653
 654	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
 655		ath11k_dp_srng_cleanup(ab, ring);
 656		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 657	}
 658}
 659
 660static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
 661{
 662	struct ath11k_dp *dp = &ab->dp;
 663	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
 664	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
 665	int ret = 0;
 666
 667	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
 668			   HAL_NUM_MPDUS_PER_LINK_DESC;
 669
 670	n_mpdu_queue_desc = n_mpdu_link_desc /
 671			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
 672
 673	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
 674			       DP_AVG_MSDUS_PER_FLOW) /
 675			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;
 676
 677	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
 678			       DP_AVG_MSDUS_PER_MPDU) /
 679			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;
 680
 681	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
 682		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;
 683
 684	if (*n_link_desc & (*n_link_desc - 1))
 685		*n_link_desc = 1 << fls(*n_link_desc);
 686
 687	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
 688				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
 689	if (ret) {
 690		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
 691		return ret;
 692	}
 693	return ret;
 694}
 695
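/* Allocate the link descriptor banks and hand them to the hardware:
 * either through a scatter-gather idle list when the total size exceeds
 * DP_LINK_DESC_ALLOC_SIZE_THRESH (and the ring is not the monitor
 * descriptor ring), or by writing each descriptor's physical address
 * directly into the WBM idle link ring.
 */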
 696int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
 697			      struct dp_link_desc_bank *link_desc_banks,
 698			      u32 ring_type, struct hal_srng *srng,
 699			      u32 n_link_desc)
 700{
 701	u32 tot_mem_sz;
 702	u32 n_link_desc_bank, last_bank_sz;
 703	u32 entry_sz, align_bytes, n_entries;
 704	u32 paddr;
 705	u32 *desc;
 706	int i, ret;
 707
 708	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
 709	tot_mem_sz += HAL_LINK_DESC_ALIGN;
 710
 711	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
 712		n_link_desc_bank = 1;
 713		last_bank_sz = tot_mem_sz;
 714	} else {
 715		n_link_desc_bank = tot_mem_sz /
 716				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 717				    HAL_LINK_DESC_ALIGN);
 718		last_bank_sz = tot_mem_sz %
 719			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 720				HAL_LINK_DESC_ALIGN);
 721
 722		if (last_bank_sz)
 723			n_link_desc_bank += 1;
 724	}
 725
 726	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
 727		return -EINVAL;
 728
 729	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
 730					     n_link_desc_bank, last_bank_sz);
 731	if (ret)
 732		return ret;
 733
  734	/* Set up the link descriptor idle list for HW internal use */
 735	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
 736	tot_mem_sz = entry_sz * n_link_desc;
 737
  738	/* Set up the scatter desc list when the total memory spans multiple banks */
 739	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
 740	    ring_type != HAL_RXDMA_MONITOR_DESC) {
 741		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
 742							     n_link_desc_bank,
 743							     n_link_desc,
 744							     last_bank_sz);
 745		if (ret) {
  746			ath11k_warn(ab, "failed to setup scatter idle list descriptor: %d\n",
  747				    ret);
 748			goto fail_desc_bank_free;
 749		}
 750
 751		return 0;
 752	}
 753
 754	spin_lock_bh(&srng->lock);
 755
 756	ath11k_hal_srng_access_begin(ab, srng);
 757
 758	for (i = 0; i < n_link_desc_bank; i++) {
 759		align_bytes = link_desc_banks[i].vaddr -
 760			      link_desc_banks[i].vaddr_unaligned;
 761		n_entries = (link_desc_banks[i].size - align_bytes) /
 762			    HAL_LINK_DESC_SIZE;
 763		paddr = link_desc_banks[i].paddr;
 764		while (n_entries &&
 765		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
 766			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
 767						      i, paddr);
 768			n_entries--;
 769			paddr += HAL_LINK_DESC_SIZE;
 770		}
 771	}
 772
 773	ath11k_hal_srng_access_end(ab, srng);
 774
 775	spin_unlock_bh(&srng->lock);
 776
 777	return 0;
 778
 779fail_desc_bank_free:
 780	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
 781
 782	return ret;
 783}
 784
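/* NAPI poll handler for one external interrupt group: services the TX
 * completion, RX error, WBM release, RX, monitor status, REO status and
 * RXDMA rings mapped to this group, consuming at most @budget RX entries
 * and returning the total work done.
 */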
 785int ath11k_dp_service_srng(struct ath11k_base *ab,
 786			   struct ath11k_ext_irq_grp *irq_grp,
 787			   int budget)
 788{
 789	struct napi_struct *napi = &irq_grp->napi;
 790	const struct ath11k_hw_hal_params *hal_params;
 791	int grp_id = irq_grp->grp_id;
 792	int work_done = 0;
 793	int i, j;
 794	int tot_work_done = 0;
 795
 796	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 797		if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
 798		    ab->hw_params.ring_mask->tx[grp_id])
 799			ath11k_dp_tx_completion_handler(ab, i);
 800	}
 801
 802	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
 803		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
 804		budget -= work_done;
 805		tot_work_done += work_done;
 806		if (budget <= 0)
 807			goto done;
 808	}
 809
 810	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
 811		work_done = ath11k_dp_rx_process_wbm_err(ab,
 812							 napi,
 813							 budget);
 814		budget -= work_done;
 815		tot_work_done += work_done;
 816
 817		if (budget <= 0)
 818			goto done;
 819	}
 820
 821	if (ab->hw_params.ring_mask->rx[grp_id]) {
  822		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
 823		work_done = ath11k_dp_process_rx(ab, i, napi,
 824						 budget);
 825		budget -= work_done;
 826		tot_work_done += work_done;
 827		if (budget <= 0)
 828			goto done;
 829	}
 830
 831	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
 832		for (i = 0; i < ab->num_radios; i++) {
 833			for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
 834				int id = i * ab->hw_params.num_rxdma_per_pdev + j;
 835
 836				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
 837					BIT(id)) {
 838					work_done =
 839					ath11k_dp_rx_process_mon_rings(ab,
 840								       id,
 841								       napi, budget);
 842					budget -= work_done;
 843					tot_work_done += work_done;
 844
 845					if (budget <= 0)
 846						goto done;
 847				}
 848			}
 849		}
 850	}
 851
 852	if (ab->hw_params.ring_mask->reo_status[grp_id])
 853		ath11k_dp_process_reo_status(ab);
 854
 855	for (i = 0; i < ab->num_radios; i++) {
 856		for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
 857			int id = i * ab->hw_params.num_rxdma_per_pdev + j;
 858
 859			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
 860				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
 861				budget -= work_done;
 862				tot_work_done += work_done;
 863			}
 864
 865			if (budget <= 0)
 866				goto done;
 867
 868			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
 869				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
 870				struct ath11k_pdev_dp *dp = &ar->dp;
 871				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
 872
 873				hal_params = ab->hw_params.hal_params;
 874				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
 875							   hal_params->rx_buf_rbm);
 876			}
 877		}
 878	}
 879	/* TODO: Implement handler for other interrupts */
 880
 881done:
 882	return tot_work_done;
 883}
 884EXPORT_SYMBOL(ath11k_dp_service_srng);
 885
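/* Per-pdev teardown: stop the monitor reap timer and release the RX,
 * debugfs and monitor-mode state of every radio.
 */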
 886void ath11k_dp_pdev_free(struct ath11k_base *ab)
 887{
 888	struct ath11k *ar;
 889	int i;
 890
 891	del_timer_sync(&ab->mon_reap_timer);
 892
 893	for (i = 0; i < ab->num_radios; i++) {
 894		ar = ab->pdevs[i].ar;
 895		ath11k_dp_rx_pdev_free(ab, i);
 896		ath11k_debugfs_unregister(ar);
 897		ath11k_dp_rx_pdev_mon_detach(ar);
 898	}
 899}
 900
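/* Early per-pdev init done before any rings are allocated: assign the
 * mac_id and initialize the refill ring IDRs and locks, the pending-TX
 * counter and the TX-empty waitqueue for each radio.
 */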
 901void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
 902{
 903	struct ath11k *ar;
 904	struct ath11k_pdev_dp *dp;
 905	int i;
 906	int j;
 907
  908	for (i = 0; i < ab->num_radios; i++) {
 909		ar = ab->pdevs[i].ar;
 910		dp = &ar->dp;
 911		dp->mac_id = i;
 912		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
 913		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
 914		atomic_set(&dp->num_tx_pending, 0);
 915		init_waitqueue_head(&dp->tx_empty_waitq);
 916		for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
 917			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
 918			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
 919		}
 920		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
 921		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
 922	}
 923}
 924
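/* Allocate the per-pdev RX rings and attach the monitor rings for each
 * radio; everything is torn down again if any pdev fails.
 */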
 925int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
 926{
 927	struct ath11k *ar;
 928	int ret;
 929	int i;
 930
  931	/* TODO: Per-pdev rx ring, unlike the tx ring which is mapped to different ACs */
 932	for (i = 0; i < ab->num_radios; i++) {
 933		ar = ab->pdevs[i].ar;
 934		ret = ath11k_dp_rx_pdev_alloc(ab, i);
 935		if (ret) {
  936			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id: %d\n",
  937				    i);
 938			goto err;
 939		}
 940		ret = ath11k_dp_rx_pdev_mon_attach(ar);
 941		if (ret) {
 942			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
 943				    i);
 944			goto err;
 945		}
 946	}
 947
 948	return 0;
 949
 950err:
 951	ath11k_dp_pdev_free(ab);
 952
 953	return ret;
 954}
 955
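/* Connect the HTT data message service over HTC and remember the
 * endpoint ID used for subsequent HTT exchanges with the firmware.
 */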
 956int ath11k_dp_htt_connect(struct ath11k_dp *dp)
 957{
 958	struct ath11k_htc_svc_conn_req conn_req;
 959	struct ath11k_htc_svc_conn_resp conn_resp;
 960	int status;
 961
 962	memset(&conn_req, 0, sizeof(conn_req));
 963	memset(&conn_resp, 0, sizeof(conn_resp));
 964
 965	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
 966	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
 967
 968	/* connect to control service */
 969	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
 970
 971	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
 972					    &conn_resp);
 973
 974	if (status)
 975		return status;
 976
 977	dp->eid = conn_resp.eid;
 978
 979	return 0;
 980}
 981
 982static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
 983{
  984	 /* When v2_map_support is true: for STA mode, enable the address
  985	  * search index so TCL uses the ast_hash value in the descriptor.
  986	  * When v2_map_support is false: for STA mode, don't enable the
  987	  * address search index.
  988	  */
 989	switch (arvif->vdev_type) {
 990	case WMI_VDEV_TYPE_STA:
 991		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
 992			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 993			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
 994		} else {
 995			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
 996			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
 997		}
 998		break;
 999	case WMI_VDEV_TYPE_AP:
1000	case WMI_VDEV_TYPE_IBSS:
1001		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
1002		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
1003		break;
1004	case WMI_VDEV_TYPE_MONITOR:
1005	default:
1006		return;
1007	}
1008}
1009
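/* Pre-compute the TCL metadata (type, vdev_id and pdev_id) placed in
 * every TX descriptor for this vif and pick the address search mode
 * matching the vdev type.
 */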
1010void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
1011{
1012	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
1013			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
1014					  arvif->vdev_id) |
1015			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
1016					  ar->pdev->pdev_id);
1017
1018	/* set HTT extension valid bit to 0 by default */
1019	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
1020
1021	ath11k_dp_update_vdev_search(arvif);
1022}
1023
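/* idr_for_each() callback: unmap and free an MSDU that is still pending
 * on a TX ring when the datapath is torn down.
 */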
1024static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
1025{
1026	struct ath11k_base *ab = ctx;
1027	struct sk_buff *msdu = skb;
1028
1029	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
1030			 DMA_TO_DEVICE);
1031
1032	dev_kfree_skb_any(msdu);
1033
1034	return 0;
1035}
1036
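/* SOC-level DP teardown: release the link descriptor banks and common
 * SRNGs, flush the pending REO command lists, and drain/destroy the
 * per-ring TX buffer IDRs and completion status FIFOs.
 */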
1037void ath11k_dp_free(struct ath11k_base *ab)
1038{
1039	struct ath11k_dp *dp = &ab->dp;
1040	int i;
1041
1042	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1043				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1044
1045	ath11k_dp_srng_common_cleanup(ab);
1046
1047	ath11k_dp_reo_cmd_list_cleanup(ab);
1048
1049	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
1050		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
1051		idr_for_each(&dp->tx_ring[i].txbuf_idr,
1052			     ath11k_dp_tx_pending_cleanup, ab);
1053		idr_destroy(&dp->tx_ring[i].txbuf_idr);
1054		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
1055		kfree(dp->tx_ring[i].tx_status);
1056	}
1057
1058	/* Deinit any SOC level resource */
1059}
1060
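/* SOC-level DP init: set up the REO command lists and lock, the WBM idle
 * link ring and its link descriptors, the common SRNGs, the per-TCL-ring
 * TX buffer IDRs and completion status FIFOs, and the DSCP-TID map table.
 */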
1061int ath11k_dp_alloc(struct ath11k_base *ab)
1062{
1063	struct ath11k_dp *dp = &ab->dp;
1064	struct hal_srng *srng = NULL;
1065	size_t size = 0;
1066	u32 n_link_desc = 0;
1067	int ret;
1068	int i;
1069
1070	dp->ab = ab;
1071
1072	INIT_LIST_HEAD(&dp->reo_cmd_list);
1073	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
1074	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
1075	spin_lock_init(&dp->reo_cmd_lock);
1076
1077	dp->reo_cmd_cache_flush_count = 0;
1078
1079	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
1080	if (ret) {
1081		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
1082		return ret;
1083	}
1084
1085	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
1086
1087	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
1088					HAL_WBM_IDLE_LINK, srng, n_link_desc);
1089	if (ret) {
1090		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
1091		return ret;
1092	}
1093
1094	ret = ath11k_dp_srng_common_setup(ab);
1095	if (ret)
1096		goto fail_link_desc_cleanup;
1097
1098	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
1099
1100	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
1101		idr_init(&dp->tx_ring[i].txbuf_idr);
1102		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
1103		dp->tx_ring[i].tcl_data_ring_id = i;
1104
1105		dp->tx_ring[i].tx_status_head = 0;
1106		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
1107		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
1108		if (!dp->tx_ring[i].tx_status) {
1109			ret = -ENOMEM;
1110			goto fail_cmn_srng_cleanup;
1111		}
1112	}
1113
1114	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
1115		ath11k_hal_tx_set_dscp_tid_map(ab, i);
1116
1117	/* Init any SOC level resource for DP */
1118
1119	return 0;
1120
1121fail_cmn_srng_cleanup:
1122	ath11k_dp_srng_common_cleanup(ab);
1123
1124fail_link_desc_cleanup:
1125	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1126				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1127
1128	return ret;
1129}
1130
1131static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
1132{
1133	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
1134								 t, timer);
1135	struct ath11k_base *ab = update_timer->ab;
1136	struct hal_srng	*srng = &ab->hal.srng_list[update_timer->ring_id];
1137
1138	spin_lock_bh(&srng->lock);
1139
 1140	/* When the timer fires, the handler checks whether any new TX has
 1141	 * happened. It updates the HP only when there were no TX operations
 1142	 * during the timeout interval, and then stops the timer. The timer
 1143	 * is restarted when TX happens again.
 1144	 */
1145	if (update_timer->timer_tx_num != update_timer->tx_num) {
1146		update_timer->timer_tx_num = update_timer->tx_num;
1147		mod_timer(&update_timer->timer, jiffies +
1148		  msecs_to_jiffies(update_timer->interval));
1149	} else {
1150		update_timer->started = false;
1151		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
1152	}
1153
1154	spin_unlock_bh(&srng->lock);
1155}
1156
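/* Account a new TX operation and arm the shadow register update timer if
 * it is not already running. Only used on targets that access ring
 * pointers through shadow registers.
 */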
1157void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
1158				  struct hal_srng *srng,
1159				  struct ath11k_hp_update_timer *update_timer)
1160{
1161	lockdep_assert_held(&srng->lock);
1162
1163	if (!ab->hw_params.supports_shadow_regs)
1164		return;
1165
1166	update_timer->tx_num++;
1167
1168	if (update_timer->started)
1169		return;
1170
1171	update_timer->started = true;
1172	update_timer->timer_tx_num = update_timer->tx_num;
1173	mod_timer(&update_timer->timer, jiffies +
1174		  msecs_to_jiffies(update_timer->interval));
1175}
1176
1177void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
1178				 struct ath11k_hp_update_timer *update_timer)
1179{
1180	if (!ab->hw_params.supports_shadow_regs)
1181		return;
1182
1183	if (!update_timer->init)
1184		return;
1185
1186	del_timer_sync(&update_timer->timer);
1187}
1188
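/* One-time setup of a shadow register update timer for the given ring:
 * record the ring ID and timeout interval and install the timer handler.
 */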
1189void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
1190				 struct ath11k_hp_update_timer *update_timer,
1191				 u32 interval, u32 ring_id)
1192{
1193	if (!ab->hw_params.supports_shadow_regs)
1194		return;
1195
1196	update_timer->tx_num = 0;
1197	update_timer->timer_tx_num = 0;
1198	update_timer->ab = ab;
1199	update_timer->ring_id = ring_id;
1200	update_timer->interval = interval;
1201	update_timer->init = true;
1202	timer_setup(&update_timer->timer,
1203		    ath11k_dp_shadow_timer_handler, 0);
1204}