   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
   4 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
   5 */
   6
   7#include <crypto/hash.h>
   8#include "core.h"
   9#include "dp_tx.h"
  10#include "hal_tx.h"
  11#include "hif.h"
  12#include "debug.h"
  13#include "dp_rx.h"
  14#include "peer.h"
  15
  16static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
  17					  struct sk_buff *skb)
  18{
  19	dev_kfree_skb_any(skb);
  20}
  21
  22void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
  23{
  24	struct ath11k_base *ab = ar->ab;
  25	struct ath11k_peer *peer;
  26
  27	/* TODO: Any other peer specific DP cleanup */
  28
  29	spin_lock_bh(&ab->base_lock);
  30	peer = ath11k_peer_find(ab, vdev_id, addr);
  31	if (!peer) {
  32		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
  33			    addr, vdev_id);
  34		spin_unlock_bh(&ab->base_lock);
  35		return;
  36	}
  37
  38	ath11k_peer_rx_tid_cleanup(ar, peer);
  39	peer->dp_setup_done = false;
  40	crypto_free_shash(peer->tfm_mmic);
  41	spin_unlock_bh(&ab->base_lock);
  42}
  43
  44int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
  45{
  46	struct ath11k_base *ab = ar->ab;
  47	struct ath11k_peer *peer;
  48	u32 reo_dest;
  49	int ret = 0, tid;
  50
  51	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
  52	reo_dest = ar->dp.mac_id + 1;
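	/* Tell the firmware the default REO routing for this peer: the hash
	 * based routing enable flag OR'd with the destination ring index
	 * shifted up by one bit.
	 */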
  53	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
  54					WMI_PEER_SET_DEFAULT_ROUTING,
  55					DP_RX_HASH_ENABLE | (reo_dest << 1));
  56
  57	if (ret) {
  58		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
  59			    ret, addr, vdev_id);
  60		return ret;
  61	}
  62
  63	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
  64		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
  65					       HAL_PN_TYPE_NONE);
  66		if (ret) {
  67			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
  68				    tid, ret);
  69			goto peer_clean;
  70		}
  71	}
  72
  73	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
  74	if (ret) {
  75		ath11k_warn(ab, "failed to setup rx defrag context\n");
  76		tid--;
  77		goto peer_clean;
  78	}
  79
  80	/* TODO: Setup other peer specific resource used in data path */
  81
  82	return 0;
  83
  84peer_clean:
  85	spin_lock_bh(&ab->base_lock);
  86
  87	peer = ath11k_peer_find(ab, vdev_id, addr);
  88	if (!peer) {
  89		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
  90		spin_unlock_bh(&ab->base_lock);
  91		return -ENOENT;
  92	}
  93
  94	for (; tid >= 0; tid--)
  95		ath11k_peer_rx_tid_delete(ar, peer, tid);
  96
  97	spin_unlock_bh(&ab->base_lock);
  98
  99	return ret;
 100}
 101
 102void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
 103{
 104	if (!ring->vaddr_unaligned)
 105		return;
 106
 107	if (ring->cached)
 108		kfree(ring->vaddr_unaligned);
 109	else
 110		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
 111				  ring->paddr_unaligned);
 112
 113	ring->vaddr_unaligned = NULL;
 114}
 115
 116static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
 117{
 118	int ext_group_num;
 119	u8 mask = 1 << ring_num;
 120
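	/* Return the first ext IRQ group whose mask has this ring's bit set */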
 121	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
 122	     ext_group_num++) {
 123		if (mask & grp_mask[ext_group_num])
 124			return ext_group_num;
 125	}
 126
 127	return -ENOENT;
 128}
 129
 130static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
 131					      enum hal_ring_type type, int ring_num)
 132{
 133	const u8 *grp_mask;
 134
 135	switch (type) {
 136	case HAL_WBM2SW_RELEASE:
 137		if (ring_num == DP_RX_RELEASE_RING_NUM) {
 138			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
 139			ring_num = 0;
 140		} else {
 141			grp_mask = &ab->hw_params.ring_mask->tx[0];
 142		}
 143		break;
 144	case HAL_REO_EXCEPTION:
 145		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
 146		break;
 147	case HAL_REO_DST:
 148		grp_mask = &ab->hw_params.ring_mask->rx[0];
 149		break;
 150	case HAL_REO_STATUS:
 151		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
 152		break;
 153	case HAL_RXDMA_MONITOR_STATUS:
 154	case HAL_RXDMA_MONITOR_DST:
 155		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
 156		break;
 157	case HAL_RXDMA_DST:
 158		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
 159		break;
 160	case HAL_RXDMA_BUF:
 161		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
 162		break;
 163	case HAL_RXDMA_MONITOR_BUF:
 164	case HAL_TCL_DATA:
 165	case HAL_TCL_CMD:
 166	case HAL_REO_CMD:
 167	case HAL_SW2WBM_RELEASE:
 168	case HAL_WBM_IDLE_LINK:
 169	case HAL_TCL_STATUS:
 170	case HAL_REO_REINJECT:
 171	case HAL_CE_SRC:
 172	case HAL_CE_DST:
 173	case HAL_CE_DST_STATUS:
 174	default:
 175		return -ENOENT;
 176	}
 177
 178	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
 179}
 180
 181static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
 182				     struct hal_srng_params *ring_params,
 183				     enum hal_ring_type type, int ring_num)
 184{
 185	int msi_group_number, msi_data_count;
 186	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
 187	int ret;
 188
 189	ret = ath11k_get_user_msi_vector(ab, "DP",
 190					 &msi_data_count, &msi_data_start,
 191					 &msi_irq_start);
 192	if (ret)
 193		return;
 194
 195	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
 196							      ring_num);
 197	if (msi_group_number < 0) {
 198		ath11k_dbg(ab, ATH11K_DBG_PCI,
  199			   "ring not part of an ext_group; ring_type: %d, ring_num %d",
 200			   type, ring_num);
 201		ring_params->msi_addr = 0;
 202		ring_params->msi_data = 0;
 203		return;
 204	}
 205
 206	if (msi_group_number > msi_data_count) {
 207		ath11k_dbg(ab, ATH11K_DBG_PCI,
 208			   "multiple msi_groups share one msi, msi_group_num %d",
 209			   msi_group_number);
 210	}
 211
 212	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
 213
 214	ring_params->msi_addr = addr_lo;
 215	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
 216	ring_params->msi_data = (msi_group_number % msi_data_count)
 217		+ msi_data_start;
 218	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
 219}
 220
 221int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
 222			 enum hal_ring_type type, int ring_num,
 223			 int mac_id, int num_entries)
 224{
 225	struct hal_srng_params params = { 0 };
 226	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
 227	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
 228	int ret;
 229	bool cached = false;
 230
 231	if (max_entries < 0 || entry_sz < 0)
 232		return -EINVAL;
 233
 234	if (num_entries > max_entries)
 235		num_entries = max_entries;
 236
 237	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
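	/* Over-allocate by HAL_RING_BASE_ALIGN - 1 bytes so the ring base
	 * address can be aligned with PTR_ALIGN() further below.
	 */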
 238
 239	if (ab->hw_params.alloc_cacheable_memory) {
 240		/* Allocate the reo dst and tx completion rings from cacheable memory */
 241		switch (type) {
 242		case HAL_REO_DST:
 243		case HAL_WBM2SW_RELEASE:
 244			cached = true;
 245			break;
 246		default:
 247			cached = false;
 248		}
 249
 250		if (cached) {
 251			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
 252			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
 253		}
 254	}
 255
 256	if (!cached)
 257		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
 258							   &ring->paddr_unaligned,
 259							   GFP_KERNEL);
 260
 261	if (!ring->vaddr_unaligned)
 262		return -ENOMEM;
 263
 264	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
 265	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
 266		      (unsigned long)ring->vaddr_unaligned);
 267
 268	params.ring_base_vaddr = ring->vaddr;
 269	params.ring_base_paddr = ring->paddr;
 270	params.num_entries = num_entries;
 271	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
 272
 273	switch (type) {
 274	case HAL_REO_DST:
 275		params.intr_batch_cntr_thres_entries =
 276					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
 277		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 278		break;
 279	case HAL_RXDMA_BUF:
 280	case HAL_RXDMA_MONITOR_BUF:
 281	case HAL_RXDMA_MONITOR_STATUS:
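		/* Use a low-threshold interrupt at 1/8th of the ring size for
		 * the rxdma buffer/status rings instead of batch counting.
		 */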
 282		params.low_threshold = num_entries >> 3;
 283		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
 284		params.intr_batch_cntr_thres_entries = 0;
 285		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 286		break;
 287	case HAL_WBM2SW_RELEASE:
 288		if (ring_num < 3) {
 289			params.intr_batch_cntr_thres_entries =
 290					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
 291			params.intr_timer_thres_us =
 292					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
 293			break;
 294		}
  295		/* fall through when ring_num >= 3 */
 296		fallthrough;
 297	case HAL_REO_EXCEPTION:
 298	case HAL_REO_REINJECT:
 299	case HAL_REO_CMD:
 300	case HAL_REO_STATUS:
 301	case HAL_TCL_DATA:
 302	case HAL_TCL_CMD:
 303	case HAL_TCL_STATUS:
 304	case HAL_WBM_IDLE_LINK:
 305	case HAL_SW2WBM_RELEASE:
 306	case HAL_RXDMA_DST:
 307	case HAL_RXDMA_MONITOR_DST:
 308	case HAL_RXDMA_MONITOR_DESC:
 309		params.intr_batch_cntr_thres_entries =
 310					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
 311		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
 312		break;
 313	case HAL_RXDMA_DIR_BUF:
 314		break;
 315	default:
 316		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
 317		return -EINVAL;
 318	}
 319
 320	if (cached) {
 321		params.flags |= HAL_SRNG_FLAGS_CACHED;
 322		ring->cached = 1;
 323	}
 324
 325	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
 326	if (ret < 0) {
 327		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
 328			    ret, ring_num);
 329		return ret;
 330	}
 331
 332	ring->ring_id = ret;
 333
 334	return 0;
 335}
 336
 337void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
 338{
 339	int i;
 340
 341	if (!ab->hw_params.supports_shadow_regs)
 342		return;
 343
 344	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
 345		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
 346
 347	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
 348}
 349
 350static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
 351{
 352	struct ath11k_dp *dp = &ab->dp;
 353	int i;
 354
 355	ath11k_dp_stop_shadow_timers(ab);
 356	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
 357	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
 358	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
 359	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 360		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
 361		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
 362	}
 363	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
 364	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
 365	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
 366	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
 367	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
 368}
 369
 370static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
 371{
 372	struct ath11k_dp *dp = &ab->dp;
 373	struct hal_srng *srng;
 374	int i, ret;
 375	u8 tcl_num, wbm_num;
 376
 377	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
 378				   HAL_SW2WBM_RELEASE, 0, 0,
 379				   DP_WBM_RELEASE_RING_SIZE);
 380	if (ret) {
 381		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
 382			    ret);
 383		goto err;
 384	}
 385
 386	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
 387				   DP_TCL_CMD_RING_SIZE);
 388	if (ret) {
 389		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
 390		goto err;
 391	}
 392
 393	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
 394				   0, 0, DP_TCL_STATUS_RING_SIZE);
 395	if (ret) {
 396		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
 397		goto err;
 398	}
 399
 400	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 401		tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
 402		wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
 403
 404		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
 405					   HAL_TCL_DATA, tcl_num, 0,
 406					   ab->hw_params.tx_ring_size);
 407		if (ret) {
 408			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
 409				    i, ret);
 410			goto err;
 411		}
 412
 413		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
 414					   HAL_WBM2SW_RELEASE, wbm_num, 0,
 415					   DP_TX_COMP_RING_SIZE);
 416		if (ret) {
 417			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
 418				    i, ret);
 419			goto err;
 420		}
 421
 422		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
 423		ath11k_hal_tx_init_data_ring(ab, srng);
 424
 425		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
 426					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
 427					    dp->tx_ring[i].tcl_data_ring.ring_id);
 428	}
 429
 430	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
 431				   0, 0, DP_REO_REINJECT_RING_SIZE);
 432	if (ret) {
 433		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
 434			    ret);
 435		goto err;
 436	}
 437
 438	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
 439				   DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
 440	if (ret) {
 441		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
 442		goto err;
 443	}
 444
 445	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
 446				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
 447	if (ret) {
 448		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
 449			    ret);
 450		goto err;
 451	}
 452
 453	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
 454				   0, 0, DP_REO_CMD_RING_SIZE);
 455	if (ret) {
 456		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
 457		goto err;
 458	}
 459
 460	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 461	ath11k_hal_reo_init_cmd_ring(ab, srng);
 462
 463	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
 464				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
 465				    dp->reo_cmd_ring.ring_id);
 466
 467	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
 468				   0, 0, DP_REO_STATUS_RING_SIZE);
 469	if (ret) {
 470		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
 471		goto err;
 472	}
 473
  474	/* When hash based routing of rx packets is enabled, 32 entries are
  475	 * configured to map the hash values to the rings.
  476	 */
 477	ab->hw_params.hw_ops->reo_setup(ab);
 478
 479	return 0;
 480
 481err:
 482	ath11k_dp_srng_common_cleanup(ab);
 483
 484	return ret;
 485}
 486
 487static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
 488{
 489	struct ath11k_dp *dp = &ab->dp;
 490	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 491	int i;
 492
 493	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
 494		if (!slist[i].vaddr)
 495			continue;
 496
 497		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 498				  slist[i].vaddr, slist[i].paddr);
 499		slist[i].vaddr = NULL;
 500	}
 501}
 502
 503static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
 504						  int size,
 505						  u32 n_link_desc_bank,
 506						  u32 n_link_desc,
 507						  u32 last_bank_sz)
 508{
 509	struct ath11k_dp *dp = &ab->dp;
 510	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
 511	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 512	u32 n_entries_per_buf;
 513	int num_scatter_buf, scatter_idx;
 514	struct hal_wbm_link_desc *scatter_buf;
 515	int align_bytes, n_entries;
 516	dma_addr_t paddr;
 517	int rem_entries;
 518	int i;
 519	int ret = 0;
 520	u32 end_offset;
 521
 522	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
 523		ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
 524	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
 525
 526	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
 527		return -EINVAL;
 528
 529	for (i = 0; i < num_scatter_buf; i++) {
 530		slist[i].vaddr = dma_alloc_coherent(ab->dev,
 531						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 532						    &slist[i].paddr, GFP_KERNEL);
 533		if (!slist[i].vaddr) {
 534			ret = -ENOMEM;
 535			goto err;
 536		}
 537	}
 538
 539	scatter_idx = 0;
 540	scatter_buf = slist[scatter_idx].vaddr;
 541	rem_entries = n_entries_per_buf;
 542
 543	for (i = 0; i < n_link_desc_bank; i++) {
 544		align_bytes = link_desc_banks[i].vaddr -
 545			      link_desc_banks[i].vaddr_unaligned;
 546		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
 547			     HAL_LINK_DESC_SIZE;
 548		paddr = link_desc_banks[i].paddr;
 549		while (n_entries) {
 550			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
 551			n_entries--;
 552			paddr += HAL_LINK_DESC_SIZE;
 553			if (rem_entries) {
 554				rem_entries--;
 555				scatter_buf++;
 556				continue;
 557			}
 558
 559			rem_entries = n_entries_per_buf;
 560			scatter_idx++;
 561			scatter_buf = slist[scatter_idx].vaddr;
 562		}
 563	}
 564
 565	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
 566		     sizeof(struct hal_wbm_link_desc);
 567	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
 568					n_link_desc, end_offset);
 569
 570	return 0;
 571
 572err:
 573	ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 574
 575	return ret;
 576}
 577
 578static void
 579ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
 580			      struct dp_link_desc_bank *link_desc_banks)
 581{
 582	int i;
 583
 584	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
 585		if (link_desc_banks[i].vaddr_unaligned) {
 586			dma_free_coherent(ab->dev,
 587					  link_desc_banks[i].size,
 588					  link_desc_banks[i].vaddr_unaligned,
 589					  link_desc_banks[i].paddr_unaligned);
 590			link_desc_banks[i].vaddr_unaligned = NULL;
 591		}
 592	}
 593}
 594
 595static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
 596					  struct dp_link_desc_bank *desc_bank,
 597					  int n_link_desc_bank,
 598					  int last_bank_sz)
 599{
 600	struct ath11k_dp *dp = &ab->dp;
 601	int i;
 602	int ret = 0;
 603	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
 604
 605	for (i = 0; i < n_link_desc_bank; i++) {
 606		if (i == (n_link_desc_bank - 1) && last_bank_sz)
 607			desc_sz = last_bank_sz;
 608
 609		desc_bank[i].vaddr_unaligned =
 610					dma_alloc_coherent(ab->dev, desc_sz,
 611							   &desc_bank[i].paddr_unaligned,
 612							   GFP_KERNEL);
 613		if (!desc_bank[i].vaddr_unaligned) {
 614			ret = -ENOMEM;
 615			goto err;
 616		}
 617
 618		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
 619					       HAL_LINK_DESC_ALIGN);
 620		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
 621				     ((unsigned long)desc_bank[i].vaddr -
 622				      (unsigned long)desc_bank[i].vaddr_unaligned);
 623		desc_bank[i].size = desc_sz;
 624	}
 625
 626	return 0;
 627
 628err:
 629	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
 630
 631	return ret;
 632}
 633
 634void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
 635				 struct dp_link_desc_bank *desc_bank,
 636				 u32 ring_type, struct dp_srng *ring)
 637{
 638	ath11k_dp_link_desc_bank_free(ab, desc_bank);
 639
 640	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
 641		ath11k_dp_srng_cleanup(ab, ring);
 642		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 643	}
 644}
 645
 646static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
 647{
 648	struct ath11k_dp *dp = &ab->dp;
 649	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
 650	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
 651	int ret = 0;
 652
 653	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
 654			   HAL_NUM_MPDUS_PER_LINK_DESC;
 655
 656	n_mpdu_queue_desc = n_mpdu_link_desc /
 657			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
 658
 659	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
 660			       DP_AVG_MSDUS_PER_FLOW) /
 661			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;
 662
 663	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
 664			       DP_AVG_MSDUS_PER_MPDU) /
 665			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;
 666
 667	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
 668		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;
 669
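	/* Round the descriptor count up to the next power of two if needed */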
 670	if (*n_link_desc & (*n_link_desc - 1))
 671		*n_link_desc = 1 << fls(*n_link_desc);
 672
 673	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
 674				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
 675	if (ret) {
 676		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
 677		return ret;
 678	}
 679	return ret;
 680}
 681
 682int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
 683			      struct dp_link_desc_bank *link_desc_banks,
 684			      u32 ring_type, struct hal_srng *srng,
 685			      u32 n_link_desc)
 686{
 687	u32 tot_mem_sz;
 688	u32 n_link_desc_bank, last_bank_sz;
 689	u32 entry_sz, align_bytes, n_entries;
 690	u32 paddr;
 691	u32 *desc;
 692	int i, ret;
 693
 694	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
 695	tot_mem_sz += HAL_LINK_DESC_ALIGN;
 696
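	/* Split the total memory into banks of at most
	 * DP_LINK_DESC_ALLOC_SIZE_THRESH bytes; the last bank may be smaller.
	 */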
 697	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
 698		n_link_desc_bank = 1;
 699		last_bank_sz = tot_mem_sz;
 700	} else {
 701		n_link_desc_bank = tot_mem_sz /
 702				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 703				    HAL_LINK_DESC_ALIGN);
 704		last_bank_sz = tot_mem_sz %
 705			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 706				HAL_LINK_DESC_ALIGN);
 707
 708		if (last_bank_sz)
 709			n_link_desc_bank += 1;
 710	}
 711
 712	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
 713		return -EINVAL;
 714
 715	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
 716					     n_link_desc_bank, last_bank_sz);
 717	if (ret)
 718		return ret;
 719
 720	/* Setup link desc idle list for HW internal usage */
 721	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
 722	tot_mem_sz = entry_sz * n_link_desc;
 723
 724	/* Setup scatter desc list when the total memory requirement is more */
 725	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
 726	    ring_type != HAL_RXDMA_MONITOR_DESC) {
 727		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
 728							     n_link_desc_bank,
 729							     n_link_desc,
 730							     last_bank_sz);
 731		if (ret) {
  732			ath11k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
 733				    ret);
 734			goto fail_desc_bank_free;
 735		}
 736
 737		return 0;
 738	}
 739
 740	spin_lock_bh(&srng->lock);
 741
 742	ath11k_hal_srng_access_begin(ab, srng);
 743
 744	for (i = 0; i < n_link_desc_bank; i++) {
 745		align_bytes = link_desc_banks[i].vaddr -
 746			      link_desc_banks[i].vaddr_unaligned;
 747		n_entries = (link_desc_banks[i].size - align_bytes) /
 748			    HAL_LINK_DESC_SIZE;
 749		paddr = link_desc_banks[i].paddr;
 750		while (n_entries &&
 751		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
 752			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
 753						      i, paddr);
 754			n_entries--;
 755			paddr += HAL_LINK_DESC_SIZE;
 756		}
 757	}
 758
 759	ath11k_hal_srng_access_end(ab, srng);
 760
 761	spin_unlock_bh(&srng->lock);
 762
 763	return 0;
 764
 765fail_desc_bank_free:
 766	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
 767
 768	return ret;
 769}
 770
 771int ath11k_dp_service_srng(struct ath11k_base *ab,
 772			   struct ath11k_ext_irq_grp *irq_grp,
 773			   int budget)
 774{
 775	struct napi_struct *napi = &irq_grp->napi;
 776	const struct ath11k_hw_hal_params *hal_params;
 777	int grp_id = irq_grp->grp_id;
 778	int work_done = 0;
 779	int i, j;
 780	int tot_work_done = 0;
 781
 782	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 783		if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
 784		    ab->hw_params.ring_mask->tx[grp_id])
 785			ath11k_dp_tx_completion_handler(ab, i);
 786	}
 787
 788	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
 789		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
 790		budget -= work_done;
 791		tot_work_done += work_done;
 792		if (budget <= 0)
 793			goto done;
 794	}
 795
 796	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
 797		work_done = ath11k_dp_rx_process_wbm_err(ab,
 798							 napi,
 799							 budget);
 800		budget -= work_done;
 801		tot_work_done += work_done;
 802
 803		if (budget <= 0)
 804			goto done;
 805	}
 806
 807	if (ab->hw_params.ring_mask->rx[grp_id]) {
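		/* Pick the highest rx ring index set in this group's mask */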
 808		i =  fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
 809		work_done = ath11k_dp_process_rx(ab, i, napi,
 810						 budget);
 811		budget -= work_done;
 812		tot_work_done += work_done;
 813		if (budget <= 0)
 814			goto done;
 815	}
 816
 817	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
 818		for (i = 0; i < ab->num_radios; i++) {
 819			for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 820				int id = i * ab->hw_params.num_rxmda_per_pdev + j;
 821
 822				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
 823					BIT(id)) {
 824					work_done =
 825					ath11k_dp_rx_process_mon_rings(ab,
 826								       id,
 827								       napi, budget);
 828					budget -= work_done;
 829					tot_work_done += work_done;
 830
 831					if (budget <= 0)
 832						goto done;
 833				}
 834			}
 835		}
 836	}
 837
 838	if (ab->hw_params.ring_mask->reo_status[grp_id])
 839		ath11k_dp_process_reo_status(ab);
 840
 841	for (i = 0; i < ab->num_radios; i++) {
 842		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 843			int id = i * ab->hw_params.num_rxmda_per_pdev + j;
 844
 845			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
 846				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
 847				budget -= work_done;
 848				tot_work_done += work_done;
 849			}
 850
 851			if (budget <= 0)
 852				goto done;
 853
 854			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
 855				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
 856				struct ath11k_pdev_dp *dp = &ar->dp;
 857				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
 858
 859				hal_params = ab->hw_params.hal_params;
 860				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
 861							   hal_params->rx_buf_rbm);
 862			}
 863		}
 864	}
 865	/* TODO: Implement handler for other interrupts */
 866
 867done:
 868	return tot_work_done;
 869}
 870EXPORT_SYMBOL(ath11k_dp_service_srng);
 871
 872void ath11k_dp_pdev_free(struct ath11k_base *ab)
 873{
 874	struct ath11k *ar;
 875	int i;
 876
 877	del_timer_sync(&ab->mon_reap_timer);
 878
 879	for (i = 0; i < ab->num_radios; i++) {
 880		ar = ab->pdevs[i].ar;
 881		ath11k_dp_rx_pdev_free(ab, i);
 882		ath11k_debugfs_unregister(ar);
 883		ath11k_dp_rx_pdev_mon_detach(ar);
 884	}
 885}
 886
 887void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
 888{
 889	struct ath11k *ar;
 890	struct ath11k_pdev_dp *dp;
 891	int i;
 892	int j;
 893
 894	for (i = 0; i <  ab->num_radios; i++) {
 895		ar = ab->pdevs[i].ar;
 896		dp = &ar->dp;
 897		dp->mac_id = i;
 898		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
 899		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
 900		atomic_set(&dp->num_tx_pending, 0);
 901		init_waitqueue_head(&dp->tx_empty_waitq);
 902		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 903			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
 904			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
 905		}
 906		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
 907		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
 908	}
 909}
 910
 911int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
 912{
 913	struct ath11k *ar;
 914	int ret;
 915	int i;
 916
  917	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different ACs */
 918	for (i = 0; i < ab->num_radios; i++) {
 919		ar = ab->pdevs[i].ar;
 920		ret = ath11k_dp_rx_pdev_alloc(ab, i);
 921		if (ret) {
 922			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
 923				    i);
 924			goto err;
 925		}
 926		ret = ath11k_dp_rx_pdev_mon_attach(ar);
 927		if (ret) {
 928			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
 929				    i);
 930			goto err;
 931		}
 932	}
 933
 934	return 0;
 935
 936err:
 937	ath11k_dp_pdev_free(ab);
 938
 939	return ret;
 940}
 941
 942int ath11k_dp_htt_connect(struct ath11k_dp *dp)
 943{
 944	struct ath11k_htc_svc_conn_req conn_req;
 945	struct ath11k_htc_svc_conn_resp conn_resp;
 946	int status;
 947
 948	memset(&conn_req, 0, sizeof(conn_req));
 949	memset(&conn_resp, 0, sizeof(conn_resp));
 950
 951	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
 952	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
 953
 954	/* connect to control service */
 955	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
 956
 957	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
 958					    &conn_resp);
 959
 960	if (status)
 961		return status;
 962
 963	dp->eid = conn_resp.eid;
 964
 965	return 0;
 966}
 967
 968static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
 969{
  970	 /* When v2_map_support is true: for STA mode, enable the address
  971	  * search index; TCL uses the ast_hash value in the descriptor.
  972	  * When v2_map_support is false: for STA mode, don't enable the
  973	  * address search index.
  974	  */
 975	switch (arvif->vdev_type) {
 976	case WMI_VDEV_TYPE_STA:
 977		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
 978			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 979			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
 980		} else {
 981			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
 982			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
 983		}
 984		break;
 985	case WMI_VDEV_TYPE_AP:
 986	case WMI_VDEV_TYPE_IBSS:
 987		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 988		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
 989		break;
 990	case WMI_VDEV_TYPE_MONITOR:
 991	default:
 992		return;
 993	}
 994}
 995
 996void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
 997{
 998	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
 999			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
1000					  arvif->vdev_id) |
1001			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
1002					  ar->pdev->pdev_id);
1003
1004	/* set HTT extension valid bit to 0 by default */
1005	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
1006
1007	ath11k_dp_update_vdev_search(arvif);
1008}
1009
1010static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
1011{
1012	struct ath11k_base *ab = ctx;
1013	struct sk_buff *msdu = skb;
1014
1015	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
1016			 DMA_TO_DEVICE);
1017
1018	dev_kfree_skb_any(msdu);
1019
1020	return 0;
1021}
1022
1023void ath11k_dp_free(struct ath11k_base *ab)
1024{
1025	struct ath11k_dp *dp = &ab->dp;
1026	int i;
1027
1028	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1029				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1030
1031	ath11k_dp_srng_common_cleanup(ab);
1032
1033	ath11k_dp_reo_cmd_list_cleanup(ab);
1034
1035	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
1036		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
1037		idr_for_each(&dp->tx_ring[i].txbuf_idr,
1038			     ath11k_dp_tx_pending_cleanup, ab);
1039		idr_destroy(&dp->tx_ring[i].txbuf_idr);
1040		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
1041		kfree(dp->tx_ring[i].tx_status);
1042	}
1043
1044	/* Deinit any SOC level resource */
1045}
1046
1047int ath11k_dp_alloc(struct ath11k_base *ab)
1048{
1049	struct ath11k_dp *dp = &ab->dp;
1050	struct hal_srng *srng = NULL;
1051	size_t size = 0;
1052	u32 n_link_desc = 0;
1053	int ret;
1054	int i;
1055
1056	dp->ab = ab;
1057
1058	INIT_LIST_HEAD(&dp->reo_cmd_list);
1059	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
1060	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
1061	spin_lock_init(&dp->reo_cmd_lock);
1062
1063	dp->reo_cmd_cache_flush_count = 0;
1064
1065	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
1066	if (ret) {
1067		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
1068		return ret;
1069	}
1070
1071	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
1072
1073	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
1074					HAL_WBM_IDLE_LINK, srng, n_link_desc);
1075	if (ret) {
1076		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
1077		return ret;
1078	}
1079
1080	ret = ath11k_dp_srng_common_setup(ab);
1081	if (ret)
1082		goto fail_link_desc_cleanup;
1083
1084	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
1085
1086	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
1087		idr_init(&dp->tx_ring[i].txbuf_idr);
1088		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
1089		dp->tx_ring[i].tcl_data_ring_id = i;
1090
1091		dp->tx_ring[i].tx_status_head = 0;
1092		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
1093		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
1094		if (!dp->tx_ring[i].tx_status) {
1095			ret = -ENOMEM;
1096			goto fail_cmn_srng_cleanup;
1097		}
1098	}
1099
1100	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
1101		ath11k_hal_tx_set_dscp_tid_map(ab, i);
1102
1103	/* Init any SOC level resource for DP */
1104
1105	return 0;
1106
1107fail_cmn_srng_cleanup:
1108	ath11k_dp_srng_common_cleanup(ab);
1109
1110fail_link_desc_cleanup:
1111	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1112				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1113
1114	return ret;
1115}
1116
1117static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
1118{
1119	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
1120								 t, timer);
1121	struct ath11k_base *ab = update_timer->ab;
1122	struct hal_srng	*srng = &ab->hal.srng_list[update_timer->ring_id];
1123
1124	spin_lock_bh(&srng->lock);
1125
 1126	/* When the timer fires, the handler checks whether new TX has
 1127	 * happened. The handler updates the HP and stops the timer only
 1128	 * when there were no TX operations during the timeout interval.
 1129	 * The timer is started again when TX happens again.
 1130	 */
1131	if (update_timer->timer_tx_num != update_timer->tx_num) {
1132		update_timer->timer_tx_num = update_timer->tx_num;
1133		mod_timer(&update_timer->timer, jiffies +
1134		  msecs_to_jiffies(update_timer->interval));
1135	} else {
1136		update_timer->started = false;
1137		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
1138	}
1139
1140	spin_unlock_bh(&srng->lock);
1141}
1142
1143void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
1144				  struct hal_srng *srng,
1145				  struct ath11k_hp_update_timer *update_timer)
1146{
1147	lockdep_assert_held(&srng->lock);
1148
1149	if (!ab->hw_params.supports_shadow_regs)
1150		return;
1151
1152	update_timer->tx_num++;
1153
1154	if (update_timer->started)
1155		return;
1156
1157	update_timer->started = true;
1158	update_timer->timer_tx_num = update_timer->tx_num;
1159	mod_timer(&update_timer->timer, jiffies +
1160		  msecs_to_jiffies(update_timer->interval));
1161}
1162
1163void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
1164				 struct ath11k_hp_update_timer *update_timer)
1165{
1166	if (!ab->hw_params.supports_shadow_regs)
1167		return;
1168
1169	if (!update_timer->init)
1170		return;
1171
1172	del_timer_sync(&update_timer->timer);
1173}
1174
1175void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
1176				 struct ath11k_hp_update_timer *update_timer,
1177				 u32 interval, u32 ring_id)
1178{
1179	if (!ab->hw_params.supports_shadow_regs)
1180		return;
1181
1182	update_timer->tx_num = 0;
1183	update_timer->timer_tx_num = 0;
1184	update_timer->ab = ab;
1185	update_timer->ring_id = ring_id;
1186	update_timer->interval = interval;
1187	update_timer->init = true;
1188	timer_setup(&update_timer->timer,
1189		    ath11k_dp_shadow_timer_handler, 0);
1190}
v6.2
   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
   4 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
   5 */
   6
   7#include <crypto/hash.h>
   8#include "core.h"
   9#include "dp_tx.h"
  10#include "hal_tx.h"
  11#include "hif.h"
  12#include "debug.h"
  13#include "dp_rx.h"
  14#include "peer.h"
  15
  16static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
  17					  struct sk_buff *skb)
  18{
  19	dev_kfree_skb_any(skb);
  20}
  21
  22void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
  23{
  24	struct ath11k_base *ab = ar->ab;
  25	struct ath11k_peer *peer;
  26
  27	/* TODO: Any other peer specific DP cleanup */
  28
  29	spin_lock_bh(&ab->base_lock);
  30	peer = ath11k_peer_find(ab, vdev_id, addr);
  31	if (!peer) {
  32		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
  33			    addr, vdev_id);
  34		spin_unlock_bh(&ab->base_lock);
  35		return;
  36	}
  37
  38	ath11k_peer_rx_tid_cleanup(ar, peer);
 
  39	crypto_free_shash(peer->tfm_mmic);
  40	spin_unlock_bh(&ab->base_lock);
  41}
  42
  43int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
  44{
  45	struct ath11k_base *ab = ar->ab;
  46	struct ath11k_peer *peer;
  47	u32 reo_dest;
  48	int ret = 0, tid;
  49
  50	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
  51	reo_dest = ar->dp.mac_id + 1;
  52	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
  53					WMI_PEER_SET_DEFAULT_ROUTING,
  54					DP_RX_HASH_ENABLE | (reo_dest << 1));
  55
  56	if (ret) {
  57		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
  58			    ret, addr, vdev_id);
  59		return ret;
  60	}
  61
  62	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
  63		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
  64					       HAL_PN_TYPE_NONE);
  65		if (ret) {
  66			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
  67				    tid, ret);
  68			goto peer_clean;
  69		}
  70	}
  71
  72	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
  73	if (ret) {
  74		ath11k_warn(ab, "failed to setup rx defrag context\n");
  75		return ret;
 
  76	}
  77
  78	/* TODO: Setup other peer specific resource used in data path */
  79
  80	return 0;
  81
  82peer_clean:
  83	spin_lock_bh(&ab->base_lock);
  84
  85	peer = ath11k_peer_find(ab, vdev_id, addr);
  86	if (!peer) {
  87		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
  88		spin_unlock_bh(&ab->base_lock);
  89		return -ENOENT;
  90	}
  91
  92	for (; tid >= 0; tid--)
  93		ath11k_peer_rx_tid_delete(ar, peer, tid);
  94
  95	spin_unlock_bh(&ab->base_lock);
  96
  97	return ret;
  98}
  99
 100void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
 101{
 102	if (!ring->vaddr_unaligned)
 103		return;
 104
 105	if (ring->cached)
 106		kfree(ring->vaddr_unaligned);
 107	else
 108		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
 109				  ring->paddr_unaligned);
 110
 111	ring->vaddr_unaligned = NULL;
 112}
 113
 114static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
 115{
 116	int ext_group_num;
 117	u8 mask = 1 << ring_num;
 118
 119	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
 120	     ext_group_num++) {
 121		if (mask & grp_mask[ext_group_num])
 122			return ext_group_num;
 123	}
 124
 125	return -ENOENT;
 126}
 127
 128static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
 129					      enum hal_ring_type type, int ring_num)
 130{
 131	const u8 *grp_mask;
 132
 133	switch (type) {
 134	case HAL_WBM2SW_RELEASE:
 135		if (ring_num == DP_RX_RELEASE_RING_NUM) {
 136			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
 137			ring_num = 0;
 138		} else {
 139			grp_mask = &ab->hw_params.ring_mask->tx[0];
 140		}
 141		break;
 142	case HAL_REO_EXCEPTION:
 143		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
 144		break;
 145	case HAL_REO_DST:
 146		grp_mask = &ab->hw_params.ring_mask->rx[0];
 147		break;
 148	case HAL_REO_STATUS:
 149		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
 150		break;
 151	case HAL_RXDMA_MONITOR_STATUS:
 152	case HAL_RXDMA_MONITOR_DST:
 153		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
 154		break;
 155	case HAL_RXDMA_DST:
 156		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
 157		break;
 158	case HAL_RXDMA_BUF:
 159		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
 160		break;
 161	case HAL_RXDMA_MONITOR_BUF:
 162	case HAL_TCL_DATA:
 163	case HAL_TCL_CMD:
 164	case HAL_REO_CMD:
 165	case HAL_SW2WBM_RELEASE:
 166	case HAL_WBM_IDLE_LINK:
 167	case HAL_TCL_STATUS:
 168	case HAL_REO_REINJECT:
 169	case HAL_CE_SRC:
 170	case HAL_CE_DST:
 171	case HAL_CE_DST_STATUS:
 172	default:
 173		return -ENOENT;
 174	}
 175
 176	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
 177}
 178
 179static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
 180				     struct hal_srng_params *ring_params,
 181				     enum hal_ring_type type, int ring_num)
 182{
 183	int msi_group_number, msi_data_count;
 184	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
 185	int ret;
 186
 187	ret = ath11k_get_user_msi_vector(ab, "DP",
 188					 &msi_data_count, &msi_data_start,
 189					 &msi_irq_start);
 190	if (ret)
 191		return;
 192
 193	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
 194							      ring_num);
 195	if (msi_group_number < 0) {
 196		ath11k_dbg(ab, ATH11K_DBG_PCI,
 197			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
 198			   type, ring_num);
 199		ring_params->msi_addr = 0;
 200		ring_params->msi_data = 0;
 201		return;
 202	}
 203
 204	if (msi_group_number > msi_data_count) {
 205		ath11k_dbg(ab, ATH11K_DBG_PCI,
 206			   "multiple msi_groups share one msi, msi_group_num %d",
 207			   msi_group_number);
 208	}
 209
 210	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
 211
 212	ring_params->msi_addr = addr_lo;
 213	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
 214	ring_params->msi_data = (msi_group_number % msi_data_count)
 215		+ msi_data_start;
 216	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
 217}
 218
 219int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
 220			 enum hal_ring_type type, int ring_num,
 221			 int mac_id, int num_entries)
 222{
 223	struct hal_srng_params params = { 0 };
 224	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
 225	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
 226	int ret;
 227	bool cached = false;
 228
 229	if (max_entries < 0 || entry_sz < 0)
 230		return -EINVAL;
 231
 232	if (num_entries > max_entries)
 233		num_entries = max_entries;
 234
 235	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
 236
 237	if (ab->hw_params.alloc_cacheable_memory) {
 238		/* Allocate the reo dst and tx completion rings from cacheable memory */
 239		switch (type) {
 240		case HAL_REO_DST:
 241		case HAL_WBM2SW_RELEASE:
 242			cached = true;
 243			break;
 244		default:
 245			cached = false;
 246		}
 247
 248		if (cached) {
 249			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
 250			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
 251		}
 252	}
 253
 254	if (!cached)
 255		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
 256							   &ring->paddr_unaligned,
 257							   GFP_KERNEL);
 258
 259	if (!ring->vaddr_unaligned)
 260		return -ENOMEM;
 261
 262	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
 263	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
 264		      (unsigned long)ring->vaddr_unaligned);
 265
 266	params.ring_base_vaddr = ring->vaddr;
 267	params.ring_base_paddr = ring->paddr;
 268	params.num_entries = num_entries;
 269	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
 270
 271	switch (type) {
 272	case HAL_REO_DST:
 273		params.intr_batch_cntr_thres_entries =
 274					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
 275		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 276		break;
 277	case HAL_RXDMA_BUF:
 278	case HAL_RXDMA_MONITOR_BUF:
 279	case HAL_RXDMA_MONITOR_STATUS:
 280		params.low_threshold = num_entries >> 3;
 281		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
 282		params.intr_batch_cntr_thres_entries = 0;
 283		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 284		break;
 285	case HAL_WBM2SW_RELEASE:
 286		if (ring_num < 3) {
 287			params.intr_batch_cntr_thres_entries =
 288					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
 289			params.intr_timer_thres_us =
 290					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
 291			break;
 292		}
 293		/* follow through when ring_num >= 3 */
 294		fallthrough;
 295	case HAL_REO_EXCEPTION:
 296	case HAL_REO_REINJECT:
 297	case HAL_REO_CMD:
 298	case HAL_REO_STATUS:
 299	case HAL_TCL_DATA:
 300	case HAL_TCL_CMD:
 301	case HAL_TCL_STATUS:
 302	case HAL_WBM_IDLE_LINK:
 303	case HAL_SW2WBM_RELEASE:
 304	case HAL_RXDMA_DST:
 305	case HAL_RXDMA_MONITOR_DST:
 306	case HAL_RXDMA_MONITOR_DESC:
 307		params.intr_batch_cntr_thres_entries =
 308					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
 309		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
 310		break;
 311	case HAL_RXDMA_DIR_BUF:
 312		break;
 313	default:
 314		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
 315		return -EINVAL;
 316	}
 317
 318	if (cached) {
 319		params.flags |= HAL_SRNG_FLAGS_CACHED;
 320		ring->cached = 1;
 321	}
 322
 323	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
 324	if (ret < 0) {
 325		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
 326			    ret, ring_num);
 327		return ret;
 328	}
 329
 330	ring->ring_id = ret;
 331
 332	return 0;
 333}
 334
 335void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
 336{
 337	int i;
 338
 339	if (!ab->hw_params.supports_shadow_regs)
 340		return;
 341
 342	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
 343		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
 344
 345	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
 346}
 347
 348static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
 349{
 350	struct ath11k_dp *dp = &ab->dp;
 351	int i;
 352
 353	ath11k_dp_stop_shadow_timers(ab);
 354	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
 355	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
 356	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
 357	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 358		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
 359		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
 360	}
 361	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
 362	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
 363	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
 364	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
 365	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
 366}
 367
 368static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
 369{
 370	struct ath11k_dp *dp = &ab->dp;
 371	struct hal_srng *srng;
 372	int i, ret;
 373	u8 tcl_num, wbm_num;
 374
 375	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
 376				   HAL_SW2WBM_RELEASE, 0, 0,
 377				   DP_WBM_RELEASE_RING_SIZE);
 378	if (ret) {
 379		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
 380			    ret);
 381		goto err;
 382	}
 383
 384	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
 385				   DP_TCL_CMD_RING_SIZE);
 386	if (ret) {
 387		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
 388		goto err;
 389	}
 390
 391	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
 392				   0, 0, DP_TCL_STATUS_RING_SIZE);
 393	if (ret) {
 394		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
 395		goto err;
 396	}
 397
 398	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 399		tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
 400		wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
 401
 402		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
 403					   HAL_TCL_DATA, tcl_num, 0,
 404					   ab->hw_params.tx_ring_size);
 405		if (ret) {
 406			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
 407				    i, ret);
 408			goto err;
 409		}
 410
 411		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
 412					   HAL_WBM2SW_RELEASE, wbm_num, 0,
 413					   DP_TX_COMP_RING_SIZE);
 414		if (ret) {
 415			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
 416				    i, ret);
 417			goto err;
 418		}
 419
 420		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
 421		ath11k_hal_tx_init_data_ring(ab, srng);
 422
 423		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
 424					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
 425					    dp->tx_ring[i].tcl_data_ring.ring_id);
 426	}
 427
 428	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
 429				   0, 0, DP_REO_REINJECT_RING_SIZE);
 430	if (ret) {
 431		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
 432			    ret);
 433		goto err;
 434	}
 435
 436	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
 437				   DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
 438	if (ret) {
 439		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
 440		goto err;
 441	}
 442
 443	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
 444				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
 445	if (ret) {
 446		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
 447			    ret);
 448		goto err;
 449	}
 450
 451	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
 452				   0, 0, DP_REO_CMD_RING_SIZE);
 453	if (ret) {
 454		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
 455		goto err;
 456	}
 457
 458	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 459	ath11k_hal_reo_init_cmd_ring(ab, srng);
 460
 461	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
 462				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
 463				    dp->reo_cmd_ring.ring_id);
 464
 465	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
 466				   0, 0, DP_REO_STATUS_RING_SIZE);
 467	if (ret) {
 468		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
 469		goto err;
 470	}
 471
 472	/* When hash based routing of rx packet is enabled, 32 entries to map
 473	 * the hash values to the ring will be configured.
 474	 */
 475	ab->hw_params.hw_ops->reo_setup(ab);
 476
 477	return 0;
 478
 479err:
 480	ath11k_dp_srng_common_cleanup(ab);
 481
 482	return ret;
 483}
 484
 485static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
 486{
 487	struct ath11k_dp *dp = &ab->dp;
 488	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 489	int i;
 490
 491	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
 492		if (!slist[i].vaddr)
 493			continue;
 494
 495		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 496				  slist[i].vaddr, slist[i].paddr);
 497		slist[i].vaddr = NULL;
 498	}
 499}
 500
 501static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
 502						  int size,
 503						  u32 n_link_desc_bank,
 504						  u32 n_link_desc,
 505						  u32 last_bank_sz)
 506{
 507	struct ath11k_dp *dp = &ab->dp;
 508	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
 509	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 510	u32 n_entries_per_buf;
 511	int num_scatter_buf, scatter_idx;
 512	struct hal_wbm_link_desc *scatter_buf;
 513	int align_bytes, n_entries;
 514	dma_addr_t paddr;
 515	int rem_entries;
 516	int i;
 517	int ret = 0;
 518	u32 end_offset;
 519
 520	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
 521		ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
 522	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
 523
 524	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
 525		return -EINVAL;
 526
 527	for (i = 0; i < num_scatter_buf; i++) {
 528		slist[i].vaddr = dma_alloc_coherent(ab->dev,
 529						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 530						    &slist[i].paddr, GFP_KERNEL);
 531		if (!slist[i].vaddr) {
 532			ret = -ENOMEM;
 533			goto err;
 534		}
 535	}
 536
 537	scatter_idx = 0;
 538	scatter_buf = slist[scatter_idx].vaddr;
 539	rem_entries = n_entries_per_buf;
 540
 541	for (i = 0; i < n_link_desc_bank; i++) {
 542		align_bytes = link_desc_banks[i].vaddr -
 543			      link_desc_banks[i].vaddr_unaligned;
 544		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
 545			     HAL_LINK_DESC_SIZE;
 546		paddr = link_desc_banks[i].paddr;
 547		while (n_entries) {
 548			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
 549			n_entries--;
 550			paddr += HAL_LINK_DESC_SIZE;
 551			if (rem_entries) {
 552				rem_entries--;
 553				scatter_buf++;
 554				continue;
 555			}
 556
 557			rem_entries = n_entries_per_buf;
 558			scatter_idx++;
 559			scatter_buf = slist[scatter_idx].vaddr;
 560		}
 561	}
 562
 563	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
 564		     sizeof(struct hal_wbm_link_desc);
 565	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
 566					n_link_desc, end_offset);
 567
 568	return 0;
 569
 570err:
 571	ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 572
 573	return ret;
 574}
 575
 576static void
 577ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
 578			      struct dp_link_desc_bank *link_desc_banks)
 579{
 580	int i;
 581
 582	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
 583		if (link_desc_banks[i].vaddr_unaligned) {
 584			dma_free_coherent(ab->dev,
 585					  link_desc_banks[i].size,
 586					  link_desc_banks[i].vaddr_unaligned,
 587					  link_desc_banks[i].paddr_unaligned);
 588			link_desc_banks[i].vaddr_unaligned = NULL;
 589		}
 590	}
 591}
 592
 593static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
 594					  struct dp_link_desc_bank *desc_bank,
 595					  int n_link_desc_bank,
 596					  int last_bank_sz)
 597{
 598	struct ath11k_dp *dp = &ab->dp;
 599	int i;
 600	int ret = 0;
 601	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
 602
 603	for (i = 0; i < n_link_desc_bank; i++) {
 604		if (i == (n_link_desc_bank - 1) && last_bank_sz)
 605			desc_sz = last_bank_sz;
 606
 607		desc_bank[i].vaddr_unaligned =
 608					dma_alloc_coherent(ab->dev, desc_sz,
 609							   &desc_bank[i].paddr_unaligned,
 610							   GFP_KERNEL);
 611		if (!desc_bank[i].vaddr_unaligned) {
 612			ret = -ENOMEM;
 613			goto err;
 614		}
 615
 616		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
 617					       HAL_LINK_DESC_ALIGN);
 618		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
 619				     ((unsigned long)desc_bank[i].vaddr -
 620				      (unsigned long)desc_bank[i].vaddr_unaligned);
 621		desc_bank[i].size = desc_sz;
 622	}
 623
 624	return 0;
 625
 626err:
 627	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
 628
 629	return ret;
 630}
 631
 632void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
 633				 struct dp_link_desc_bank *desc_bank,
 634				 u32 ring_type, struct dp_srng *ring)
 635{
 636	ath11k_dp_link_desc_bank_free(ab, desc_bank);
 637
 638	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
 639		ath11k_dp_srng_cleanup(ab, ring);
 640		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 641	}
 642}
 643
 644static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
 645{
 646	struct ath11k_dp *dp = &ab->dp;
 647	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
 648	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
 649	int ret = 0;
 650
 651	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
 652			   HAL_NUM_MPDUS_PER_LINK_DESC;
 653
 654	n_mpdu_queue_desc = n_mpdu_link_desc /
 655			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
 656
 657	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
 658			       DP_AVG_MSDUS_PER_FLOW) /
 659			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;
 660
 661	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
 662			       DP_AVG_MSDUS_PER_MPDU) /
 663			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;
 664
 665	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
 666		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;
 667
 668	if (*n_link_desc & (*n_link_desc - 1))
 669		*n_link_desc = 1 << fls(*n_link_desc);
 670
 671	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
 672				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
 673	if (ret) {
 674		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
 675		return ret;
 676	}
 677	return ret;
 678}
 679
 680int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
 681			      struct dp_link_desc_bank *link_desc_banks,
 682			      u32 ring_type, struct hal_srng *srng,
 683			      u32 n_link_desc)
 684{
 685	u32 tot_mem_sz;
 686	u32 n_link_desc_bank, last_bank_sz;
 687	u32 entry_sz, align_bytes, n_entries;
 688	u32 paddr;
 689	u32 *desc;
 690	int i, ret;
 691
 692	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
 693	tot_mem_sz += HAL_LINK_DESC_ALIGN;
 694
 695	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
 696		n_link_desc_bank = 1;
 697		last_bank_sz = tot_mem_sz;
 698	} else {
 699		n_link_desc_bank = tot_mem_sz /
 700				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 701				    HAL_LINK_DESC_ALIGN);
 702		last_bank_sz = tot_mem_sz %
 703			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 704				HAL_LINK_DESC_ALIGN);
 705
 706		if (last_bank_sz)
 707			n_link_desc_bank += 1;
 708	}
 709
 710	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
 711		return -EINVAL;
 712
 713	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
 714					     n_link_desc_bank, last_bank_sz);
 715	if (ret)
 716		return ret;
 717
 718	/* Setup link desc idle list for HW internal usage */
 719	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
 720	tot_mem_sz = entry_sz * n_link_desc;
 721
 722	/* Setup scatter desc list when the total memory requirement is more */
 723	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
 724	    ring_type != HAL_RXDMA_MONITOR_DESC) {
 725		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
 726							     n_link_desc_bank,
 727							     n_link_desc,
 728							     last_bank_sz);
 729		if (ret) {
 730			ath11k_warn(ab, "failed to setup scatter idle list descriptor: %d\n",
 731				    ret);
 732			goto fail_desc_bank_free;
 733		}
 734
 735		return 0;
 736	}
 737
 738	spin_lock_bh(&srng->lock);
 739
 740	ath11k_hal_srng_access_begin(ab, srng);
 741
 742	for (i = 0; i < n_link_desc_bank; i++) {
 743		align_bytes = link_desc_banks[i].vaddr -
 744			      link_desc_banks[i].vaddr_unaligned;
 745		n_entries = (link_desc_banks[i].size - align_bytes) /
 746			    HAL_LINK_DESC_SIZE;
 747		paddr = link_desc_banks[i].paddr;
 748		while (n_entries &&
 749		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
 750			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
 751						      i, paddr);
 752			n_entries--;
 753			paddr += HAL_LINK_DESC_SIZE;
 754		}
 755	}
 756
 757	ath11k_hal_srng_access_end(ab, srng);
 758
 759	spin_unlock_bh(&srng->lock);
 760
 761	return 0;
 762
 763fail_desc_bank_free:
 764	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
 765
 766	return ret;
 767}
 768
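/* Service every ring mapped to this ext IRQ group through the hw ring
 * masks: TX completions, RX, RX error, WBM release, monitor and REO
 * status rings, plus RXDMA error handling and RX buffer replenishment.
 * Returns the total work done within the given NAPI budget.
 */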
 769int ath11k_dp_service_srng(struct ath11k_base *ab,
 770			   struct ath11k_ext_irq_grp *irq_grp,
 771			   int budget)
 772{
 773	struct napi_struct *napi = &irq_grp->napi;
 774	const struct ath11k_hw_hal_params *hal_params;
 775	int grp_id = irq_grp->grp_id;
 776	int work_done = 0;
 777	int i, j;
 778	int tot_work_done = 0;
 779
 780	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 781		if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
 782		    ab->hw_params.ring_mask->tx[grp_id])
 783			ath11k_dp_tx_completion_handler(ab, i);
 784	}
 785
 786	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
 787		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
 788		budget -= work_done;
 789		tot_work_done += work_done;
 790		if (budget <= 0)
 791			goto done;
 792	}
 793
 794	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
 795		work_done = ath11k_dp_rx_process_wbm_err(ab,
 796							 napi,
 797							 budget);
 798		budget -= work_done;
 799		tot_work_done += work_done;
 800
 801		if (budget <= 0)
 802			goto done;
 803	}
 804
 805	if (ab->hw_params.ring_mask->rx[grp_id]) {
 806		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
 807		work_done = ath11k_dp_process_rx(ab, i, napi,
 808						 budget);
 809		budget -= work_done;
 810		tot_work_done += work_done;
 811		if (budget <= 0)
 812			goto done;
 813	}
 814
 815	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
 816		for (i = 0; i < ab->num_radios; i++) {
 817			for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 818				int id = i * ab->hw_params.num_rxmda_per_pdev + j;
 819
 820				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
 821					BIT(id)) {
 822					work_done =
 823					ath11k_dp_rx_process_mon_rings(ab,
 824								       id,
 825								       napi, budget);
 826					budget -= work_done;
 827					tot_work_done += work_done;
 828
 829					if (budget <= 0)
 830						goto done;
 831				}
 832			}
 833		}
 834	}
 835
 836	if (ab->hw_params.ring_mask->reo_status[grp_id])
 837		ath11k_dp_process_reo_status(ab);
 838
 839	for (i = 0; i < ab->num_radios; i++) {
 840		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 841			int id = i * ab->hw_params.num_rxmda_per_pdev + j;
 842
 843			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
 844				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
 845				budget -= work_done;
 846				tot_work_done += work_done;
 847			}
 848
 849			if (budget <= 0)
 850				goto done;
 851
 852			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
 853				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
 854				struct ath11k_pdev_dp *dp = &ar->dp;
 855				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
 856
 857				hal_params = ab->hw_params.hal_params;
 858				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
 859							   hal_params->rx_buf_rbm);
 860			}
 861		}
 862	}
 863	/* TODO: Implement handler for other interrupts */
 864
 865done:
 866	return tot_work_done;
 867}
 868EXPORT_SYMBOL(ath11k_dp_service_srng);
 869
 870void ath11k_dp_pdev_free(struct ath11k_base *ab)
 871{
 872	struct ath11k *ar;
 873	int i;
 874
 875	del_timer_sync(&ab->mon_reap_timer);
 876
 877	for (i = 0; i < ab->num_radios; i++) {
 878		ar = ab->pdevs[i].ar;
 879		ath11k_dp_rx_pdev_free(ab, i);
 880		ath11k_debugfs_unregister(ar);
 881		ath11k_dp_rx_pdev_mon_detach(ar);
 882	}
 883}
 884
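/* Initialise per-pdev DP state (mac_id, refill ring idrs and locks,
 * TX-pending accounting) ahead of the per-pdev rx ring setup.
 */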
 885void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
 886{
 887	struct ath11k *ar;
 888	struct ath11k_pdev_dp *dp;
 889	int i;
 890	int j;
 891
 892	for (i = 0; i < ab->num_radios; i++) {
 893		ar = ab->pdevs[i].ar;
 894		dp = &ar->dp;
 895		dp->mac_id = i;
 896		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
 897		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
 898		atomic_set(&dp->num_tx_pending, 0);
 899		init_waitqueue_head(&dp->tx_empty_waitq);
 900		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 901			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
 902			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
 903		}
 904		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
 905		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
 906	}
 907}
 908
 909int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
 910{
 911	struct ath11k *ar;
 912	int ret;
 913	int i;
 914
 915	/* TODO: Per-pdev rx ring, unlike the tx ring which is mapped to different ACs */
 916	for (i = 0; i < ab->num_radios; i++) {
 917		ar = ab->pdevs[i].ar;
 918		ret = ath11k_dp_rx_pdev_alloc(ab, i);
 919		if (ret) {
 920			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
 921				    i);
 922			goto err;
 923		}
 924		ret = ath11k_dp_rx_pdev_mon_attach(ar);
 925		if (ret) {
 926			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
 927				    i);
 928			goto err;
 929		}
 930	}
 931
 932	return 0;
 933
 934err:
 935	ath11k_dp_pdev_free(ab);
 936
 937	return ret;
 938}
 939
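/* Connect the HTT data service over HTC and record the endpoint id
 * to be used for HTT messages.
 */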
 940int ath11k_dp_htt_connect(struct ath11k_dp *dp)
 941{
 942	struct ath11k_htc_svc_conn_req conn_req;
 943	struct ath11k_htc_svc_conn_resp conn_resp;
 944	int status;
 945
 946	memset(&conn_req, 0, sizeof(conn_req));
 947	memset(&conn_resp, 0, sizeof(conn_resp));
 948
 949	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
 950	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
 951
 952	/* connect to the HTT data service */
 953	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
 954
 955	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
 956					    &conn_resp);
 957
 958	if (status)
 959		return status;
 960
 961	dp->eid = conn_resp.eid;
 962
 963	return 0;
 964}
 965
 966static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
 967{
 968	/* When v2_map_support is true: for STA mode, enable the address
 969	 * search index; TCL uses the ast_hash value in the descriptor.
 970	 * When v2_map_support is false: for STA mode, don't enable the
 971	 * address search index.
 972	 */
 973	switch (arvif->vdev_type) {
 974	case WMI_VDEV_TYPE_STA:
 975		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
 976			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 977			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
 978		} else {
 979			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
 980			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
 981		}
 982		break;
 983	case WMI_VDEV_TYPE_AP:
 984	case WMI_VDEV_TYPE_IBSS:
 985		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 986		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
 987		break;
 988	case WMI_VDEV_TYPE_MONITOR:
 989	default:
 990		return;
 991	}
 992}
 993
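/* Prepare per-vif TX state: encode the vdev and pdev ids into the TCL
 * metadata, clear the HTT-extension-valid bit and select the TX address
 * search configuration for the vdev type.
 */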
 994void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
 995{
 996	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
 997			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
 998					  arvif->vdev_id) |
 999			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
1000					  ar->pdev->pdev_id);
1001
1002	/* set HTT extension valid bit to 0 by default */
1003	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
1004
1005	ath11k_dp_update_vdev_search(arvif);
1006}
1007
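/* idr_for_each() callback: DMA-unmap and free an MSDU still pending in a
 * TX ring when the DP is torn down.
 */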
1008static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
1009{
1010	struct ath11k_base *ab = (struct ath11k_base *)ctx;
1011	struct sk_buff *msdu = skb;
1012
1013	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
1014			 DMA_TO_DEVICE);
1015
1016	dev_kfree_skb_any(msdu);
1017
1018	return 0;
1019}
1020
1021void ath11k_dp_free(struct ath11k_base *ab)
1022{
1023	struct ath11k_dp *dp = &ab->dp;
1024	int i;
1025
1026	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1027				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1028
1029	ath11k_dp_srng_common_cleanup(ab);
1030
1031	ath11k_dp_reo_cmd_list_cleanup(ab);
1032
1033	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
1034		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
1035		idr_for_each(&dp->tx_ring[i].txbuf_idr,
1036			     ath11k_dp_tx_pending_cleanup, ab);
1037		idr_destroy(&dp->tx_ring[i].txbuf_idr);
1038		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
1039		kfree(dp->tx_ring[i].tx_status);
1040	}
1041
1042	/* Deinit any SOC level resource */
1043}
1044
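/* Allocate SoC-level DP resources: the WBM idle link ring and link
 * descriptor banks, the common SRNGs, per-TCL-ring TX status buffers
 * and the DSCP-TID map tables.
 */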
1045int ath11k_dp_alloc(struct ath11k_base *ab)
1046{
1047	struct ath11k_dp *dp = &ab->dp;
1048	struct hal_srng *srng = NULL;
1049	size_t size = 0;
1050	u32 n_link_desc = 0;
1051	int ret;
1052	int i;
1053
1054	dp->ab = ab;
1055
1056	INIT_LIST_HEAD(&dp->reo_cmd_list);
1057	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
1058	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
1059	spin_lock_init(&dp->reo_cmd_lock);
1060
1061	dp->reo_cmd_cache_flush_count = 0;
1062
1063	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
1064	if (ret) {
1065		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
1066		return ret;
1067	}
1068
1069	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
1070
1071	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
1072					HAL_WBM_IDLE_LINK, srng, n_link_desc);
1073	if (ret) {
1074		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
1075		return ret;
1076	}
1077
1078	ret = ath11k_dp_srng_common_setup(ab);
1079	if (ret)
1080		goto fail_link_desc_cleanup;
1081
1082	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
1083
1084	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
1085		idr_init(&dp->tx_ring[i].txbuf_idr);
1086		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
1087		dp->tx_ring[i].tcl_data_ring_id = i;
1088
1089		dp->tx_ring[i].tx_status_head = 0;
1090		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
1091		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
1092		if (!dp->tx_ring[i].tx_status) {
1093			ret = -ENOMEM;
1094			goto fail_cmn_srng_cleanup;
1095		}
1096	}
1097
1098	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
1099		ath11k_hal_tx_set_dscp_tid_map(ab, i);
1100
1101	/* Init any SOC level resource for DP */
1102
1103	return 0;
1104
1105fail_cmn_srng_cleanup:
1106	ath11k_dp_srng_common_cleanup(ab);
1107
1108fail_link_desc_cleanup:
1109	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1110				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1111
1112	return ret;
1113}
1114
1115static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
1116{
1117	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
1118								 t, timer);
1119	struct ath11k_base *ab = update_timer->ab;
1120	struct hal_srng	*srng = &ab->hal.srng_list[update_timer->ring_id];
1121
1122	spin_lock_bh(&srng->lock);
1123
1124	/* When the timer fires, the handler checks whether any new TX
1125	 * has happened. The handler updates the HP only when there were
1126	 * no TX operations during the timeout interval, and then stops
1127	 * the timer. The timer is started again when TX happens again.
1128	 */
1129	if (update_timer->timer_tx_num != update_timer->tx_num) {
1130		update_timer->timer_tx_num = update_timer->tx_num;
1131		mod_timer(&update_timer->timer, jiffies +
1132		  msecs_to_jiffies(update_timer->interval));
1133	} else {
1134		update_timer->started = false;
1135		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
1136	}
1137
1138	spin_unlock_bh(&srng->lock);
1139}
1140
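/* Account a new TX on the ring and start the shadow register HP update
 * timer if it is not already running. The caller must hold srng->lock.
 */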
1141void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
1142				  struct hal_srng *srng,
1143				  struct ath11k_hp_update_timer *update_timer)
1144{
1145	lockdep_assert_held(&srng->lock);
1146
1147	if (!ab->hw_params.supports_shadow_regs)
1148		return;
1149
1150	update_timer->tx_num++;
1151
1152	if (update_timer->started)
1153		return;
1154
1155	update_timer->started = true;
1156	update_timer->timer_tx_num = update_timer->tx_num;
1157	mod_timer(&update_timer->timer, jiffies +
1158		  msecs_to_jiffies(update_timer->interval));
1159}
1160
1161void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
1162				 struct ath11k_hp_update_timer *update_timer)
1163{
1164	if (!ab->hw_params.supports_shadow_regs)
1165		return;
1166
1167	if (!update_timer->init)
1168		return;
1169
1170	del_timer_sync(&update_timer->timer);
1171}
1172
1173void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
1174				 struct ath11k_hp_update_timer *update_timer,
1175				 u32 interval, u32 ring_id)
1176{
1177	if (!ab->hw_params.supports_shadow_regs)
1178		return;
1179
1180	update_timer->tx_num = 0;
1181	update_timer->timer_tx_num = 0;
1182	update_timer->ab = ab;
1183	update_timer->ring_id = ring_id;
1184	update_timer->interval = interval;
1185	update_timer->init = true;
1186	timer_setup(&update_timer->timer,
1187		    ath11k_dp_shadow_timer_handler, 0);
1188}