v6.8
   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
   4 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
   5 */
   6
   7#include <crypto/hash.h>
   8#include "core.h"
   9#include "dp_tx.h"
  10#include "hal_tx.h"
  11#include "hif.h"
  12#include "debug.h"
  13#include "dp_rx.h"
  14#include "peer.h"
  15
  16static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
  17					  struct sk_buff *skb)
  18{
  19	dev_kfree_skb_any(skb);
  20}
  21
  22void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
  23{
  24	struct ath11k_base *ab = ar->ab;
  25	struct ath11k_peer *peer;
  26
  27	/* TODO: Any other peer specific DP cleanup */
  28
  29	spin_lock_bh(&ab->base_lock);
  30	peer = ath11k_peer_find(ab, vdev_id, addr);
  31	if (!peer) {
  32		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
  33			    addr, vdev_id);
  34		spin_unlock_bh(&ab->base_lock);
  35		return;
  36	}
  37
  38	ath11k_peer_rx_tid_cleanup(ar, peer);
  39	peer->dp_setup_done = false;
  40	crypto_free_shash(peer->tfm_mmic);
  41	spin_unlock_bh(&ab->base_lock);
  42}
  43
  44int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
  45{
  46	struct ath11k_base *ab = ar->ab;
  47	struct ath11k_peer *peer;
  48	u32 reo_dest;
  49	int ret = 0, tid;
  50
  51	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
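	/* Worked example (assuming DP_RX_HASH_ENABLE occupies bit 0): for
	 * mac_id 0, reo_dest is 1 and the routing value below becomes
	 * DP_RX_HASH_ENABLE | (1 << 1), i.e. hash-based steering into REO
	 * destination ring 1.
	 */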
  52	reo_dest = ar->dp.mac_id + 1;
  53	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
  54					WMI_PEER_SET_DEFAULT_ROUTING,
  55					DP_RX_HASH_ENABLE | (reo_dest << 1));
  56
  57	if (ret) {
  58		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
  59			    ret, addr, vdev_id);
  60		return ret;
  61	}
  62
  63	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
  64		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
  65					       HAL_PN_TYPE_NONE);
  66		if (ret) {
  67			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
  68				    tid, ret);
  69			goto peer_clean;
  70		}
  71	}
  72
  73	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
  74	if (ret) {
  75		ath11k_warn(ab, "failed to setup rx defrag context\n");
  76		tid--;
  77		goto peer_clean;
  78	}
  79
  80	/* TODO: Setup other peer specific resource used in data path */
  81
  82	return 0;
  83
  84peer_clean:
  85	spin_lock_bh(&ab->base_lock);
  86
  87	peer = ath11k_peer_find(ab, vdev_id, addr);
  88	if (!peer) {
  89		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
  90		spin_unlock_bh(&ab->base_lock);
  91		return -ENOENT;
  92	}
  93
  94	for (; tid >= 0; tid--)
  95		ath11k_peer_rx_tid_delete(ar, peer, tid);
  96
  97	spin_unlock_bh(&ab->base_lock);
  98
  99	return ret;
 100}
 101
 102void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
 103{
 104	if (!ring->vaddr_unaligned)
 105		return;
 106
 107	if (ring->cached)
 108		kfree(ring->vaddr_unaligned);
 109	else
 110		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
 111				  ring->paddr_unaligned);
 112
 113	ring->vaddr_unaligned = NULL;
 114}
 115
 116static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
 117{
 118	int ext_group_num;
 119	u8 mask = 1 << ring_num;
 120
 121	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
 122	     ext_group_num++) {
 123		if (mask & grp_mask[ext_group_num])
 124			return ext_group_num;
 125	}
 126
 127	return -ENOENT;
 128}
 129
 130static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
 131					      enum hal_ring_type type, int ring_num)
 132{
 133	const u8 *grp_mask;
 134
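	/* Map the ring to the ext irq group servicing it: pick the hw_params
	 * interrupt mask that covers this ring type, then search the
	 * per-group masks for the bit matching ring_num (see
	 * ath11k_dp_srng_find_ring_in_mask() above).
	 */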
 135	switch (type) {
 136	case HAL_WBM2SW_RELEASE:
 137		if (ring_num == DP_RX_RELEASE_RING_NUM) {
 138			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
 139			ring_num = 0;
 140		} else {
 141			grp_mask = &ab->hw_params.ring_mask->tx[0];
 142		}
 143		break;
 144	case HAL_REO_EXCEPTION:
 145		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
 146		break;
 147	case HAL_REO_DST:
 148		grp_mask = &ab->hw_params.ring_mask->rx[0];
 149		break;
 150	case HAL_REO_STATUS:
 151		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
 152		break;
 153	case HAL_RXDMA_MONITOR_STATUS:
 154	case HAL_RXDMA_MONITOR_DST:
 155		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
 156		break;
 157	case HAL_RXDMA_DST:
 158		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
 159		break;
 160	case HAL_RXDMA_BUF:
 161		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
 162		break;
 163	case HAL_RXDMA_MONITOR_BUF:
 164	case HAL_TCL_DATA:
 165	case HAL_TCL_CMD:
 166	case HAL_REO_CMD:
 167	case HAL_SW2WBM_RELEASE:
 168	case HAL_WBM_IDLE_LINK:
 169	case HAL_TCL_STATUS:
 170	case HAL_REO_REINJECT:
 171	case HAL_CE_SRC:
 172	case HAL_CE_DST:
 173	case HAL_CE_DST_STATUS:
 174	default:
 175		return -ENOENT;
 176	}
 177
 178	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
 179}
 180
 181static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
 182				     struct hal_srng_params *ring_params,
 183				     enum hal_ring_type type, int ring_num)
 184{
 185	int msi_group_number, msi_data_count;
 186	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
 187	int ret;
 188
 189	ret = ath11k_get_user_msi_vector(ab, "DP",
 190					 &msi_data_count, &msi_data_start,
 191					 &msi_irq_start);
 192	if (ret)
 193		return;
 194
 195	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
 196							      ring_num);
 197	if (msi_group_number < 0) {
 198		ath11k_dbg(ab, ATH11K_DBG_PCI,
 199			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
 200			   type, ring_num);
 201		ring_params->msi_addr = 0;
 202		ring_params->msi_data = 0;
 203		return;
 204	}
 205
 206	if (msi_group_number > msi_data_count) {
 207		ath11k_dbg(ab, ATH11K_DBG_PCI,
 208			   "multiple msi_groups share one msi, msi_group_num %d",
 209			   msi_group_number);
 210	}
 211
 212	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
 213
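	/* Build the 64-bit MSI address from the low and high halves, and pick
	 * a vector by wrapping the group number into the available MSI data
	 * range when there are more groups than vectors.
	 */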
 214	ring_params->msi_addr = addr_lo;
 215	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
 216	ring_params->msi_data = (msi_group_number % msi_data_count)
 217		+ msi_data_start;
 218	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
 219}
 220
 221int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
 222			 enum hal_ring_type type, int ring_num,
 223			 int mac_id, int num_entries)
 224{
 225	struct hal_srng_params params = { 0 };
 226	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
 227	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
 228	int ret;
 229	bool cached = false;
 230
 231	if (max_entries < 0 || entry_sz < 0)
 232		return -EINVAL;
 233
 234	if (num_entries > max_entries)
 235		num_entries = max_entries;
 236
 237	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
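	/* The extra HAL_RING_BASE_ALIGN - 1 bytes allow the base address to
	 * be aligned manually with PTR_ALIGN() after the allocation below.
	 */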
 238
 239	if (ab->hw_params.alloc_cacheable_memory) {
 240		/* Allocate the reo dst and tx completion rings from cacheable memory */
 241		switch (type) {
 242		case HAL_REO_DST:
 243		case HAL_WBM2SW_RELEASE:
 244			cached = true;
 245			break;
 246		default:
 247			cached = false;
 248		}
 249
 250		if (cached) {
 251			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
 252			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
 253		}
 254	}
 255
 256	if (!cached)
 257		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
 258							   &ring->paddr_unaligned,
 259							   GFP_KERNEL);
 260
 261	if (!ring->vaddr_unaligned)
 262		return -ENOMEM;
 263
 264	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
 265	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
 266		      (unsigned long)ring->vaddr_unaligned);
 267
 268	params.ring_base_vaddr = ring->vaddr;
 269	params.ring_base_paddr = ring->paddr;
 270	params.num_entries = num_entries;
 271	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
 272
 273	switch (type) {
 274	case HAL_REO_DST:
 275		params.intr_batch_cntr_thres_entries =
 276					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
 277		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 278		break;
 279	case HAL_RXDMA_BUF:
 280	case HAL_RXDMA_MONITOR_BUF:
 281	case HAL_RXDMA_MONITOR_STATUS:
 282		params.low_threshold = num_entries >> 3;
 283		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
 284		params.intr_batch_cntr_thres_entries = 0;
 285		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 286		break;
 287	case HAL_WBM2SW_RELEASE:
 288		if (ring_num < 3) {
 289			params.intr_batch_cntr_thres_entries =
 290					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
 291			params.intr_timer_thres_us =
 292					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
 293			break;
 294		}
  295		/* fall through when ring_num >= 3 */
 296		fallthrough;
 297	case HAL_REO_EXCEPTION:
 298	case HAL_REO_REINJECT:
 299	case HAL_REO_CMD:
 300	case HAL_REO_STATUS:
 301	case HAL_TCL_DATA:
 302	case HAL_TCL_CMD:
 303	case HAL_TCL_STATUS:
 304	case HAL_WBM_IDLE_LINK:
 305	case HAL_SW2WBM_RELEASE:
 306	case HAL_RXDMA_DST:
 307	case HAL_RXDMA_MONITOR_DST:
 308	case HAL_RXDMA_MONITOR_DESC:
 309		params.intr_batch_cntr_thres_entries =
 310					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
 311		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
 312		break;
 313	case HAL_RXDMA_DIR_BUF:
 314		break;
 315	default:
 316		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
 317		return -EINVAL;
 318	}
 319
 320	if (cached) {
 321		params.flags |= HAL_SRNG_FLAGS_CACHED;
 322		ring->cached = 1;
 323	}
 324
 325	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
 326	if (ret < 0) {
 327		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
 328			    ret, ring_num);
 329		return ret;
 330	}
 331
 332	ring->ring_id = ret;
 333
 334	return 0;
 335}
 336
 337void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
 338{
 339	int i;
 340
 341	if (!ab->hw_params.supports_shadow_regs)
 342		return;
 343
 344	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
 345		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
 346
 347	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
 348}
 349
 350static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
 351{
 352	struct ath11k_dp *dp = &ab->dp;
 353	int i;
 354
 355	ath11k_dp_stop_shadow_timers(ab);
 356	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
 357	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
 358	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
 359	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 360		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
 361		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
 362	}
 363	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
 364	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
 365	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
 366	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
 367	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
 368}
 369
 370static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
 371{
 372	struct ath11k_dp *dp = &ab->dp;
 373	struct hal_srng *srng;
 374	int i, ret;
 375	u8 tcl_num, wbm_num;
 376
 377	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
 378				   HAL_SW2WBM_RELEASE, 0, 0,
 379				   DP_WBM_RELEASE_RING_SIZE);
 380	if (ret) {
 381		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
 382			    ret);
 383		goto err;
 384	}
 385
 386	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
 387				   DP_TCL_CMD_RING_SIZE);
 388	if (ret) {
 389		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
 390		goto err;
 391	}
 392
 393	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
 394				   0, 0, DP_TCL_STATUS_RING_SIZE);
 395	if (ret) {
 396		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
 397		goto err;
 398	}
 399
 400	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 401		tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
 402		wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
 403
 404		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
 405					   HAL_TCL_DATA, tcl_num, 0,
 406					   ab->hw_params.tx_ring_size);
 407		if (ret) {
 408			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
 409				    i, ret);
 410			goto err;
 411		}
 412
 413		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
 414					   HAL_WBM2SW_RELEASE, wbm_num, 0,
 415					   DP_TX_COMP_RING_SIZE);
 416		if (ret) {
 417			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
 418				    i, ret);
 419			goto err;
 420		}
 421
 422		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
 423		ath11k_hal_tx_init_data_ring(ab, srng);
 424
 425		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
 426					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
 427					    dp->tx_ring[i].tcl_data_ring.ring_id);
 428	}
 429
 430	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
 431				   0, 0, DP_REO_REINJECT_RING_SIZE);
 432	if (ret) {
 433		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
 434			    ret);
 435		goto err;
 436	}
 437
 438	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
 439				   DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
 440	if (ret) {
 441		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
 442		goto err;
 443	}
 444
 445	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
 446				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
 447	if (ret) {
 448		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
 449			    ret);
 450		goto err;
 451	}
 452
 453	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
 454				   0, 0, DP_REO_CMD_RING_SIZE);
 455	if (ret) {
 456		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
 457		goto err;
 458	}
 459
 460	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 461	ath11k_hal_reo_init_cmd_ring(ab, srng);
 462
 463	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
 464				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
 465				    dp->reo_cmd_ring.ring_id);
 466
 467	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
 468				   0, 0, DP_REO_STATUS_RING_SIZE);
 469	if (ret) {
 470		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
 471		goto err;
 472	}
 473
  474	/* When hash-based routing of rx packets is enabled, 32 entries mapping
  475	 * the hash values to the rings will be configured.
  476	 */
 477	ab->hw_params.hw_ops->reo_setup(ab);
 478
 479	return 0;
 480
 481err:
 482	ath11k_dp_srng_common_cleanup(ab);
 483
 484	return ret;
 485}
 486
 487static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
 488{
 489	struct ath11k_dp *dp = &ab->dp;
 490	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 491	int i;
 492
 493	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
 494		if (!slist[i].vaddr)
 495			continue;
 496
 497		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 498				  slist[i].vaddr, slist[i].paddr);
 499		slist[i].vaddr = NULL;
 500	}
 501}
 502
 503static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
 504						  int size,
 505						  u32 n_link_desc_bank,
 506						  u32 n_link_desc,
 507						  u32 last_bank_sz)
 508{
 509	struct ath11k_dp *dp = &ab->dp;
 510	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
 511	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 512	u32 n_entries_per_buf;
 513	int num_scatter_buf, scatter_idx;
 514	struct hal_wbm_link_desc *scatter_buf;
 515	int align_bytes, n_entries;
 516	dma_addr_t paddr;
 517	int rem_entries;
 518	int i;
 519	int ret = 0;
 520	u32 end_offset;
 521
 522	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
 523		ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
 524	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
 525
 526	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
 527		return -EINVAL;
 528
 529	for (i = 0; i < num_scatter_buf; i++) {
 530		slist[i].vaddr = dma_alloc_coherent(ab->dev,
 531						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 532						    &slist[i].paddr, GFP_KERNEL);
 533		if (!slist[i].vaddr) {
 534			ret = -ENOMEM;
 535			goto err;
 536		}
 537	}
 538
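	/* Record the physical address of every link descriptor in every bank
	 * into the scatter buffers, advancing to the next scatter buffer each
	 * time the current one runs out of entries.
	 */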
 539	scatter_idx = 0;
 540	scatter_buf = slist[scatter_idx].vaddr;
 541	rem_entries = n_entries_per_buf;
 542
 543	for (i = 0; i < n_link_desc_bank; i++) {
 544		align_bytes = link_desc_banks[i].vaddr -
 545			      link_desc_banks[i].vaddr_unaligned;
 546		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
 547			     HAL_LINK_DESC_SIZE;
 548		paddr = link_desc_banks[i].paddr;
 549		while (n_entries) {
 550			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
 551			n_entries--;
 552			paddr += HAL_LINK_DESC_SIZE;
 553			if (rem_entries) {
 554				rem_entries--;
 555				scatter_buf++;
 556				continue;
 557			}
 558
 559			rem_entries = n_entries_per_buf;
 560			scatter_idx++;
 561			scatter_buf = slist[scatter_idx].vaddr;
 562		}
 563	}
 564
 565	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
 566		     sizeof(struct hal_wbm_link_desc);
 567	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
 568					n_link_desc, end_offset);
 569
 570	return 0;
 571
 572err:
 573	ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 574
 575	return ret;
 576}
 577
 578static void
 579ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
 580			      struct dp_link_desc_bank *link_desc_banks)
 581{
 582	int i;
 583
 584	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
 585		if (link_desc_banks[i].vaddr_unaligned) {
 586			dma_free_coherent(ab->dev,
 587					  link_desc_banks[i].size,
 588					  link_desc_banks[i].vaddr_unaligned,
 589					  link_desc_banks[i].paddr_unaligned);
 590			link_desc_banks[i].vaddr_unaligned = NULL;
 591		}
 592	}
 593}
 594
 595static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
 596					  struct dp_link_desc_bank *desc_bank,
 597					  int n_link_desc_bank,
 598					  int last_bank_sz)
 599{
 600	struct ath11k_dp *dp = &ab->dp;
 601	int i;
 602	int ret = 0;
 603	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
 604
 605	for (i = 0; i < n_link_desc_bank; i++) {
 606		if (i == (n_link_desc_bank - 1) && last_bank_sz)
 607			desc_sz = last_bank_sz;
 608
 609		desc_bank[i].vaddr_unaligned =
 610					dma_alloc_coherent(ab->dev, desc_sz,
 611							   &desc_bank[i].paddr_unaligned,
 612							   GFP_KERNEL);
 613		if (!desc_bank[i].vaddr_unaligned) {
 614			ret = -ENOMEM;
 615			goto err;
 616		}
 617
 618		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
 619					       HAL_LINK_DESC_ALIGN);
 620		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
 621				     ((unsigned long)desc_bank[i].vaddr -
 622				      (unsigned long)desc_bank[i].vaddr_unaligned);
 623		desc_bank[i].size = desc_sz;
 624	}
 625
 626	return 0;
 627
 628err:
 629	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
 630
 631	return ret;
 632}
 633
 634void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
 635				 struct dp_link_desc_bank *desc_bank,
 636				 u32 ring_type, struct dp_srng *ring)
 637{
 638	ath11k_dp_link_desc_bank_free(ab, desc_bank);
 639
 640	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
 641		ath11k_dp_srng_cleanup(ab, ring);
 642		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 643	}
 644}
 645
 646static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
 647{
 648	struct ath11k_dp *dp = &ab->dp;
 649	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
 650	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
 651	int ret = 0;
 652
 653	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
 654			   HAL_NUM_MPDUS_PER_LINK_DESC;
 655
 656	n_mpdu_queue_desc = n_mpdu_link_desc /
 657			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
 658
 659	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
 660			       DP_AVG_MSDUS_PER_FLOW) /
 661			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;
 662
 663	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
 664			       DP_AVG_MSDUS_PER_MPDU) /
 665			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;
 666
 667	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
 668		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;
 669
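	/* If the total is not already a power of two, round it up to the next
	 * power of two (fls() returns the position of the most significant
	 * set bit, counting from 1).
	 */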
 670	if (*n_link_desc & (*n_link_desc - 1))
 671		*n_link_desc = 1 << fls(*n_link_desc);
 672
 673	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
 674				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
 675	if (ret) {
 676		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
 677		return ret;
 678	}
 679	return ret;
 680}
 681
 682int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
 683			      struct dp_link_desc_bank *link_desc_banks,
 684			      u32 ring_type, struct hal_srng *srng,
 685			      u32 n_link_desc)
 686{
 687	u32 tot_mem_sz;
 688	u32 n_link_desc_bank, last_bank_sz;
 689	u32 entry_sz, align_bytes, n_entries;
 690	u32 paddr;
 691	u32 *desc;
 692	int i, ret;
 693
 694	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
 695	tot_mem_sz += HAL_LINK_DESC_ALIGN;
 696
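	/* Split the total into banks of at most DP_LINK_DESC_ALLOC_SIZE_THRESH
	 * bytes; each bank loses up to HAL_LINK_DESC_ALIGN bytes to alignment,
	 * and a short final bank picks up the remainder.
	 */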
 697	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
 698		n_link_desc_bank = 1;
 699		last_bank_sz = tot_mem_sz;
 700	} else {
 701		n_link_desc_bank = tot_mem_sz /
 702				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 703				    HAL_LINK_DESC_ALIGN);
 704		last_bank_sz = tot_mem_sz %
 705			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 706				HAL_LINK_DESC_ALIGN);
 707
 708		if (last_bank_sz)
 709			n_link_desc_bank += 1;
 710	}
 711
 712	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
 713		return -EINVAL;
 714
 715	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
 716					     n_link_desc_bank, last_bank_sz);
 717	if (ret)
 718		return ret;
 719
 720	/* Setup link desc idle list for HW internal usage */
 721	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
 722	tot_mem_sz = entry_sz * n_link_desc;
 723
  724	/* Setup scatter desc list when the total memory requirement exceeds DP_LINK_DESC_ALLOC_SIZE_THRESH */
 725	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
 726	    ring_type != HAL_RXDMA_MONITOR_DESC) {
 727		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
 728							     n_link_desc_bank,
 729							     n_link_desc,
 730							     last_bank_sz);
 731		if (ret) {
  732			ath11k_warn(ab, "failed to setup scattering idle list descriptor :%d\n",
 733				    ret);
 734			goto fail_desc_bank_free;
 735		}
 736
 737		return 0;
 738	}
 739
 740	spin_lock_bh(&srng->lock);
 741
 742	ath11k_hal_srng_access_begin(ab, srng);
 743
 744	for (i = 0; i < n_link_desc_bank; i++) {
 745		align_bytes = link_desc_banks[i].vaddr -
 746			      link_desc_banks[i].vaddr_unaligned;
 747		n_entries = (link_desc_banks[i].size - align_bytes) /
 748			    HAL_LINK_DESC_SIZE;
 749		paddr = link_desc_banks[i].paddr;
 750		while (n_entries &&
 751		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
 752			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
 753						      i, paddr);
 754			n_entries--;
 755			paddr += HAL_LINK_DESC_SIZE;
 756		}
 757	}
 758
 759	ath11k_hal_srng_access_end(ab, srng);
 760
 761	spin_unlock_bh(&srng->lock);
 762
 763	return 0;
 764
 765fail_desc_bank_free:
 766	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
 767
 768	return ret;
 769}
 770
 771int ath11k_dp_service_srng(struct ath11k_base *ab,
 772			   struct ath11k_ext_irq_grp *irq_grp,
 773			   int budget)
 774{
 775	struct napi_struct *napi = &irq_grp->napi;
 776	const struct ath11k_hw_hal_params *hal_params;
 777	int grp_id = irq_grp->grp_id;
 778	int work_done = 0;
 779	int i, j;
 780	int tot_work_done = 0;
 781
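	/* Each handler below consumes part of the NAPI budget; once the
	 * budget is exhausted, return the total so NAPI can reschedule the
	 * group.
	 */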
 782	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
 783		if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
 784		    ab->hw_params.ring_mask->tx[grp_id])
 785			ath11k_dp_tx_completion_handler(ab, i);
 786	}
 787
 788	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
 789		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
 790		budget -= work_done;
 791		tot_work_done += work_done;
 792		if (budget <= 0)
 793			goto done;
 794	}
 795
 796	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
 797		work_done = ath11k_dp_rx_process_wbm_err(ab,
 798							 napi,
 799							 budget);
 800		budget -= work_done;
 801		tot_work_done += work_done;
 802
 803		if (budget <= 0)
 804			goto done;
 805	}
 806
 807	if (ab->hw_params.ring_mask->rx[grp_id]) {
  808		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
 809		work_done = ath11k_dp_process_rx(ab, i, napi,
 810						 budget);
 811		budget -= work_done;
 812		tot_work_done += work_done;
 813		if (budget <= 0)
 814			goto done;
 815	}
 816
 817	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
 818		for (i = 0; i < ab->num_radios; i++) {
 819			for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 820				int id = i * ab->hw_params.num_rxmda_per_pdev + j;
 821
 822				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
 823					BIT(id)) {
 824					work_done =
 825					ath11k_dp_rx_process_mon_rings(ab,
 826								       id,
 827								       napi, budget);
 828					budget -= work_done;
 829					tot_work_done += work_done;
 830
 831					if (budget <= 0)
 832						goto done;
 833				}
 834			}
 835		}
 836	}
 837
 838	if (ab->hw_params.ring_mask->reo_status[grp_id])
 839		ath11k_dp_process_reo_status(ab);
 840
 841	for (i = 0; i < ab->num_radios; i++) {
 842		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 843			int id = i * ab->hw_params.num_rxmda_per_pdev + j;
 844
 845			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
 846				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
 847				budget -= work_done;
 848				tot_work_done += work_done;
 849			}
 850
 851			if (budget <= 0)
 852				goto done;
 853
 854			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
 855				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
 856				struct ath11k_pdev_dp *dp = &ar->dp;
 857				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
 858
 859				hal_params = ab->hw_params.hal_params;
 860				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
 861							   hal_params->rx_buf_rbm);
 862			}
 863		}
 864	}
 865	/* TODO: Implement handler for other interrupts */
 866
 867done:
 868	return tot_work_done;
 869}
 870EXPORT_SYMBOL(ath11k_dp_service_srng);
 871
 872void ath11k_dp_pdev_free(struct ath11k_base *ab)
 873{
 874	struct ath11k *ar;
 875	int i;
 876
 877	del_timer_sync(&ab->mon_reap_timer);
 878
 879	for (i = 0; i < ab->num_radios; i++) {
 880		ar = ab->pdevs[i].ar;
 881		ath11k_dp_rx_pdev_free(ab, i);
 882		ath11k_debugfs_unregister(ar);
 883		ath11k_dp_rx_pdev_mon_detach(ar);
 884	}
 885}
 886
 887void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
 888{
 889	struct ath11k *ar;
 890	struct ath11k_pdev_dp *dp;
 891	int i;
 892	int j;
 893
  894	for (i = 0; i < ab->num_radios; i++) {
 895		ar = ab->pdevs[i].ar;
 896		dp = &ar->dp;
 897		dp->mac_id = i;
 898		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
 899		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
 900		atomic_set(&dp->num_tx_pending, 0);
 901		init_waitqueue_head(&dp->tx_empty_waitq);
 902		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 903			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
 904			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
 905		}
 906		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
 907		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
 908	}
 909}
 910
 911int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
 912{
 913	struct ath11k *ar;
 914	int ret;
 915	int i;
 916
  917	/* TODO: Per-pdev rx ring, unlike tx ring which is mapped to different ACs */
 918	for (i = 0; i < ab->num_radios; i++) {
 919		ar = ab->pdevs[i].ar;
 920		ret = ath11k_dp_rx_pdev_alloc(ab, i);
 921		if (ret) {
 922			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
 923				    i);
 924			goto err;
 925		}
 926		ret = ath11k_dp_rx_pdev_mon_attach(ar);
 927		if (ret) {
 928			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
 929				    i);
 930			goto err;
 931		}
 932	}
 933
 934	return 0;
 935
 936err:
 937	ath11k_dp_pdev_free(ab);
 938
 939	return ret;
 940}
 941
 942int ath11k_dp_htt_connect(struct ath11k_dp *dp)
 943{
 944	struct ath11k_htc_svc_conn_req conn_req;
 945	struct ath11k_htc_svc_conn_resp conn_resp;
 946	int status;
 947
 948	memset(&conn_req, 0, sizeof(conn_req));
 949	memset(&conn_resp, 0, sizeof(conn_resp));
 950
 951	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
 952	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
 953
 954	/* connect to control service */
 955	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
 956
 957	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
 958					    &conn_resp);
 959
 960	if (status)
 961		return status;
 962
 963	dp->eid = conn_resp.eid;
 964
 965	return 0;
 966}
 967
 968static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
 969{
  970	 /* When v2_map_support is true: for STA mode, enable the address
  971	  * search index; TCL uses the ast_hash value in the descriptor.
  972	  * When v2_map_support is false: for STA mode, don't enable the
  973	  * address search index.
  974	  */
 975	switch (arvif->vdev_type) {
 976	case WMI_VDEV_TYPE_STA:
 977		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
 978			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 979			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
 980		} else {
 981			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
 982			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
 983		}
 984		break;
 985	case WMI_VDEV_TYPE_AP:
 986	case WMI_VDEV_TYPE_IBSS:
 987		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 988		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
 989		break;
 990	case WMI_VDEV_TYPE_MONITOR:
 991	default:
 992		return;
 993	}
 994}
 995
 996void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
 997{
 998	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
 999			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
1000					  arvif->vdev_id) |
1001			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
1002					  ar->pdev->pdev_id);
1003
1004	/* set HTT extension valid bit to 0 by default */
1005	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
1006
1007	ath11k_dp_update_vdev_search(arvif);
1008}
1009
1010static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
1011{
1012	struct ath11k_base *ab = ctx;
1013	struct sk_buff *msdu = skb;
1014
1015	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
1016			 DMA_TO_DEVICE);
1017
1018	dev_kfree_skb_any(msdu);
1019
1020	return 0;
1021}
1022
1023void ath11k_dp_free(struct ath11k_base *ab)
1024{
1025	struct ath11k_dp *dp = &ab->dp;
1026	int i;
1027
1028	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1029				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1030
1031	ath11k_dp_srng_common_cleanup(ab);
1032
1033	ath11k_dp_reo_cmd_list_cleanup(ab);
1034
1035	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
1036		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
1037		idr_for_each(&dp->tx_ring[i].txbuf_idr,
1038			     ath11k_dp_tx_pending_cleanup, ab);
1039		idr_destroy(&dp->tx_ring[i].txbuf_idr);
1040		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
1041		kfree(dp->tx_ring[i].tx_status);
1042	}
1043
1044	/* Deinit any SOC level resource */
1045}
1046
1047int ath11k_dp_alloc(struct ath11k_base *ab)
1048{
1049	struct ath11k_dp *dp = &ab->dp;
1050	struct hal_srng *srng = NULL;
1051	size_t size = 0;
1052	u32 n_link_desc = 0;
1053	int ret;
1054	int i;
1055
1056	dp->ab = ab;
1057
1058	INIT_LIST_HEAD(&dp->reo_cmd_list);
1059	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
1060	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
1061	spin_lock_init(&dp->reo_cmd_lock);
1062
1063	dp->reo_cmd_cache_flush_count = 0;
1064
1065	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
1066	if (ret) {
1067		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
1068		return ret;
1069	}
1070
1071	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
1072
1073	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
1074					HAL_WBM_IDLE_LINK, srng, n_link_desc);
1075	if (ret) {
1076		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
1077		return ret;
1078	}
1079
1080	ret = ath11k_dp_srng_common_setup(ab);
1081	if (ret)
1082		goto fail_link_desc_cleanup;
1083
1084	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
1085
1086	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
1087		idr_init(&dp->tx_ring[i].txbuf_idr);
1088		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
1089		dp->tx_ring[i].tcl_data_ring_id = i;
1090
1091		dp->tx_ring[i].tx_status_head = 0;
1092		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
1093		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
1094		if (!dp->tx_ring[i].tx_status) {
1095			ret = -ENOMEM;
1096			goto fail_cmn_srng_cleanup;
1097		}
1098	}
1099
1100	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
1101		ath11k_hal_tx_set_dscp_tid_map(ab, i);
1102
1103	/* Init any SOC level resource for DP */
1104
1105	return 0;
1106
1107fail_cmn_srng_cleanup:
1108	ath11k_dp_srng_common_cleanup(ab);
1109
1110fail_link_desc_cleanup:
1111	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1112				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1113
1114	return ret;
1115}
1116
1117static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
1118{
1119	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
1120								 t, timer);
1121	struct ath11k_base *ab = update_timer->ab;
1122	struct hal_srng	*srng = &ab->hal.srng_list[update_timer->ring_id];
1123
1124	spin_lock_bh(&srng->lock);
1125
 1126	/* When the timer fires, the handler checks whether any new TX
 1127	 * has happened. The handler updates the HP only when there were
 1128	 * no TX operations during the timeout interval, and then stops
 1129	 * the timer. The timer is started again when TX happens.
 1130	 */
1131	if (update_timer->timer_tx_num != update_timer->tx_num) {
1132		update_timer->timer_tx_num = update_timer->tx_num;
1133		mod_timer(&update_timer->timer, jiffies +
1134		  msecs_to_jiffies(update_timer->interval));
1135	} else {
1136		update_timer->started = false;
1137		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
1138	}
1139
1140	spin_unlock_bh(&srng->lock);
1141}
1142
1143void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
1144				  struct hal_srng *srng,
1145				  struct ath11k_hp_update_timer *update_timer)
1146{
1147	lockdep_assert_held(&srng->lock);
1148
1149	if (!ab->hw_params.supports_shadow_regs)
1150		return;
1151
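	/* Count this TX operation; the handler compares the count later to
	 * decide whether the hardware head pointer still needs updating.
	 */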
1152	update_timer->tx_num++;
1153
1154	if (update_timer->started)
1155		return;
1156
1157	update_timer->started = true;
1158	update_timer->timer_tx_num = update_timer->tx_num;
1159	mod_timer(&update_timer->timer, jiffies +
1160		  msecs_to_jiffies(update_timer->interval));
1161}
1162
1163void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
1164				 struct ath11k_hp_update_timer *update_timer)
1165{
1166	if (!ab->hw_params.supports_shadow_regs)
1167		return;
1168
1169	if (!update_timer->init)
1170		return;
1171
1172	del_timer_sync(&update_timer->timer);
1173}
1174
1175void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
1176				 struct ath11k_hp_update_timer *update_timer,
1177				 u32 interval, u32 ring_id)
1178{
1179	if (!ab->hw_params.supports_shadow_regs)
1180		return;
1181
1182	update_timer->tx_num = 0;
1183	update_timer->timer_tx_num = 0;
1184	update_timer->ab = ab;
1185	update_timer->ring_id = ring_id;
1186	update_timer->interval = interval;
1187	update_timer->init = true;
1188	timer_setup(&update_timer->timer,
1189		    ath11k_dp_shadow_timer_handler, 0);
1190}
v5.14.15
   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
   4 */
   5
   6#include <crypto/hash.h>
   7#include "core.h"
   8#include "dp_tx.h"
   9#include "hal_tx.h"
  10#include "hif.h"
  11#include "debug.h"
  12#include "dp_rx.h"
  13#include "peer.h"
  14
  15static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
  16					  struct sk_buff *skb)
  17{
  18	dev_kfree_skb_any(skb);
  19}
  20
  21void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
  22{
  23	struct ath11k_base *ab = ar->ab;
  24	struct ath11k_peer *peer;
  25
  26	/* TODO: Any other peer specific DP cleanup */
  27
  28	spin_lock_bh(&ab->base_lock);
  29	peer = ath11k_peer_find(ab, vdev_id, addr);
  30	if (!peer) {
  31		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
  32			    addr, vdev_id);
  33		spin_unlock_bh(&ab->base_lock);
  34		return;
  35	}
  36
  37	ath11k_peer_rx_tid_cleanup(ar, peer);
  38	crypto_free_shash(peer->tfm_mmic);
  39	spin_unlock_bh(&ab->base_lock);
  40}
  41
  42int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
  43{
  44	struct ath11k_base *ab = ar->ab;
  45	struct ath11k_peer *peer;
  46	u32 reo_dest;
  47	int ret = 0, tid;
  48
  49	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
  50	reo_dest = ar->dp.mac_id + 1;
  51	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
  52					WMI_PEER_SET_DEFAULT_ROUTING,
  53					DP_RX_HASH_ENABLE | (reo_dest << 1));
  54
  55	if (ret) {
  56		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
  57			    ret, addr, vdev_id);
  58		return ret;
  59	}
  60
  61	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
  62		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
  63					       HAL_PN_TYPE_NONE);
  64		if (ret) {
  65			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
  66				    tid, ret);
  67			goto peer_clean;
  68		}
  69	}
  70
  71	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
  72	if (ret) {
  73		ath11k_warn(ab, "failed to setup rx defrag context\n");
  74		return ret;
  75	}
  76
  77	/* TODO: Setup other peer specific resource used in data path */
  78
  79	return 0;
  80
  81peer_clean:
  82	spin_lock_bh(&ab->base_lock);
  83
  84	peer = ath11k_peer_find(ab, vdev_id, addr);
  85	if (!peer) {
  86		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
  87		spin_unlock_bh(&ab->base_lock);
  88		return -ENOENT;
  89	}
  90
  91	for (; tid >= 0; tid--)
  92		ath11k_peer_rx_tid_delete(ar, peer, tid);
  93
  94	spin_unlock_bh(&ab->base_lock);
  95
  96	return ret;
  97}
  98
  99void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
 100{
 101	if (!ring->vaddr_unaligned)
 102		return;
 103
 104	dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
 105			  ring->paddr_unaligned);
 106
 107	ring->vaddr_unaligned = NULL;
 108}
 109
 110static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
 111{
 112	int ext_group_num;
 113	u8 mask = 1 << ring_num;
 114
 115	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
 116	     ext_group_num++) {
 117		if (mask & grp_mask[ext_group_num])
 118			return ext_group_num;
 119	}
 120
 121	return -ENOENT;
 122}
 123
 124static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
 125					      enum hal_ring_type type, int ring_num)
 126{
 127	const u8 *grp_mask;
 128
 129	switch (type) {
 130	case HAL_WBM2SW_RELEASE:
 131		if (ring_num < 3) {
 132			grp_mask = &ab->hw_params.ring_mask->tx[0];
 133		} else if (ring_num == 3) {
 134			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
 135			ring_num = 0;
 136		} else {
 137			return -ENOENT;
 138		}
 139		break;
 140	case HAL_REO_EXCEPTION:
 141		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
 142		break;
 143	case HAL_REO_DST:
 144		grp_mask = &ab->hw_params.ring_mask->rx[0];
 145		break;
 146	case HAL_REO_STATUS:
 147		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
 148		break;
 149	case HAL_RXDMA_MONITOR_STATUS:
 150	case HAL_RXDMA_MONITOR_DST:
 151		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
 152		break;
 153	case HAL_RXDMA_DST:
 154		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
 155		break;
 156	case HAL_RXDMA_BUF:
 157		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
 158		break;
 159	case HAL_RXDMA_MONITOR_BUF:
 160	case HAL_TCL_DATA:
 161	case HAL_TCL_CMD:
 162	case HAL_REO_CMD:
 163	case HAL_SW2WBM_RELEASE:
 164	case HAL_WBM_IDLE_LINK:
 165	case HAL_TCL_STATUS:
 166	case HAL_REO_REINJECT:
 167	case HAL_CE_SRC:
 168	case HAL_CE_DST:
 169	case HAL_CE_DST_STATUS:
 170	default:
 171		return -ENOENT;
 172	}
 173
 174	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
 175}
 176
 177static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
 178				     struct hal_srng_params *ring_params,
 179				     enum hal_ring_type type, int ring_num)
 180{
 181	int msi_group_number, msi_data_count;
 182	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
 183	int ret;
 184
 185	ret = ath11k_get_user_msi_vector(ab, "DP",
 186					 &msi_data_count, &msi_data_start,
 187					 &msi_irq_start);
 188	if (ret)
 189		return;
 190
 191	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
 192							      ring_num);
 193	if (msi_group_number < 0) {
 194		ath11k_dbg(ab, ATH11K_DBG_PCI,
 195			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
 196			   type, ring_num);
 197		ring_params->msi_addr = 0;
 198		ring_params->msi_data = 0;
 199		return;
 200	}
 201
 202	if (msi_group_number > msi_data_count) {
 203		ath11k_dbg(ab, ATH11K_DBG_PCI,
 204			   "multiple msi_groups share one msi, msi_group_num %d",
 205			   msi_group_number);
 206	}
 207
 208	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
 209
 210	ring_params->msi_addr = addr_lo;
 211	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
 212	ring_params->msi_data = (msi_group_number % msi_data_count)
 213		+ msi_data_start;
 214	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
 215}
 216
 217int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
 218			 enum hal_ring_type type, int ring_num,
 219			 int mac_id, int num_entries)
 220{
 221	struct hal_srng_params params = { 0 };
 222	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
 223	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
 224	int ret;
 225
 226	if (max_entries < 0 || entry_sz < 0)
 227		return -EINVAL;
 228
 229	if (num_entries > max_entries)
 230		num_entries = max_entries;
 231
 232	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
 233	ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
 234						   &ring->paddr_unaligned,
 235						   GFP_KERNEL);
 236	if (!ring->vaddr_unaligned)
 237		return -ENOMEM;
 238
 239	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
 240	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
 241		      (unsigned long)ring->vaddr_unaligned);
 242
 243	params.ring_base_vaddr = ring->vaddr;
 244	params.ring_base_paddr = ring->paddr;
 245	params.num_entries = num_entries;
 246	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
 247
 248	switch (type) {
 249	case HAL_REO_DST:
 250		params.intr_batch_cntr_thres_entries =
 251					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
 252		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 253		break;
 254	case HAL_RXDMA_BUF:
 255	case HAL_RXDMA_MONITOR_BUF:
 256	case HAL_RXDMA_MONITOR_STATUS:
 257		params.low_threshold = num_entries >> 3;
 258		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
 259		params.intr_batch_cntr_thres_entries = 0;
 260		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 261		break;
 262	case HAL_WBM2SW_RELEASE:
 263		if (ring_num < 3) {
 264			params.intr_batch_cntr_thres_entries =
 265					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
 266			params.intr_timer_thres_us =
 267					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
 268			break;
 269		}
  270		/* fall through when ring_num >= 3 */
 271		fallthrough;
 272	case HAL_REO_EXCEPTION:
 273	case HAL_REO_REINJECT:
 274	case HAL_REO_CMD:
 275	case HAL_REO_STATUS:
 276	case HAL_TCL_DATA:
 277	case HAL_TCL_CMD:
 278	case HAL_TCL_STATUS:
 279	case HAL_WBM_IDLE_LINK:
 280	case HAL_SW2WBM_RELEASE:
 281	case HAL_RXDMA_DST:
 282	case HAL_RXDMA_MONITOR_DST:
 283	case HAL_RXDMA_MONITOR_DESC:
 284		params.intr_batch_cntr_thres_entries =
 285					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
 286		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
 287		break;
 288	case HAL_RXDMA_DIR_BUF:
 289		break;
 290	default:
 291		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
 292		return -EINVAL;
 293	}
 294
 295	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
 296	if (ret < 0) {
 297		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
 298			    ret, ring_num);
 299		return ret;
 300	}
 301
 302	ring->ring_id = ret;
 303
 304	return 0;
 305}
 306
 307void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
 308{
 309	int i;
 310
 311	if (!ab->hw_params.supports_shadow_regs)
 312		return;
 313
 314	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
 315		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
 316
 317	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
 318}
 319
 320static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
 321{
 322	struct ath11k_dp *dp = &ab->dp;
 323	int i;
 324
 325	ath11k_dp_stop_shadow_timers(ab);
 326	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
 327	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
 328	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
 329	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
 330		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
 331		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
 332	}
 333	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
 334	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
 335	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
 336	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
 337	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
 338}
 339
 340static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
 341{
 342	struct ath11k_dp *dp = &ab->dp;
 343	struct hal_srng *srng;
 344	int i, ret;
 345
 346	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
 347				   HAL_SW2WBM_RELEASE, 0, 0,
 348				   DP_WBM_RELEASE_RING_SIZE);
 349	if (ret) {
 350		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
 351			    ret);
 352		goto err;
 353	}
 354
 355	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
 356				   DP_TCL_CMD_RING_SIZE);
 357	if (ret) {
 358		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
 359		goto err;
 360	}
 361
 362	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
 363				   0, 0, DP_TCL_STATUS_RING_SIZE);
 364	if (ret) {
 365		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
 366		goto err;
 367	}
 368
 369	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
 370		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
 371					   HAL_TCL_DATA, i, 0,
 372					   DP_TCL_DATA_RING_SIZE);
 373		if (ret) {
 374			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
 375				    i, ret);
 376			goto err;
 377		}
 378
 379		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
 380					   HAL_WBM2SW_RELEASE, i, 0,
 381					   DP_TX_COMP_RING_SIZE);
 382		if (ret) {
 383			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
 384				    i, ret);
 385			goto err;
 386		}
 387
 388		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
 389		ath11k_hal_tx_init_data_ring(ab, srng);
 390
 391		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
 392					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
 393					    dp->tx_ring[i].tcl_data_ring.ring_id);
 394	}
 395
 396	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
 397				   0, 0, DP_REO_REINJECT_RING_SIZE);
 398	if (ret) {
 399		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
 400			    ret);
 401		goto err;
 402	}
 403
 404	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
 405				   3, 0, DP_RX_RELEASE_RING_SIZE);
 406	if (ret) {
 407		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
 408		goto err;
 409	}
 410
 411	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
 412				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
 413	if (ret) {
 414		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
 415			    ret);
 416		goto err;
 417	}
 418
 419	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
 420				   0, 0, DP_REO_CMD_RING_SIZE);
 421	if (ret) {
 422		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
 423		goto err;
 424	}
 425
 426	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 427	ath11k_hal_reo_init_cmd_ring(ab, srng);
 428
 429	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
 430				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
 431				    dp->reo_cmd_ring.ring_id);
 432
 433	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
 434				   0, 0, DP_REO_STATUS_RING_SIZE);
 435	if (ret) {
 436		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
 437		goto err;
 438	}
 439
  440	/* When hash-based routing of rx packets is enabled, 32 entries mapping
  441	 * the hash values to the rings will be configured.
  442	 */
 443	ab->hw_params.hw_ops->reo_setup(ab);
 444
 445	return 0;
 446
 447err:
 448	ath11k_dp_srng_common_cleanup(ab);
 449
 450	return ret;
 451}
 452
 453static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
 454{
 455	struct ath11k_dp *dp = &ab->dp;
 456	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 457	int i;
 458
 459	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
 460		if (!slist[i].vaddr)
 461			continue;
 462
 463		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 464				  slist[i].vaddr, slist[i].paddr);
 465		slist[i].vaddr = NULL;
 466	}
 467}
 468
 469static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
 470						  int size,
 471						  u32 n_link_desc_bank,
 472						  u32 n_link_desc,
 473						  u32 last_bank_sz)
 474{
 475	struct ath11k_dp *dp = &ab->dp;
 476	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
 477	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 478	u32 n_entries_per_buf;
 479	int num_scatter_buf, scatter_idx;
 480	struct hal_wbm_link_desc *scatter_buf;
 481	int align_bytes, n_entries;
 482	dma_addr_t paddr;
 483	int rem_entries;
 484	int i;
 485	int ret = 0;
 486	u32 end_offset;
 487
 488	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
 489		ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
 490	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
 491
 492	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
 493		return -EINVAL;
 494
 495	for (i = 0; i < num_scatter_buf; i++) {
 496		slist[i].vaddr = dma_alloc_coherent(ab->dev,
 497						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 498						    &slist[i].paddr, GFP_KERNEL);
 499		if (!slist[i].vaddr) {
 500			ret = -ENOMEM;
 501			goto err;
 502		}
 503	}
 504
 505	scatter_idx = 0;
 506	scatter_buf = slist[scatter_idx].vaddr;
 507	rem_entries = n_entries_per_buf;
 508
 509	for (i = 0; i < n_link_desc_bank; i++) {
 510		align_bytes = link_desc_banks[i].vaddr -
 511			      link_desc_banks[i].vaddr_unaligned;
 512		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
 513			     HAL_LINK_DESC_SIZE;
 514		paddr = link_desc_banks[i].paddr;
 515		while (n_entries) {
 516			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
 517			n_entries--;
 518			paddr += HAL_LINK_DESC_SIZE;
 519			if (rem_entries) {
 520				rem_entries--;
 521				scatter_buf++;
 522				continue;
 523			}
 524
 525			rem_entries = n_entries_per_buf;
 526			scatter_idx++;
 527			scatter_buf = slist[scatter_idx].vaddr;
 528		}
 529	}
 530
 531	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
 532		     sizeof(struct hal_wbm_link_desc);
 533	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
 534					n_link_desc, end_offset);
 535
 536	return 0;
 537
 538err:
 539	ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 540
 541	return ret;
 542}
 543
 544static void
 545ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
 546			      struct dp_link_desc_bank *link_desc_banks)
 547{
 548	int i;
 549
 550	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
 551		if (link_desc_banks[i].vaddr_unaligned) {
 552			dma_free_coherent(ab->dev,
 553					  link_desc_banks[i].size,
 554					  link_desc_banks[i].vaddr_unaligned,
 555					  link_desc_banks[i].paddr_unaligned);
 556			link_desc_banks[i].vaddr_unaligned = NULL;
 557		}
 558	}
 559}
 560
 561static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
 562					  struct dp_link_desc_bank *desc_bank,
 563					  int n_link_desc_bank,
 564					  int last_bank_sz)
 565{
 566	struct ath11k_dp *dp = &ab->dp;
 567	int i;
 568	int ret = 0;
 569	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
 570
 571	for (i = 0; i < n_link_desc_bank; i++) {
 572		if (i == (n_link_desc_bank - 1) && last_bank_sz)
 573			desc_sz = last_bank_sz;
 574
 575		desc_bank[i].vaddr_unaligned =
 576					dma_alloc_coherent(ab->dev, desc_sz,
 577							   &desc_bank[i].paddr_unaligned,
 578							   GFP_KERNEL);
 579		if (!desc_bank[i].vaddr_unaligned) {
 580			ret = -ENOMEM;
 581			goto err;
 582		}
 583
 584		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
 585					       HAL_LINK_DESC_ALIGN);
 586		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
 587				     ((unsigned long)desc_bank[i].vaddr -
 588				      (unsigned long)desc_bank[i].vaddr_unaligned);
 589		desc_bank[i].size = desc_sz;
 590	}
 591
 592	return 0;
 593
 594err:
 595	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
 596
 597	return ret;
 598}
 599
 600void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
 601				 struct dp_link_desc_bank *desc_bank,
 602				 u32 ring_type, struct dp_srng *ring)
 603{
 604	ath11k_dp_link_desc_bank_free(ab, desc_bank);
 605
 606	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
 607		ath11k_dp_srng_cleanup(ab, ring);
 608		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 609	}
 610}
 611
 612static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
 613{
 614	struct ath11k_dp *dp = &ab->dp;
 615	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
 616	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
 617	int ret = 0;
 618
 619	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
 620			   HAL_NUM_MPDUS_PER_LINK_DESC;
 621
 622	n_mpdu_queue_desc = n_mpdu_link_desc /
 623			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
 624
 625	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
 626			       DP_AVG_MSDUS_PER_FLOW) /
 627			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;
 628
 629	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
 630			       DP_AVG_MSDUS_PER_MPDU) /
 631			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;
 632
 633	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
 634		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;
 635
 636	if (*n_link_desc & (*n_link_desc - 1))
 637		*n_link_desc = 1 << fls(*n_link_desc);
 638
 639	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
 640				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
 641	if (ret) {
 642		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
 643		return ret;
 644	}
 645	return ret;
 646}
 647
 648int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
 649			      struct dp_link_desc_bank *link_desc_banks,
 650			      u32 ring_type, struct hal_srng *srng,
 651			      u32 n_link_desc)
 652{
 653	u32 tot_mem_sz;
 654	u32 n_link_desc_bank, last_bank_sz;
 655	u32 entry_sz, align_bytes, n_entries;
 656	u32 paddr;
 657	u32 *desc;
 658	int i, ret;
 659
 660	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
 661	tot_mem_sz += HAL_LINK_DESC_ALIGN;
 662
 663	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
 664		n_link_desc_bank = 1;
 665		last_bank_sz = tot_mem_sz;
 666	} else {
 667		n_link_desc_bank = tot_mem_sz /
 668				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 669				    HAL_LINK_DESC_ALIGN);
 670		last_bank_sz = tot_mem_sz %
 671			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 672				HAL_LINK_DESC_ALIGN);
 673
 674		if (last_bank_sz)
 675			n_link_desc_bank += 1;
 676	}
 677
 678	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
 679		return -EINVAL;
 680
 681	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
 682					     n_link_desc_bank, last_bank_sz);
 683	if (ret)
 684		return ret;
 685
 686	/* Setup link desc idle list for HW internal usage */
 687	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
 688	tot_mem_sz = entry_sz * n_link_desc;
 689
  690	/* Setup scatter desc list when the total memory requirement exceeds the threshold */
 691	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
 692	    ring_type != HAL_RXDMA_MONITOR_DESC) {
 693		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
 694							     n_link_desc_bank,
 695							     n_link_desc,
 696							     last_bank_sz);
 697		if (ret) {
  698			ath11k_warn(ab, "failed to setup scatter idle list descriptor: %d\n",
 699				    ret);
 700			goto fail_desc_bank_free;
 701		}
 702
 703		return 0;
 704	}
 705
 706	spin_lock_bh(&srng->lock);
 707
 708	ath11k_hal_srng_access_begin(ab, srng);
 709
 710	for (i = 0; i < n_link_desc_bank; i++) {
 711		align_bytes = link_desc_banks[i].vaddr -
 712			      link_desc_banks[i].vaddr_unaligned;
 713		n_entries = (link_desc_banks[i].size - align_bytes) /
 714			    HAL_LINK_DESC_SIZE;
 715		paddr = link_desc_banks[i].paddr;
 716		while (n_entries &&
 717		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
 718			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
 719						      i, paddr);
 720			n_entries--;
 721			paddr += HAL_LINK_DESC_SIZE;
 722		}
 723	}
 724
 725	ath11k_hal_srng_access_end(ab, srng);
 726
 727	spin_unlock_bh(&srng->lock);
 728
 729	return 0;
 730
 731fail_desc_bank_free:
 732	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
 733
 734	return ret;
 735}
 736
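/* NAPI poll handler for one external interrupt group: service every
 * ring class this group is masked for (TX completions, RX errors, WBM
 * release, RX, monitor status, REO status, rxdma2host errors and
 * host2rxdma buffer replenish), consuming at most @budget RX entries,
 * and return the total work done.
 */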
 737int ath11k_dp_service_srng(struct ath11k_base *ab,
 738			   struct ath11k_ext_irq_grp *irq_grp,
 739			   int budget)
 740{
 741	struct napi_struct *napi = &irq_grp->napi;
 742	int grp_id = irq_grp->grp_id;
 743	int work_done = 0;
 744	int i = 0, j;
 745	int tot_work_done = 0;
 746
 747	while (ab->hw_params.ring_mask->tx[grp_id] >> i) {
 748		if (ab->hw_params.ring_mask->tx[grp_id] & BIT(i))
 749			ath11k_dp_tx_completion_handler(ab, i);
 750		i++;
 751	}
 752
 753	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
 754		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
 755		budget -= work_done;
 756		tot_work_done += work_done;
 757		if (budget <= 0)
 758			goto done;
 759	}
 760
 761	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
 762		work_done = ath11k_dp_rx_process_wbm_err(ab,
 763							 napi,
 764							 budget);
 765		budget -= work_done;
 766		tot_work_done += work_done;
 767
 768		if (budget <= 0)
 769			goto done;
 770	}
 771
 772	if (ab->hw_params.ring_mask->rx[grp_id]) {
  773		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
 774		work_done = ath11k_dp_process_rx(ab, i, napi,
 775						 budget);
 776		budget -= work_done;
 777		tot_work_done += work_done;
 778		if (budget <= 0)
 779			goto done;
 780	}
 781
 782	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
 783		for (i = 0; i < ab->num_radios; i++) {
 784			for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 785				int id = i * ab->hw_params.num_rxmda_per_pdev + j;
 786
 787				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
 788					BIT(id)) {
 789					work_done =
 790					ath11k_dp_rx_process_mon_rings(ab,
 791								       id,
 792								       napi, budget);
 793					budget -= work_done;
 794					tot_work_done += work_done;
 795
 796					if (budget <= 0)
 797						goto done;
 798				}
 799			}
 800		}
 801	}
 802
 803	if (ab->hw_params.ring_mask->reo_status[grp_id])
 804		ath11k_dp_process_reo_status(ab);
 805
 806	for (i = 0; i < ab->num_radios; i++) {
 807		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 808			int id = i * ab->hw_params.num_rxmda_per_pdev + j;
 809
 810			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
 811				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
 812				budget -= work_done;
 813				tot_work_done += work_done;
 814			}
 815
 816			if (budget <= 0)
 817				goto done;
 818
 819			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
 820				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
 821				struct ath11k_pdev_dp *dp = &ar->dp;
 822				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
 823
 824				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
 825							   HAL_RX_BUF_RBM_SW3_BM);
 826			}
 827		}
 828	}
 829	/* TODO: Implement handler for other interrupts */
 830
 831done:
 832	return tot_work_done;
 833}
 834EXPORT_SYMBOL(ath11k_dp_service_srng);
 835
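/* Tear down per-pdev DP state. The monitor reap timer is stopped
 * first so that it cannot fire while the RX and monitor resources it
 * services are being freed.
 */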
 836void ath11k_dp_pdev_free(struct ath11k_base *ab)
 837{
 838	struct ath11k *ar;
 839	int i;
 840
 841	del_timer_sync(&ab->mon_reap_timer);
 842
 843	for (i = 0; i < ab->num_radios; i++) {
 844		ar = ab->pdevs[i].ar;
 845		ath11k_dp_rx_pdev_free(ab, i);
 846		ath11k_debugfs_unregister(ar);
 847		ath11k_dp_rx_pdev_mon_detach(ar);
 848	}
 849}
 850
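/* Early, failure-free per-pdev init: assign mac_id and prepare the
 * IDRs, locks and waitqueues used later by the RX refill, monitor
 * status and monitor buffer rings.
 */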
 851void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
 852{
 853	struct ath11k *ar;
 854	struct ath11k_pdev_dp *dp;
 855	int i;
 856	int j;
 857
  858	for (i = 0; i < ab->num_radios; i++) {
 859		ar = ab->pdevs[i].ar;
 860		dp = &ar->dp;
 861		dp->mac_id = i;
 862		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
 863		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
 864		atomic_set(&dp->num_tx_pending, 0);
 865		init_waitqueue_head(&dp->tx_empty_waitq);
 866		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
 867			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
 868			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
 869		}
 870		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
 871		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
 872	}
 873}
 874
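/* Allocate per-pdev RX and monitor resources. On any failure the
 * pdevs set up so far are unwound via ath11k_dp_pdev_free().
 */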
 875int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
 876{
 877	struct ath11k *ar;
 878	int ret;
 879	int i;
 880
  881	/* TODO: Per-pdev rx ring, unlike tx ring which is mapped to different ACs */
 882	for (i = 0; i < ab->num_radios; i++) {
 883		ar = ab->pdevs[i].ar;
 884		ret = ath11k_dp_rx_pdev_alloc(ab, i);
 885		if (ret) {
  886			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id %d\n",
 887				    i);
 888			goto err;
 889		}
 890		ret = ath11k_dp_rx_pdev_mon_attach(ar);
 891		if (ret) {
 892			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
 893				    i);
 894			goto err;
 895		}
 896	}
 897
 898	return 0;
 899
 900err:
 901	ath11k_dp_pdev_free(ab);
 902
 903	return ret;
 904}
 905
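/* Connect the HTT service over HTC, wiring up the TX/RX completion
 * handlers, and record the endpoint id assigned for HTT messaging.
 */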
 906int ath11k_dp_htt_connect(struct ath11k_dp *dp)
 907{
 908	struct ath11k_htc_svc_conn_req conn_req;
 909	struct ath11k_htc_svc_conn_resp conn_resp;
 910	int status;
 911
 912	memset(&conn_req, 0, sizeof(conn_req));
 913	memset(&conn_resp, 0, sizeof(conn_resp));
 914
 915	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
 916	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
 917
  918	/* connect to the HTT data service */
 919	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
 920
 921	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
 922					    &conn_resp);
 923
 924	if (status)
 925		return status;
 926
 927	dp->eid = conn_resp.eid;
 928
 929	return 0;
 930}
 931
 932static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
 933{
  934	 /* When v2_map_support is true: for STA mode, enable address
  935	  * search index; TCL uses the ast_hash value in the descriptor.
  936	  * When v2_map_support is false: for STA mode, don't enable
  937	  * address search index.
  938	  */
 939	switch (arvif->vdev_type) {
 940	case WMI_VDEV_TYPE_STA:
 941		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
 942			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 943			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
 944		} else {
 945			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
 946			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
 947		}
 948		break;
 949	case WMI_VDEV_TYPE_AP:
 950	case WMI_VDEV_TYPE_IBSS:
 951		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 952		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
 953		break;
 954	case WMI_VDEV_TYPE_MONITOR:
 955	default:
 956		return;
 957	}
 958}
 959
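/* Build the per-vif TCL metadata (type, vdev id and pdev id fields)
 * carried in TX descriptors, clear the HTT-extension-valid bit and
 * select the address search configuration for the vdev type.
 */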
 960void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
 961{
 962	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
 963			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
 964					  arvif->vdev_id) |
 965			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
 966					  ar->pdev->pdev_id);
 967
 968	/* set HTT extension valid bit to 0 by default */
 969	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
 970
 971	ath11k_dp_update_vdev_search(arvif);
 972}
 973
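/* idr_for_each() callback used at teardown: DMA-unmap and free an
 * MSDU whose TX completion never arrived.
 */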
 974static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
 975{
 976	struct ath11k_base *ab = (struct ath11k_base *)ctx;
 977	struct sk_buff *msdu = skb;
 978
 979	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
 980			 DMA_TO_DEVICE);
 981
 982	dev_kfree_skb_any(msdu);
 983
 984	return 0;
 985}
 986
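/* Reverse of ath11k_dp_alloc(): release the link descriptor banks and
 * common SRNGs, flush the pending REO command lists and reclaim any
 * MSDUs still sitting in the per-ring TX buffer IDRs.
 */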
 987void ath11k_dp_free(struct ath11k_base *ab)
 988{
 989	struct ath11k_dp *dp = &ab->dp;
 990	int i;
 991
 992	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
 993				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
 994
 995	ath11k_dp_srng_common_cleanup(ab);
 996
 997	ath11k_dp_reo_cmd_list_cleanup(ab);
 998
 999	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
1000		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
1001		idr_for_each(&dp->tx_ring[i].txbuf_idr,
1002			     ath11k_dp_tx_pending_cleanup, ab);
1003		idr_destroy(&dp->tx_ring[i].txbuf_idr);
1004		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
1005		kfree(dp->tx_ring[i].tx_status);
1006	}
1007
1008	/* Deinit any SOC level resource */
1009}
1010
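/* SoC-level DP init: REO command bookkeeping, the WBM idle link ring
 * and link descriptor banks, the common SRNGs, per-TCL-ring TX buffer
 * IDRs and completion status FIFOs, and the DSCP-to-TID map tables.
 */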
1011int ath11k_dp_alloc(struct ath11k_base *ab)
1012{
1013	struct ath11k_dp *dp = &ab->dp;
1014	struct hal_srng *srng = NULL;
1015	size_t size = 0;
1016	u32 n_link_desc = 0;
1017	int ret;
1018	int i;
1019
1020	dp->ab = ab;
1021
1022	INIT_LIST_HEAD(&dp->reo_cmd_list);
1023	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
1024	spin_lock_init(&dp->reo_cmd_lock);
1025
1026	dp->reo_cmd_cache_flush_count = 0;
1027
1028	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
1029	if (ret) {
1030		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
1031		return ret;
1032	}
1033
1034	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
1035
1036	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
1037					HAL_WBM_IDLE_LINK, srng, n_link_desc);
1038	if (ret) {
1039		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
1040		return ret;
1041	}
1042
1043	ret = ath11k_dp_srng_common_setup(ab);
1044	if (ret)
1045		goto fail_link_desc_cleanup;
1046
1047	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
1048
1049	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
1050		idr_init(&dp->tx_ring[i].txbuf_idr);
1051		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
1052		dp->tx_ring[i].tcl_data_ring_id = i;
1053
1054		dp->tx_ring[i].tx_status_head = 0;
1055		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
1056		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
1057		if (!dp->tx_ring[i].tx_status) {
1058			ret = -ENOMEM;
1059			goto fail_cmn_srng_cleanup;
1060		}
1061	}
1062
1063	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
1064		ath11k_hal_tx_set_dscp_tid_map(ab, i);
1065
1066	/* Init any SOC level resource for DP */
1067
1068	return 0;
1069
1070fail_cmn_srng_cleanup:
1071	ath11k_dp_srng_common_cleanup(ab);
1072
1073fail_link_desc_cleanup:
1074	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1075				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1076
1077	return ret;
1078}
1079
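/* For targets using shadow registers, head-pointer writes triggered
 * by TX are deferred: this timer flushes the shadow HP/TP once the
 * ring has seen no new TX for one full interval.
 */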
1080static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
1081{
1082	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
1083								 t, timer);
1084	struct ath11k_base *ab = update_timer->ab;
 1085	struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];
1086
1087	spin_lock_bh(&srng->lock);
1088
 1089	/* When the timer fires, the handler checks whether any new TX
 1090	 * happened during the timeout interval. If so, it re-arms the
 1091	 * timer; otherwise it updates the HP and stops the timer. The
 1092	 * timer is started again when TX happens again.
 1093	 */
1094	if (update_timer->timer_tx_num != update_timer->tx_num) {
1095		update_timer->timer_tx_num = update_timer->tx_num;
1096		mod_timer(&update_timer->timer, jiffies +
1097		  msecs_to_jiffies(update_timer->interval));
1098	} else {
1099		update_timer->started = false;
1100		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
1101	}
1102
1103	spin_unlock_bh(&srng->lock);
1104}
1105
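/* Called under srng->lock on each TX enqueue: count the TX and arm
 * the deferred head-pointer update timer if it is not already
 * running.
 */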
1106void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
1107				  struct hal_srng *srng,
1108				  struct ath11k_hp_update_timer *update_timer)
1109{
1110	lockdep_assert_held(&srng->lock);
1111
1112	if (!ab->hw_params.supports_shadow_regs)
1113		return;
1114
1115	update_timer->tx_num++;
1116
1117	if (update_timer->started)
1118		return;
1119
1120	update_timer->started = true;
1121	update_timer->timer_tx_num = update_timer->tx_num;
1122	mod_timer(&update_timer->timer, jiffies +
1123		  msecs_to_jiffies(update_timer->interval));
1124}
1125
1126void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
1127				 struct ath11k_hp_update_timer *update_timer)
1128{
1129	if (!ab->hw_params.supports_shadow_regs)
1130		return;
1131
1132	if (!update_timer->init)
1133		return;
1134
1135	del_timer_sync(&update_timer->timer);
1136}
1137
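/* Typical usage (sketch): ath11k_dp_shadow_init_timer() once at ring
 * setup, ath11k_dp_shadow_start_timer() under srng->lock on every TX
 * enqueue, and ath11k_dp_shadow_stop_timer() before the ring is torn
 * down.
 */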
1138void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
1139				 struct ath11k_hp_update_timer *update_timer,
1140				 u32 interval, u32 ring_id)
1141{
1142	if (!ab->hw_params.supports_shadow_regs)
1143		return;
1144
1145	update_timer->tx_num = 0;
1146	update_timer->timer_tx_num = 0;
1147	update_timer->ab = ab;
1148	update_timer->ring_id = ring_id;
1149	update_timer->interval = interval;
1150	update_timer->init = true;
1151	timer_setup(&update_timer->timer,
1152		    ath11k_dp_shadow_timer_handler, 0);
1153}