   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
   4 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
   5 */
   6
   7#include <crypto/hash.h>
   8#include "core.h"
   9#include "dp_tx.h"
  10#include "hal_tx.h"
  11#include "hif.h"
  12#include "debug.h"
  13#include "dp_rx.h"
  14#include "peer.h"
  15#include "dp_mon.h"
  16
  17static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
  18					  struct sk_buff *skb)
  19{
  20	dev_kfree_skb_any(skb);
  21}
  22
  23void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
  24{
  25	struct ath12k_base *ab = ar->ab;
  26	struct ath12k_peer *peer;
  27
  28	/* TODO: Any other peer specific DP cleanup */
  29
  30	spin_lock_bh(&ab->base_lock);
  31	peer = ath12k_peer_find(ab, vdev_id, addr);
  32	if (!peer) {
  33		ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
  34			    addr, vdev_id);
  35		spin_unlock_bh(&ab->base_lock);
  36		return;
  37	}
  38
  39	ath12k_dp_rx_peer_tid_cleanup(ar, peer);
  40	crypto_free_shash(peer->tfm_mmic);
  41	peer->dp_setup_done = false;
  42	spin_unlock_bh(&ab->base_lock);
  43}
  44
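/*
 * Peer data-path setup below: program the default REO routing for the
 * peer, create an RX TID queue for each TID (0..IEEE80211_NUM_TIDS
 * inclusive) and set up the RX fragment/defrag context.  On failure the
 * TID queues created so far are deleted in reverse order.
 */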
  45int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
  46{
  47	struct ath12k_base *ab = ar->ab;
  48	struct ath12k_peer *peer;
  49	u32 reo_dest;
  50	int ret = 0, tid;
  51
  52	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
  53	reo_dest = ar->dp.mac_id + 1;
  54	ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
  55					WMI_PEER_SET_DEFAULT_ROUTING,
  56					DP_RX_HASH_ENABLE | (reo_dest << 1));
  57
  58	if (ret) {
  59		ath12k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
  60			    ret, addr, vdev_id);
  61		return ret;
  62	}
  63
  64	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
  65		ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
  66						  HAL_PN_TYPE_NONE);
  67		if (ret) {
  68			ath12k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
  69				    tid, ret);
  70			goto peer_clean;
  71		}
  72	}
  73
  74	ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
  75	if (ret) {
  76		ath12k_warn(ab, "failed to setup rx defrag context\n");
  77		goto peer_clean;
  78	}
  79
  80	/* TODO: Setup other peer specific resource used in data path */
  81
  82	return 0;
  83
  84peer_clean:
  85	spin_lock_bh(&ab->base_lock);
  86
  87	peer = ath12k_peer_find(ab, vdev_id, addr);
  88	if (!peer) {
  89		ath12k_warn(ab, "failed to find the peer to del rx tid\n");
  90		spin_unlock_bh(&ab->base_lock);
  91		return -ENOENT;
  92	}
  93
  94	for (; tid >= 0; tid--)
  95		ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
  96
  97	spin_unlock_bh(&ab->base_lock);
  98
  99	return ret;
 100}
 101
 102void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring)
 103{
 104	if (!ring->vaddr_unaligned)
 105		return;
 106
 107	dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
 108			  ring->paddr_unaligned);
 109
 110	ring->vaddr_unaligned = NULL;
 111}
 112
 113static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
 114{
 115	int ext_group_num;
 116	u8 mask = 1 << ring_num;
 117
 118	for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
 119	     ext_group_num++) {
 120		if (mask & grp_mask[ext_group_num])
 121			return ext_group_num;
 122	}
 123
 124	return -ENOENT;
 125}
 126
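/*
 * MSI group lookup: every serviceable ring type has a per-group interrupt
 * mask in hw_params->ring_mask.  The ext IRQ group whose mask contains
 * BIT(ring_num) owns the ring's interrupt, and that group index (modulo
 * the number of DP MSI vectors) selects the msi_data programmed into the
 * SRNG in ath12k_dp_srng_msi_setup().
 */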
 127static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
 128					      enum hal_ring_type type, int ring_num)
 129{
 130	const u8 *grp_mask;
 131
 132	switch (type) {
 133	case HAL_WBM2SW_RELEASE:
 134		if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
 135			grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
 136			ring_num = 0;
 137		} else {
 138			grp_mask = &ab->hw_params->ring_mask->tx[0];
 139		}
 140		break;
 141	case HAL_REO_EXCEPTION:
 142		grp_mask = &ab->hw_params->ring_mask->rx_err[0];
 143		break;
 144	case HAL_REO_DST:
 145		grp_mask = &ab->hw_params->ring_mask->rx[0];
 146		break;
 147	case HAL_REO_STATUS:
 148		grp_mask = &ab->hw_params->ring_mask->reo_status[0];
 149		break;
 150	case HAL_RXDMA_MONITOR_STATUS:
 151	case HAL_RXDMA_MONITOR_DST:
 152		grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
 153		break;
 154	case HAL_TX_MONITOR_DST:
 155		grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
 156		break;
 157	case HAL_RXDMA_BUF:
 158		grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
 159		break;
 160	case HAL_RXDMA_MONITOR_BUF:
 161	case HAL_TCL_DATA:
 162	case HAL_TCL_CMD:
 163	case HAL_REO_CMD:
 164	case HAL_SW2WBM_RELEASE:
 165	case HAL_WBM_IDLE_LINK:
 166	case HAL_TCL_STATUS:
 167	case HAL_REO_REINJECT:
 168	case HAL_CE_SRC:
 169	case HAL_CE_DST:
 170	case HAL_CE_DST_STATUS:
 171	default:
 172		return -ENOENT;
 173	}
 174
 175	return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
 176}
 177
 178static void ath12k_dp_srng_msi_setup(struct ath12k_base *ab,
 179				     struct hal_srng_params *ring_params,
 180				     enum hal_ring_type type, int ring_num)
 181{
 182	int msi_group_number, msi_data_count;
 183	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
 184	int ret;
 185
 186	ret = ath12k_hif_get_user_msi_vector(ab, "DP",
 187					     &msi_data_count, &msi_data_start,
 188					     &msi_irq_start);
 189	if (ret)
 190		return;
 191
 192	msi_group_number = ath12k_dp_srng_calculate_msi_group(ab, type,
 193							      ring_num);
 194	if (msi_group_number < 0) {
 195		ath12k_dbg(ab, ATH12K_DBG_PCI,
  196			   "ring not part of an ext_group; ring_type: %d, ring_num %d",
 197			   type, ring_num);
 198		ring_params->msi_addr = 0;
 199		ring_params->msi_data = 0;
 200		return;
 201	}
 202
 203	if (msi_group_number > msi_data_count) {
 204		ath12k_dbg(ab, ATH12K_DBG_PCI,
 205			   "multiple msi_groups share one msi, msi_group_num %d",
 206			   msi_group_number);
 207	}
 208
 209	ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
 210
 211	ring_params->msi_addr = addr_lo;
 212	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
 213	ring_params->msi_data = (msi_group_number % msi_data_count)
 214		+ msi_data_start;
 215	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
 216}
 217
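/*
 * Generic SRNG allocation: the ring is sized as num_entries * entry_size
 * plus HAL_RING_BASE_ALIGN - 1 bytes of slack, allocated from coherent DMA
 * memory and aligned by hand; interrupt batch-counter and timer thresholds
 * are then chosen per ring type before the ring is handed to the HAL.
 */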
 218int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
 219			 enum hal_ring_type type, int ring_num,
 220			 int mac_id, int num_entries)
 221{
 222	struct hal_srng_params params = { 0 };
 223	int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
 224	int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
 225	int ret;
 226
 227	if (max_entries < 0 || entry_sz < 0)
 228		return -EINVAL;
 229
 230	if (num_entries > max_entries)
 231		num_entries = max_entries;
 232
 233	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
 234	ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
 235						   &ring->paddr_unaligned,
 236						   GFP_KERNEL);
 237	if (!ring->vaddr_unaligned)
 238		return -ENOMEM;
 239
 240	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
 241	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
 242		      (unsigned long)ring->vaddr_unaligned);
 243
 244	params.ring_base_vaddr = ring->vaddr;
 245	params.ring_base_paddr = ring->paddr;
 246	params.num_entries = num_entries;
 247	ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
 248
 249	switch (type) {
 250	case HAL_REO_DST:
 251		params.intr_batch_cntr_thres_entries =
 252					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
 253		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 254		break;
 255	case HAL_RXDMA_BUF:
 256	case HAL_RXDMA_MONITOR_BUF:
 257	case HAL_RXDMA_MONITOR_STATUS:
 258		params.low_threshold = num_entries >> 3;
 259		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
 260		params.intr_batch_cntr_thres_entries = 0;
 261		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 262		break;
 263	case HAL_TX_MONITOR_DST:
 264		params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
 265		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
 266		params.intr_batch_cntr_thres_entries = 0;
 267		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 268		break;
 269	case HAL_WBM2SW_RELEASE:
 270		if (ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) {
 271			params.intr_batch_cntr_thres_entries =
 272					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
 273			params.intr_timer_thres_us =
 274					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
 275			break;
 276		}
  277		/* fall through when the ring is not a TX completion ring */
 278		fallthrough;
 279	case HAL_REO_EXCEPTION:
 280	case HAL_REO_REINJECT:
 281	case HAL_REO_CMD:
 282	case HAL_REO_STATUS:
 283	case HAL_TCL_DATA:
 284	case HAL_TCL_CMD:
 285	case HAL_TCL_STATUS:
 286	case HAL_WBM_IDLE_LINK:
 287	case HAL_SW2WBM_RELEASE:
 288	case HAL_RXDMA_DST:
 289	case HAL_RXDMA_MONITOR_DST:
 290	case HAL_RXDMA_MONITOR_DESC:
 291		params.intr_batch_cntr_thres_entries =
 292					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
 293		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
 294		break;
 295	case HAL_RXDMA_DIR_BUF:
 296		break;
 297	default:
 298		ath12k_warn(ab, "Not a valid ring type in dp :%d\n", type);
 299		return -EINVAL;
 300	}
 301
 302	ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
 303	if (ret < 0) {
 304		ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
 305			    ret, ring_num);
 306		return ret;
 307	}
 308
 309	ring->ring_id = ret;
 310
 311	return 0;
 312}
 313
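/*
 * A TX bank packs the per-vdev TCL parameters (encapsulation and
 * encryption type, ADDRX/ADDRY search flags, mesh enable, vdev_id check)
 * into one u32 that is written to a HW bank register.  Vdevs producing an
 * identical configuration can share a bank, see
 * ath12k_dp_tx_get_bank_profile().
 */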
 314static
 315u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif)
 316{
 317	u32 bank_config = 0;
 318
 319	/* Only valid for raw frames with HW crypto enabled.
 320	 * With SW crypto, mac80211 sets key per packet
 321	 */
 322	if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
 323	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
 324		bank_config |=
 325			u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher),
 326					HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
 327
 328	bank_config |= u32_encode_bits(arvif->tx_encap_type,
 329					HAL_TX_BANK_CONFIG_ENCAP_TYPE);
 330	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
 331			u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
 332			u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
 333
 334	/* only valid if idx_lookup_override is not set in tcl_data_cmd */
 335	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
 336
 337	bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
 338					HAL_TX_BANK_CONFIG_ADDRX_EN) |
 339			u32_encode_bits(!!(arvif->hal_addr_search_flags &
 340					HAL_TX_ADDRY_EN),
 341					HAL_TX_BANK_CONFIG_ADDRY_EN);
 342
 343	bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 3 : 0,
 344					HAL_TX_BANK_CONFIG_MESH_EN) |
 345			u32_encode_bits(arvif->vdev_id_check_en,
 346					HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);
 347
 348	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID);
 349
 350	return bank_config;
 351}
 352
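/*
 * Bank profiles are managed with a linear scan under tx_bank_lock: reuse a
 * configured bank whose bank_config matches, otherwise claim the first
 * unconfigured (or unused) slot and program the register.  Each user holds
 * a reference through num_users, dropped in ath12k_dp_tx_put_bank_profile().
 */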
 353static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif,
 354					 struct ath12k_dp *dp)
 355{
 356	int bank_id = DP_INVALID_BANK_ID;
 357	int i;
 358	u32 bank_config;
 359	bool configure_register = false;
 360
 361	/* convert vdev params into hal_tx_bank_config */
 362	bank_config = ath12k_dp_tx_get_vdev_bank_config(ab, arvif);
 363
 364	spin_lock_bh(&dp->tx_bank_lock);
  365	/* TODO: implement using idr kernel framework */
 366	for (i = 0; i < dp->num_bank_profiles; i++) {
 367		if (dp->bank_profiles[i].is_configured &&
 368		    (dp->bank_profiles[i].bank_config ^ bank_config) == 0) {
 369			bank_id = i;
 370			goto inc_ref_and_return;
 371		}
 372		if (!dp->bank_profiles[i].is_configured ||
 373		    !dp->bank_profiles[i].num_users) {
 374			bank_id = i;
 375			goto configure_and_return;
 376		}
 377	}
 378
 379	if (bank_id == DP_INVALID_BANK_ID) {
 380		spin_unlock_bh(&dp->tx_bank_lock);
 381		ath12k_err(ab, "unable to find TX bank!");
 382		return bank_id;
 383	}
 384
 385configure_and_return:
 386	dp->bank_profiles[bank_id].is_configured = true;
 387	dp->bank_profiles[bank_id].bank_config = bank_config;
 388	configure_register = true;
 389inc_ref_and_return:
 390	dp->bank_profiles[bank_id].num_users++;
 391	spin_unlock_bh(&dp->tx_bank_lock);
 392
 393	if (configure_register)
 394		ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id);
 395
 396	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u",
 397		   bank_id, bank_config, dp->bank_profiles[bank_id].bank_config,
 398		   dp->bank_profiles[bank_id].num_users);
 399
 400	return bank_id;
 401}
 402
 403void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id)
 404{
 405	spin_lock_bh(&dp->tx_bank_lock);
 406	dp->bank_profiles[bank_id].num_users--;
 407	spin_unlock_bh(&dp->tx_bank_lock);
 408}
 409
 410static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
 411{
 412	struct ath12k_dp *dp = &ab->dp;
 413
 414	kfree(dp->bank_profiles);
 415	dp->bank_profiles = NULL;
 416}
 417
 418static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
 419{
 420	struct ath12k_dp *dp = &ab->dp;
 421	u32 num_tcl_banks = ab->hw_params->num_tcl_banks;
 422	int i;
 423
 424	dp->num_bank_profiles = num_tcl_banks;
 425	dp->bank_profiles = kmalloc_array(num_tcl_banks,
 426					  sizeof(struct ath12k_dp_tx_bank_profile),
 427					  GFP_KERNEL);
 428	if (!dp->bank_profiles)
 429		return -ENOMEM;
 430
 431	spin_lock_init(&dp->tx_bank_lock);
 432
 433	for (i = 0; i < num_tcl_banks; i++) {
 434		dp->bank_profiles[i].is_configured = false;
 435		dp->bank_profiles[i].num_users = 0;
 436	}
 437
 438	return 0;
 439}
 440
 441static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
 442{
 443	struct ath12k_dp *dp = &ab->dp;
 444	int i;
 445
 446	ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring);
 447	ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
 448	ath12k_dp_srng_cleanup(ab, &dp->reo_except_ring);
 449	ath12k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
 450	ath12k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
 451	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
 452		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
 453		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
 454	}
 455	ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
 456	ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
 457	ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
 458}
 459
 460static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
 461{
 462	struct ath12k_dp *dp = &ab->dp;
 463	const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
 464	struct hal_srng *srng;
 465	int i, ret, tx_comp_ring_num;
 466	u32 ring_hash_map;
 467
 468	ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
 469				   HAL_SW2WBM_RELEASE, 0, 0,
 470				   DP_WBM_RELEASE_RING_SIZE);
 471	if (ret) {
 472		ath12k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
 473			    ret);
 474		goto err;
 475	}
 476
 477	ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
 478				   DP_TCL_CMD_RING_SIZE);
 479	if (ret) {
 480		ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
 481		goto err;
 482	}
 483
 484	ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
 485				   0, 0, DP_TCL_STATUS_RING_SIZE);
 486	if (ret) {
 487		ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
 488		goto err;
 489	}
 490
 491	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
 492		map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
 493		tx_comp_ring_num = map[i].wbm_ring_num;
 494
 495		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
 496					   HAL_TCL_DATA, i, 0,
 497					   DP_TCL_DATA_RING_SIZE);
 498		if (ret) {
 499			ath12k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
 500				    i, ret);
 501			goto err;
 502		}
 503
 504		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
 505					   HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
 506					   DP_TX_COMP_RING_SIZE);
 507		if (ret) {
 508			ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
 509				    tx_comp_ring_num, ret);
 510			goto err;
 511		}
 512	}
 513
 514	ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
 515				   0, 0, DP_REO_REINJECT_RING_SIZE);
 516	if (ret) {
 517		ath12k_warn(ab, "failed to set up reo_reinject ring :%d\n",
 518			    ret);
 519		goto err;
 520	}
 521
 522	ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
 523				   HAL_WBM2SW_REL_ERR_RING_NUM, 0,
 524				   DP_RX_RELEASE_RING_SIZE);
 525	if (ret) {
 526		ath12k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
 527		goto err;
 528	}
 529
 530	ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
 531				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
 532	if (ret) {
 533		ath12k_warn(ab, "failed to set up reo_exception ring :%d\n",
 534			    ret);
 535		goto err;
 536	}
 537
 538	ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
 539				   0, 0, DP_REO_CMD_RING_SIZE);
 540	if (ret) {
 541		ath12k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
 542		goto err;
 543	}
 544
 545	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 546	ath12k_hal_reo_init_cmd_ring(ab, srng);
 547
 548	ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
 549				   0, 0, DP_REO_STATUS_RING_SIZE);
 550	if (ret) {
 551		ath12k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
 552		goto err;
 553	}
 554
 555	/* When hash based routing of rx packet is enabled, 32 entries to map
 556	 * the hash values to the ring will be configured. Each hash entry uses
 557	 * four bits to map to a particular ring. The ring mapping will be
  558	 * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW, 7:SW5,
 559	 * 8:SW6, 9:SW7, 10:SW8, 11:Not used.
 560	 */
 561	ring_hash_map = HAL_HASH_ROUTING_RING_SW1 |
 562			HAL_HASH_ROUTING_RING_SW2 << 4 |
 563			HAL_HASH_ROUTING_RING_SW3 << 8 |
 564			HAL_HASH_ROUTING_RING_SW4 << 12 |
 565			HAL_HASH_ROUTING_RING_SW1 << 16 |
 566			HAL_HASH_ROUTING_RING_SW2 << 20 |
 567			HAL_HASH_ROUTING_RING_SW3 << 24 |
 568			HAL_HASH_ROUTING_RING_SW4 << 28;
 569
 570	ath12k_hal_reo_hw_setup(ab, ring_hash_map);
 571
 572	return 0;
 573
 574err:
 575	ath12k_dp_srng_common_cleanup(ab);
 576
 577	return ret;
 578}
 579
 580static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab)
 581{
 582	struct ath12k_dp *dp = &ab->dp;
 583	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 584	int i;
 585
 586	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
 587		if (!slist[i].vaddr)
 588			continue;
 589
 590		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 591				  slist[i].vaddr, slist[i].paddr);
 592		slist[i].vaddr = NULL;
 593	}
 594}
 595
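/*
 * When the idle link descriptor list is too large for a single allocation
 * (see ath12k_dp_link_desc_setup()), the descriptor addresses are handed
 * to HW through a scatter list: up to DP_IDLE_SCATTER_BUFS_MAX coherent
 * buffers, each holding HAL_WBM_IDLE_SCATTER_BUF_SIZE / entry_size link
 * descriptor entries, registered via ath12k_hal_setup_link_idle_list().
 */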
 596static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
 597						  int size,
 598						  u32 n_link_desc_bank,
 599						  u32 n_link_desc,
 600						  u32 last_bank_sz)
 601{
 602	struct ath12k_dp *dp = &ab->dp;
 603	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
 604	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 605	u32 n_entries_per_buf;
 606	int num_scatter_buf, scatter_idx;
 607	struct hal_wbm_link_desc *scatter_buf;
 608	int align_bytes, n_entries;
 609	dma_addr_t paddr;
 610	int rem_entries;
 611	int i;
 612	int ret = 0;
 613	u32 end_offset, cookie;
 614
 615	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
 616		ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
 617	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
 618
 619	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
 620		return -EINVAL;
 621
 622	for (i = 0; i < num_scatter_buf; i++) {
 623		slist[i].vaddr = dma_alloc_coherent(ab->dev,
 624						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 625						    &slist[i].paddr, GFP_KERNEL);
 626		if (!slist[i].vaddr) {
 627			ret = -ENOMEM;
 628			goto err;
 629		}
 630	}
 631
 632	scatter_idx = 0;
 633	scatter_buf = slist[scatter_idx].vaddr;
 634	rem_entries = n_entries_per_buf;
 635
 636	for (i = 0; i < n_link_desc_bank; i++) {
 637		align_bytes = link_desc_banks[i].vaddr -
 638			      link_desc_banks[i].vaddr_unaligned;
 639		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
 640			     HAL_LINK_DESC_SIZE;
 641		paddr = link_desc_banks[i].paddr;
 642		while (n_entries) {
 643			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
 644			ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
 645			n_entries--;
 646			paddr += HAL_LINK_DESC_SIZE;
 647			if (rem_entries) {
 648				rem_entries--;
 649				scatter_buf++;
 650				continue;
 651			}
 652
 653			rem_entries = n_entries_per_buf;
 654			scatter_idx++;
 655			scatter_buf = slist[scatter_idx].vaddr;
 656		}
 657	}
 658
 659	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
 660		     sizeof(struct hal_wbm_link_desc);
 661	ath12k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
 662					n_link_desc, end_offset);
 663
 664	return 0;
 665
 666err:
 667	ath12k_dp_scatter_idle_link_desc_cleanup(ab);
 668
 669	return ret;
 670}
 671
 672static void
 673ath12k_dp_link_desc_bank_free(struct ath12k_base *ab,
 674			      struct dp_link_desc_bank *link_desc_banks)
 675{
 676	int i;
 677
 678	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
 679		if (link_desc_banks[i].vaddr_unaligned) {
 680			dma_free_coherent(ab->dev,
 681					  link_desc_banks[i].size,
 682					  link_desc_banks[i].vaddr_unaligned,
 683					  link_desc_banks[i].paddr_unaligned);
 684			link_desc_banks[i].vaddr_unaligned = NULL;
 685		}
 686	}
 687}
 688
 689static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
 690					  struct dp_link_desc_bank *desc_bank,
 691					  int n_link_desc_bank,
 692					  int last_bank_sz)
 693{
 694	struct ath12k_dp *dp = &ab->dp;
 695	int i;
 696	int ret = 0;
 697	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
 698
 699	for (i = 0; i < n_link_desc_bank; i++) {
 700		if (i == (n_link_desc_bank - 1) && last_bank_sz)
 701			desc_sz = last_bank_sz;
 702
 703		desc_bank[i].vaddr_unaligned =
 704					dma_alloc_coherent(ab->dev, desc_sz,
 705							   &desc_bank[i].paddr_unaligned,
 706							   GFP_KERNEL);
 707		if (!desc_bank[i].vaddr_unaligned) {
 708			ret = -ENOMEM;
 709			goto err;
 710		}
 711
 712		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
 713					       HAL_LINK_DESC_ALIGN);
 714		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
 715				     ((unsigned long)desc_bank[i].vaddr -
 716				      (unsigned long)desc_bank[i].vaddr_unaligned);
 717		desc_bank[i].size = desc_sz;
 718	}
 719
 720	return 0;
 721
 722err:
 723	ath12k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
 724
 725	return ret;
 726}
 727
 728void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
 729				 struct dp_link_desc_bank *desc_bank,
 730				 u32 ring_type, struct dp_srng *ring)
 731{
 732	ath12k_dp_link_desc_bank_free(ab, desc_bank);
 733
 734	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
 735		ath12k_dp_srng_cleanup(ab, ring);
 736		ath12k_dp_scatter_idle_link_desc_cleanup(ab);
 737	}
 738}
 739
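/*
 * WBM idle link ring sizing: the required number of link descriptors is
 * the sum of MPDU link, MPDU queue, TX MSDU link and RX MSDU link
 * descriptors derived from the DP_* traffic parameters, rounded up to the
 * next power of two before the ring is allocated.
 */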
 740static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
 741{
 742	struct ath12k_dp *dp = &ab->dp;
 743	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
 744	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
 745	int ret = 0;
 746
 747	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
 748			   HAL_NUM_MPDUS_PER_LINK_DESC;
 749
 750	n_mpdu_queue_desc = n_mpdu_link_desc /
 751			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
 752
 753	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
 754			       DP_AVG_MSDUS_PER_FLOW) /
 755			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;
 756
 757	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
 758			       DP_AVG_MSDUS_PER_MPDU) /
 759			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;
 760
 761	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
 762		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;
 763
 764	if (*n_link_desc & (*n_link_desc - 1))
 765		*n_link_desc = 1 << fls(*n_link_desc);
 766
 767	ret = ath12k_dp_srng_setup(ab, &dp->wbm_idle_ring,
 768				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
 769	if (ret) {
 770		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
 771		return ret;
 772	}
 773	return ret;
 774}
 775
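/*
 * Allocate the link descriptor banks and publish them to HW: small
 * configurations are written entry by entry into the idle link SRNG, while
 * anything above DP_LINK_DESC_ALLOC_SIZE_THRESH goes through the scatter
 * list path above.
 */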
 776int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
 777			      struct dp_link_desc_bank *link_desc_banks,
 778			      u32 ring_type, struct hal_srng *srng,
 779			      u32 n_link_desc)
 780{
 781	u32 tot_mem_sz;
 782	u32 n_link_desc_bank, last_bank_sz;
 783	u32 entry_sz, align_bytes, n_entries;
 784	struct hal_wbm_link_desc *desc;
 785	u32 paddr;
 786	int i, ret;
 787	u32 cookie;
 788
 789	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
 790	tot_mem_sz += HAL_LINK_DESC_ALIGN;
 791
 792	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
 793		n_link_desc_bank = 1;
 794		last_bank_sz = tot_mem_sz;
 795	} else {
 796		n_link_desc_bank = tot_mem_sz /
 797				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 798				    HAL_LINK_DESC_ALIGN);
 799		last_bank_sz = tot_mem_sz %
 800			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 801				HAL_LINK_DESC_ALIGN);
 802
 803		if (last_bank_sz)
 804			n_link_desc_bank += 1;
 805	}
 806
 807	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
 808		return -EINVAL;
 809
 810	ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks,
 811					     n_link_desc_bank, last_bank_sz);
 812	if (ret)
 813		return ret;
 814
 815	/* Setup link desc idle list for HW internal usage */
 816	entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type);
 817	tot_mem_sz = entry_sz * n_link_desc;
 818
 819	/* Setup scatter desc list when the total memory requirement is more */
 820	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
 821	    ring_type != HAL_RXDMA_MONITOR_DESC) {
 822		ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
 823							     n_link_desc_bank,
 824							     n_link_desc,
 825							     last_bank_sz);
 826		if (ret) {
  827			ath12k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
 828				    ret);
 829			goto fail_desc_bank_free;
 830		}
 831
 832		return 0;
 833	}
 834
 835	spin_lock_bh(&srng->lock);
 836
 837	ath12k_hal_srng_access_begin(ab, srng);
 838
 839	for (i = 0; i < n_link_desc_bank; i++) {
 840		align_bytes = link_desc_banks[i].vaddr -
 841			      link_desc_banks[i].vaddr_unaligned;
 842		n_entries = (link_desc_banks[i].size - align_bytes) /
 843			    HAL_LINK_DESC_SIZE;
 844		paddr = link_desc_banks[i].paddr;
 845		while (n_entries &&
 846		       (desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
 847			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
 848			ath12k_hal_set_link_desc_addr(desc,
 849						      cookie, paddr);
 850			n_entries--;
 851			paddr += HAL_LINK_DESC_SIZE;
 852		}
 853	}
 854
 855	ath12k_hal_srng_access_end(ab, srng);
 856
 857	spin_unlock_bh(&srng->lock);
 858
 859	return 0;
 860
 861fail_desc_bank_free:
 862	ath12k_dp_link_desc_bank_free(ab, link_desc_banks);
 863
 864	return ret;
 865}
 866
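/*
 * NAPI poll handler for one ext IRQ group: each block below checks the
 * group's ring mask and, if set, drains the corresponding rings (TX
 * completions, REO exception, WBM error release, REO destination RX,
 * RX/TX monitor destination, REO status and host2rxdma refill),
 * decrementing the NAPI budget as it goes.
 */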
 867int ath12k_dp_service_srng(struct ath12k_base *ab,
 868			   struct ath12k_ext_irq_grp *irq_grp,
 869			   int budget)
 870{
 871	struct napi_struct *napi = &irq_grp->napi;
 872	int grp_id = irq_grp->grp_id;
 873	int work_done = 0;
 874	int i = 0, j;
 875	int tot_work_done = 0;
 876	enum dp_monitor_mode monitor_mode;
 877	u8 ring_mask;
 878
 879	while (i < ab->hw_params->max_tx_ring) {
 880		if (ab->hw_params->ring_mask->tx[grp_id] &
 881			BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
 882			ath12k_dp_tx_completion_handler(ab, i);
 883		i++;
 884	}
 885
 886	if (ab->hw_params->ring_mask->rx_err[grp_id]) {
 887		work_done = ath12k_dp_rx_process_err(ab, napi, budget);
 888		budget -= work_done;
 889		tot_work_done += work_done;
 890		if (budget <= 0)
 891			goto done;
 892	}
 893
 894	if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
 895		work_done = ath12k_dp_rx_process_wbm_err(ab,
 896							 napi,
 897							 budget);
 898		budget -= work_done;
 899		tot_work_done += work_done;
 900
 901		if (budget <= 0)
 902			goto done;
 903	}
 904
 905	if (ab->hw_params->ring_mask->rx[grp_id]) {
 906		i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
 907		work_done = ath12k_dp_rx_process(ab, i, napi,
 908						 budget);
 909		budget -= work_done;
 910		tot_work_done += work_done;
 911		if (budget <= 0)
 912			goto done;
 913	}
 914
 915	if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
 916		monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
 917		ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
 918		for (i = 0; i < ab->num_radios; i++) {
 919			for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
 920				int id = i * ab->hw_params->num_rxmda_per_pdev + j;
 921
 922				if (ring_mask & BIT(id)) {
 923					work_done =
 924					ath12k_dp_mon_process_ring(ab, id, napi, budget,
 925								   monitor_mode);
 926					budget -= work_done;
 927					tot_work_done += work_done;
 928
 929					if (budget <= 0)
 930						goto done;
 931				}
 932			}
 933		}
 934	}
 935
 936	if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
 937		monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
 938		ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
 939		for (i = 0; i < ab->num_radios; i++) {
 940			for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
 941				int id = i * ab->hw_params->num_rxmda_per_pdev + j;
 942
 943				if (ring_mask & BIT(id)) {
 944					work_done =
 945					ath12k_dp_mon_process_ring(ab, id, napi, budget,
 946								   monitor_mode);
 947					budget -= work_done;
 948					tot_work_done += work_done;
 949
 950					if (budget <= 0)
 951						goto done;
 952				}
 953			}
 954		}
 955	}
 956
 957	if (ab->hw_params->ring_mask->reo_status[grp_id])
 958		ath12k_dp_rx_process_reo_status(ab);
 959
 960	if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
 961		struct ath12k_dp *dp = &ab->dp;
 962		struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
 963
 964		ath12k_dp_rx_bufs_replenish(ab, rx_ring, 0);
 965	}
 966
 967	/* TODO: Implement handler for other interrupts */
 968
 969done:
 970	return tot_work_done;
 971}
 972
 973void ath12k_dp_pdev_free(struct ath12k_base *ab)
 974{
 975	int i;
 976
 977	del_timer_sync(&ab->mon_reap_timer);
 978
 979	for (i = 0; i < ab->num_radios; i++)
 980		ath12k_dp_rx_pdev_free(ab, i);
 981}
 982
 983void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
 984{
 985	struct ath12k *ar;
 986	struct ath12k_pdev_dp *dp;
 987	int i;
 988
 989	for (i = 0; i <  ab->num_radios; i++) {
 990		ar = ab->pdevs[i].ar;
 991		dp = &ar->dp;
 992		dp->mac_id = i;
 993		atomic_set(&dp->num_tx_pending, 0);
 994		init_waitqueue_head(&dp->tx_empty_waitq);
 995
 996		/* TODO: Add any RXDMA setup required per pdev */
 997	}
 998}
 999
1000static void ath12k_dp_service_mon_ring(struct timer_list *t)
1001{
1002	struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
1003	int i;
1004
1005	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
1006		ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
1007					   ATH12K_DP_RX_MONITOR_MODE);
1008
1009	mod_timer(&ab->mon_reap_timer, jiffies +
1010		  msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
1011}
1012
1013static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
1014{
1015	if (ab->hw_params->rxdma1_enable)
1016		return;
1017
1018	timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
1019}
1020
1021int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
1022{
1023	struct ath12k *ar;
1024	int ret;
1025	int i;
1026
1027	ret = ath12k_dp_rx_htt_setup(ab);
1028	if (ret)
1029		goto out;
1030
1031	ath12k_dp_mon_reap_timer_init(ab);
1032
1033	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
1034	for (i = 0; i < ab->num_radios; i++) {
1035		ar = ab->pdevs[i].ar;
1036		ret = ath12k_dp_rx_pdev_alloc(ab, i);
1037		if (ret) {
1038			ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
1039				    i);
1040			goto err;
1041		}
1042		ret = ath12k_dp_rx_pdev_mon_attach(ar);
1043		if (ret) {
1044			ath12k_warn(ab, "failed to initialize mon pdev %d\n", i);
1045			goto err;
1046		}
1047	}
1048
1049	return 0;
1050err:
1051	ath12k_dp_pdev_free(ab);
1052out:
1053	return ret;
1054}
1055
1056int ath12k_dp_htt_connect(struct ath12k_dp *dp)
1057{
1058	struct ath12k_htc_svc_conn_req conn_req = {0};
1059	struct ath12k_htc_svc_conn_resp conn_resp = {0};
1060	int status;
1061
1062	conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
1063	conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;
1064
1065	/* connect to control service */
1066	conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;
1067
1068	status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
1069					    &conn_resp);
1070
1071	if (status)
1072		return status;
1073
1074	dp->eid = conn_resp.eid;
1075
1076	return 0;
1077}
1078
1079static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
1080{
1081	switch (arvif->vdev_type) {
1082	case WMI_VDEV_TYPE_STA:
1083		/* TODO: Verify the search type and flags since ast hash
1084		 * is not part of peer mapv3
1085		 */
1086		arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
1087		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
1088		break;
1089	case WMI_VDEV_TYPE_AP:
1090	case WMI_VDEV_TYPE_IBSS:
1091		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
1092		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
1093		break;
1094	case WMI_VDEV_TYPE_MONITOR:
1095	default:
1096		return;
1097	}
1098}
1099
1100void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
1101{
1102	struct ath12k_base *ab = ar->ab;
1103
1104	arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
1105			       u32_encode_bits(arvif->vdev_id,
1106					       HTT_TCL_META_DATA_VDEV_ID) |
1107			       u32_encode_bits(ar->pdev->pdev_id,
1108					       HTT_TCL_META_DATA_PDEV_ID);
1109
1110	/* set HTT extension valid bit to 0 by default */
1111	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
1112
1113	ath12k_dp_update_vdev_search(arvif);
1114	arvif->vdev_id_check_en = true;
1115	arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp);
1116
1117	/* TODO: error path for bank id failure */
1118	if (arvif->bank_id == DP_INVALID_BANK_ID) {
1119		ath12k_err(ar->ab, "Failed to initialize DP TX Banks");
1120		return;
1121	}
1122}
1123
1124static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
1125{
1126	struct ath12k_rx_desc_info *desc_info, *tmp;
1127	struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
1128	struct ath12k_dp *dp = &ab->dp;
1129	struct sk_buff *skb;
1130	int i;
1131	u32 pool_id, tx_spt_page;
1132
1133	if (!dp->spt_info)
1134		return;
1135
1136	/* RX Descriptor cleanup */
1137	spin_lock_bh(&dp->rx_desc_lock);
1138
1139	list_for_each_entry_safe(desc_info, tmp, &dp->rx_desc_used_list, list) {
1140		list_del(&desc_info->list);
1141		skb = desc_info->skb;
1142
1143		if (!skb)
1144			continue;
1145
1146		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
1147				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
1148		dev_kfree_skb_any(skb);
1149	}
1150
1151	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
1152		if (!dp->spt_info->rxbaddr[i])
1153			continue;
1154
1155		kfree(dp->spt_info->rxbaddr[i]);
1156		dp->spt_info->rxbaddr[i] = NULL;
1157	}
1158
1159	spin_unlock_bh(&dp->rx_desc_lock);
1160
1161	/* TX Descriptor cleanup */
1162	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
1163		spin_lock_bh(&dp->tx_desc_lock[i]);
1164
1165		list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
1166					 list) {
1167			list_del(&tx_desc_info->list);
1168			skb = tx_desc_info->skb;
1169
1170			if (!skb)
1171				continue;
1172
1173			dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
1174					 skb->len, DMA_TO_DEVICE);
1175			dev_kfree_skb_any(skb);
1176		}
1177
1178		spin_unlock_bh(&dp->tx_desc_lock[i]);
1179	}
1180
1181	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
1182		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
1183
1184		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
1185			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
1186			if (!dp->spt_info->txbaddr[tx_spt_page])
1187				continue;
1188
1189			kfree(dp->spt_info->txbaddr[tx_spt_page]);
1190			dp->spt_info->txbaddr[tx_spt_page] = NULL;
1191		}
1192
1193		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
1194	}
1195
1196	/* unmap SPT pages */
1197	for (i = 0; i < dp->num_spt_pages; i++) {
1198		if (!dp->spt_info[i].vaddr)
1199			continue;
1200
1201		dma_free_coherent(ab->dev, ATH12K_PAGE_SIZE,
1202				  dp->spt_info[i].vaddr, dp->spt_info[i].paddr);
1203		dp->spt_info[i].vaddr = NULL;
1204	}
1205
1206	kfree(dp->spt_info);
1207}
1208
1209static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
1210{
1211	struct ath12k_dp *dp = &ab->dp;
1212
1213	if (!ab->hw_params->reoq_lut_support)
1214		return;
1215
1216	if (!dp->reoq_lut.vaddr)
1217		return;
1218
1219	dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
1220			  dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
1221	dp->reoq_lut.vaddr = NULL;
1222
1223	ath12k_hif_write32(ab,
1224			   HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
1225}
1226
1227void ath12k_dp_free(struct ath12k_base *ab)
1228{
1229	struct ath12k_dp *dp = &ab->dp;
1230	int i;
1231
1232	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1233				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1234
1235	ath12k_dp_cc_cleanup(ab);
1236	ath12k_dp_reoq_lut_cleanup(ab);
1237	ath12k_dp_deinit_bank_profiles(ab);
1238	ath12k_dp_srng_common_cleanup(ab);
1239
1240	ath12k_dp_rx_reo_cmd_list_cleanup(ab);
1241
1242	for (i = 0; i < ab->hw_params->max_tx_ring; i++)
1243		kfree(dp->tx_ring[i].tx_status);
1244
1245	ath12k_dp_rx_free(ab);
1246	/* Deinit any SOC level resource */
1247}
1248
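/*
 * HW cookie conversion (CC) setup: REO and WBM are pointed at a primary
 * page table in CMEM whose entries hold the 4K-aligned base addresses of
 * the SPT pages (written in ath12k_dp_cc_init()), so completion entries
 * can be translated back into host descriptor pointers.  A cookie is
 * simply (ppt_idx << ATH12K_CC_PPT_SHIFT) | spt_idx, see
 * ath12k_dp_cc_cookie_gen() below.
 */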
1249void ath12k_dp_cc_config(struct ath12k_base *ab)
1250{
1251	u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
1252	u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
1253	u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
1254	u32 val = 0;
1255
1256	ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);
1257
1258	val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
1259			       HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
1260		u32_encode_bits(ATH12K_CC_PPT_MSB,
1261				HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
1262		u32_encode_bits(ATH12K_CC_SPT_MSB,
1263				HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
1264		u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) |
1265		u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) |
1266		u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE);
1267
1268	ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(ab), val);
1269
1270	/* Enable HW CC for WBM */
1271	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base);
1272
1273	val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
1274			      HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
1275		u32_encode_bits(ATH12K_CC_PPT_MSB,
1276				HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
1277		u32_encode_bits(ATH12K_CC_SPT_MSB,
1278				HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
1279		u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN);
1280
1281	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG1, val);
1282
1283	/* Enable conversion complete indication */
1284	val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2);
1285	val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) |
1286		u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) |
1287		u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN);
1288
1289	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val);
1290
1291	/* Enable Cookie conversion for WBM2SW Rings */
1292	val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG);
1293	val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) |
1294	       ab->hw_params->hal_params->wbm2sw_cc_enable;
1295
1296	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val);
1297}
1298
1299static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
1300{
1301	return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
1302}
1303
1304static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
1305						   u16 ppt_idx, u16 spt_idx)
1306{
1307	struct ath12k_dp *dp = &ab->dp;
1308
1309	return dp->spt_info[ppt_idx].vaddr + spt_idx;
1310}
1311
1312struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
1313						  u32 cookie)
1314{
1315	struct ath12k_rx_desc_info **desc_addr_ptr;
1316	u16 ppt_idx, spt_idx;
1317
1318	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
1319	spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
1320
1321	if (ppt_idx > ATH12K_NUM_RX_SPT_PAGES ||
1322	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
1323		return NULL;
1324
1325	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
1326
1327	return *desc_addr_ptr;
1328}
1329
1330struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
1331						  u32 cookie)
1332{
1333	struct ath12k_tx_desc_info **desc_addr_ptr;
1334	u16 ppt_idx, spt_idx;
1335
1336	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
1337	spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
1338
1339	if (ppt_idx < ATH12K_NUM_RX_SPT_PAGES ||
1340	    ppt_idx > ab->dp.num_spt_pages ||
1341	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
1342		return NULL;
1343
1344	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
1345
1346	return *desc_addr_ptr;
1347}
1348
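/*
 * Populate the SPT pages: the first ATH12K_NUM_RX_SPT_PAGES pages carry RX
 * descriptor pointers, the remaining pages are split into per-queue TX
 * pools of ATH12K_TX_SPT_PAGES_PER_POOL pages each.  Every descriptor gets
 * its CC cookie/desc_id and is placed on the matching free list.
 */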
1349static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
1350{
1351	struct ath12k_dp *dp = &ab->dp;
1352	struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
1353	struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
1354	u32 i, j, pool_id, tx_spt_page;
1355	u32 ppt_idx;
1356
1357	spin_lock_bh(&dp->rx_desc_lock);
1358
1359	/* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
1360	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
1361		rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
1362				   GFP_ATOMIC);
1363
1364		if (!rx_descs) {
1365			spin_unlock_bh(&dp->rx_desc_lock);
1366			return -ENOMEM;
1367		}
1368
1369		dp->spt_info->rxbaddr[i] = &rx_descs[0];
1370
1371		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
1372			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
1373			rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
1374			list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
1375
1376			/* Update descriptor VA in SPT */
1377			rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, i, j);
1378			*rx_desc_addr = &rx_descs[j];
1379		}
1380	}
1381
1382	spin_unlock_bh(&dp->rx_desc_lock);
1383
1384	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
1385		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
1386		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
1387			tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
1388					   GFP_ATOMIC);
1389
1390			if (!tx_descs) {
1391				spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
1392				/* Caller takes care of TX pending and RX desc cleanup */
1393				return -ENOMEM;
1394			}
1395
1396			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
1397			dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];
1398
1399			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
1400				ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
1401				tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
1402				tx_descs[j].pool_id = pool_id;
1403				list_add_tail(&tx_descs[j].list,
1404					      &dp->tx_desc_free_list[pool_id]);
1405
1406				/* Update descriptor VA in SPT */
1407				tx_desc_addr =
1408					ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
1409				*tx_desc_addr = &tx_descs[j];
1410			}
1411		}
1412		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
1413	}
1414	return 0;
1415}
1416
1417static int ath12k_dp_cc_init(struct ath12k_base *ab)
1418{
1419	struct ath12k_dp *dp = &ab->dp;
1420	int i, ret = 0;
1421	u32 cmem_base;
1422
1423	INIT_LIST_HEAD(&dp->rx_desc_free_list);
1424	INIT_LIST_HEAD(&dp->rx_desc_used_list);
1425	spin_lock_init(&dp->rx_desc_lock);
1426
1427	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
1428		INIT_LIST_HEAD(&dp->tx_desc_free_list[i]);
1429		INIT_LIST_HEAD(&dp->tx_desc_used_list[i]);
1430		spin_lock_init(&dp->tx_desc_lock[i]);
1431	}
1432
1433	dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
1434	if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
1435		dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;
1436
1437	dp->spt_info = kcalloc(dp->num_spt_pages, sizeof(struct ath12k_spt_info),
1438			       GFP_KERNEL);
1439
1440	if (!dp->spt_info) {
1441		ath12k_warn(ab, "SPT page allocation failure");
1442		return -ENOMEM;
1443	}
1444
1445	cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
1446
1447	for (i = 0; i < dp->num_spt_pages; i++) {
1448		dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
1449							   ATH12K_PAGE_SIZE,
1450							   &dp->spt_info[i].paddr,
1451							   GFP_KERNEL);
1452
1453		if (!dp->spt_info[i].vaddr) {
1454			ret = -ENOMEM;
1455			goto free;
1456		}
1457
1458		if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) {
1459			ath12k_warn(ab, "SPT allocated memory is not 4K aligned");
1460			ret = -EINVAL;
1461			goto free;
1462		}
1463
1464		/* Write to PPT in CMEM */
1465		ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
1466				   dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
1467	}
1468
1469	ret = ath12k_dp_cc_desc_init(ab);
1470	if (ret) {
1471		ath12k_warn(ab, "HW CC desc init failed %d", ret);
1472		goto free;
1473	}
1474
1475	return 0;
1476free:
1477	ath12k_dp_cc_cleanup(ab);
1478	return ret;
1479}
1480
1481static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
1482{
1483	struct ath12k_dp *dp = &ab->dp;
1484
1485	if (!ab->hw_params->reoq_lut_support)
1486		return 0;
1487
1488	dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
1489						DP_REOQ_LUT_SIZE,
1490						&dp->reoq_lut.paddr,
1491						GFP_KERNEL | __GFP_ZERO);
1492	if (!dp->reoq_lut.vaddr) {
1493		ath12k_warn(ab, "failed to allocate memory for reoq table");
1494		return -ENOMEM;
1495	}
1496
1497	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
1498			   dp->reoq_lut.paddr);
1499	return 0;
1500}
1501
1502int ath12k_dp_alloc(struct ath12k_base *ab)
1503{
1504	struct ath12k_dp *dp = &ab->dp;
1505	struct hal_srng *srng = NULL;
1506	size_t size = 0;
1507	u32 n_link_desc = 0;
1508	int ret;
1509	int i;
1510
1511	dp->ab = ab;
1512
1513	INIT_LIST_HEAD(&dp->reo_cmd_list);
1514	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
1515	spin_lock_init(&dp->reo_cmd_lock);
1516
1517	dp->reo_cmd_cache_flush_count = 0;
1518
1519	ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
1520	if (ret) {
1521		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
1522		return ret;
1523	}
1524
1525	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
1526
1527	ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks,
1528					HAL_WBM_IDLE_LINK, srng, n_link_desc);
1529	if (ret) {
1530		ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
1531		return ret;
1532	}
1533
1534	ret = ath12k_dp_cc_init(ab);
1535
1536	if (ret) {
1537		ath12k_warn(ab, "failed to setup cookie converter %d\n", ret);
1538		goto fail_link_desc_cleanup;
1539	}
1540	ret = ath12k_dp_init_bank_profiles(ab);
1541	if (ret) {
1542		ath12k_warn(ab, "failed to setup bank profiles %d\n", ret);
1543		goto fail_hw_cc_cleanup;
1544	}
1545
1546	ret = ath12k_dp_srng_common_setup(ab);
1547	if (ret)
1548		goto fail_dp_bank_profiles_cleanup;
1549
1550	size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;
1551
1552	ret = ath12k_dp_reoq_lut_setup(ab);
1553	if (ret) {
1554		ath12k_warn(ab, "failed to setup reoq table %d\n", ret);
1555		goto fail_cmn_srng_cleanup;
1556	}
1557
1558	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
1559		dp->tx_ring[i].tcl_data_ring_id = i;
1560
1561		dp->tx_ring[i].tx_status_head = 0;
1562		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
1563		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
1564		if (!dp->tx_ring[i].tx_status) {
1565			ret = -ENOMEM;
1566			/* FIXME: The allocated tx status is not freed
1567			 * properly here
1568			 */
1569			goto fail_cmn_reoq_cleanup;
1570		}
1571	}
1572
1573	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
1574		ath12k_hal_tx_set_dscp_tid_map(ab, i);
1575
1576	ret = ath12k_dp_rx_alloc(ab);
1577	if (ret)
1578		goto fail_dp_rx_free;
1579
1580	/* Init any SOC level resource for DP */
1581
1582	return 0;
1583
1584fail_dp_rx_free:
1585	ath12k_dp_rx_free(ab);
1586
1587fail_cmn_reoq_cleanup:
1588	ath12k_dp_reoq_lut_cleanup(ab);
1589
1590fail_cmn_srng_cleanup:
1591	ath12k_dp_srng_common_cleanup(ab);
1592
1593fail_dp_bank_profiles_cleanup:
1594	ath12k_dp_deinit_bank_profiles(ab);
1595
1596fail_hw_cc_cleanup:
1597	ath12k_dp_cc_cleanup(ab);
1598
1599fail_link_desc_cleanup:
1600	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1601				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1602
1603	return ret;
1604}