drivers/net/wireless/ath/ath11k/dp_tx.c (Linux v6.13.7)
   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
   4 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
   5 */
   6
   7#include "core.h"
   8#include "dp_tx.h"
   9#include "debug.h"
  10#include "debugfs_sta.h"
  11#include "hw.h"
  12#include "peer.h"
  13#include "mac.h"
  14
  15static enum hal_tcl_encap_type
  16ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
  17{
  18	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
  19	struct ath11k_base *ab = arvif->ar->ab;
  20
  21	if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
  22		return HAL_TCL_ENCAP_TYPE_RAW;
  23
  24	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
  25		return HAL_TCL_ENCAP_TYPE_ETHERNET;
  26
  27	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
  28}
  29
  30static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
  31{
  32	struct ieee80211_hdr *hdr = (void *)skb->data;
  33	u8 *qos_ctl;
  34
  35	if (!ieee80211_is_data_qos(hdr->frame_control))
  36		return;
  37
  38	qos_ctl = ieee80211_get_qos_ctl(hdr);
  39	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
  40		skb->data, (void *)qos_ctl - (void *)skb->data);
  41	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
  42
  43	hdr = (void *)skb->data;
  44	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
  45}
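
/*
 * Editor's sketch (not part of dp_tx.c): what the memmove()/skb_pull()
 * pair above does. The bytes preceding the QoS control field are shifted
 * forward by IEEE80211_QOS_CTL_LEN (2), then the head is advanced past
 * the now-dead gap, leaving a non-QoS header. Standalone and simplified;
 * the buffer layout is invented for illustration.
 */
#include <stdio.h>
#include <string.h>

#define QOS_CTL_LEN 2

int main(void)
{
	/* 4 "header" bytes, 2 QoS-control bytes, 2 "payload" bytes */
	unsigned char buf[] = { 'H', 'D', 'R', 'X', 'Q', 'q', 'P', 'L' };
	unsigned char *data = buf;
	size_t qos_offset = 4;	/* offset of qos_ctl within the header */

	/* shift everything before qos_ctl forward over the QoS field */
	memmove(data + QOS_CTL_LEN, data, qos_offset);
	data += QOS_CTL_LEN;	/* the skb_pull() equivalent */

	printf("%.6s\n", (char *)data);	/* -> "HDRXPL": QoS control gone */
	return 0;
}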
  46
  47static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
  48{
  49	struct ieee80211_hdr *hdr = (void *)skb->data;
  50	struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);
  51
  52	if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
  53		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
  54	else if (!ieee80211_is_data_qos(hdr->frame_control))
  55		return HAL_DESC_REO_NON_QOS_TID;
  56	else
  57		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
  58}
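
/*
 * Editor's sketch (not part of dp_tx.c): TID selection above. skb->priority
 * carries the 802.1d user priority and its low bits double as the 802.11
 * TID. The 0xf mask mirrors mainline's IEEE80211_QOS_CTL_TID_MASK, taken
 * here as an assumption of the sketch.
 */
#include <stdio.h>

#define QOS_CTL_TID_MASK 0xf	/* assumed IEEE80211_QOS_CTL_TID_MASK */

int main(void)
{
	unsigned int priority = 5;	/* e.g. a video-class priority */

	printf("tid = %u\n", priority & QOS_CTL_TID_MASK);	/* -> 5 */
	return 0;
}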
  59
  60enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
  61{
  62	switch (cipher) {
  63	case WLAN_CIPHER_SUITE_WEP40:
  64		return HAL_ENCRYPT_TYPE_WEP_40;
  65	case WLAN_CIPHER_SUITE_WEP104:
  66		return HAL_ENCRYPT_TYPE_WEP_104;
  67	case WLAN_CIPHER_SUITE_TKIP:
  68		return HAL_ENCRYPT_TYPE_TKIP_MIC;
  69	case WLAN_CIPHER_SUITE_CCMP:
  70		return HAL_ENCRYPT_TYPE_CCMP_128;
  71	case WLAN_CIPHER_SUITE_CCMP_256:
  72		return HAL_ENCRYPT_TYPE_CCMP_256;
  73	case WLAN_CIPHER_SUITE_GCMP:
  74		return HAL_ENCRYPT_TYPE_GCMP_128;
  75	case WLAN_CIPHER_SUITE_GCMP_256:
  76		return HAL_ENCRYPT_TYPE_AES_GCMP_256;
  77	default:
  78		return HAL_ENCRYPT_TYPE_OPEN;
  79	}
  80}
  81
  82int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
  83		 struct ath11k_sta *arsta, struct sk_buff *skb)
  84{
  85	struct ath11k_base *ab = ar->ab;
  86	struct ath11k_dp *dp = &ab->dp;
  87	struct hal_tx_info ti = {0};
  88	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  89	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
  90	struct hal_srng *tcl_ring;
  91	struct ieee80211_hdr *hdr = (void *)skb->data;
  92	struct dp_tx_ring *tx_ring;
  93	void *hal_tcl_desc;
  94	u8 pool_id;
  95	u8 hal_ring_id;
  96	int ret;
  97	u32 ring_selector = 0;
  98	u8 ring_map = 0;
  99	bool tcl_ring_retry;
 100
 101	if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
 102		return -ESHUTDOWN;
 103
 104	if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
 105		     !ieee80211_is_data(hdr->frame_control)))
 106		return -EOPNOTSUPP;
 107
 108	pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
 109
 110	ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);
 111
 112tcl_ring_sel:
 113	tcl_ring_retry = false;
 114
 115	ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
 116	ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
 117
 118	ring_map |= BIT(ti.ring_id);
 119
 120	tx_ring = &dp->tx_ring[ti.ring_id];
 121
 122	spin_lock_bh(&tx_ring->tx_idr_lock);
 123	ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
 124			DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
 125	spin_unlock_bh(&tx_ring->tx_idr_lock);
 126
 127	if (unlikely(ret < 0)) {
 128		if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
 129		    !ab->hw_params.tcl_ring_retry) {
 130			atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 131			return -ENOSPC;
 132		}
 133
 134		/* Check if the next ring is available */
 135		ring_selector++;
 136		goto tcl_ring_sel;
 137	}
 138
 139	ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
 140		     FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
 141		     FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
 142	ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
 143
 144	if (ieee80211_has_a4(hdr->frame_control) &&
 145	    is_multicast_ether_addr(hdr->addr3) && arsta &&
 146	    arsta->use_4addr_set) {
 147		ti.meta_data_flags = arsta->tcl_metadata;
 148		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
 149	} else {
 150		ti.meta_data_flags = arvif->tcl_metadata;
 151	}
 152
 153	if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
 154		if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
 155			ti.encrypt_type =
 156				ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
 157
 158			if (ieee80211_has_protected(hdr->frame_control))
 159				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
 160		} else {
 161			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
 162		}
 163	}
 164
 165	ti.addr_search_flags = arvif->hal_addr_search_flags;
 166	ti.search_type = arvif->search_type;
 167	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
 168	ti.pkt_offset = 0;
 169	ti.lmac_id = ar->lmac_id;
 170	ti.bss_ast_hash = arvif->ast_hash;
 171	ti.bss_ast_idx = arvif->ast_idx;
 172	ti.dscp_tid_tbl_idx = 0;
 173
 174	if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
 175		   ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
 176		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
 177			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
 178			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
 179			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
 180			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
 181	}
 182
 183	if (ieee80211_vif_is_mesh(arvif->vif))
 184		ti.enable_mesh = true;
 185
 186	ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
 187
 188	ti.tid = ath11k_dp_tx_get_tid(skb);
 189
 190	switch (ti.encap_type) {
 191	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
 192		ath11k_dp_tx_encap_nwifi(skb);
 193		break;
 194	case HAL_TCL_ENCAP_TYPE_RAW:
 195		if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
 196			ret = -EINVAL;
 197			goto fail_remove_idr;
 198		}
 199		break;
 200	case HAL_TCL_ENCAP_TYPE_ETHERNET:
 201		/* no need to encap */
 202		break;
 203	case HAL_TCL_ENCAP_TYPE_802_3:
 204	default:
 205		/* TODO: Take care of other encap modes as well */
 206		ret = -EINVAL;
 207		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 208		goto fail_remove_idr;
 209	}
 210
 211	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
 212	if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
 213		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 214		ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
 215		ret = -ENOMEM;
 216		goto fail_remove_idr;
 217	}
 218
 219	ti.data_len = skb->len;
 220	skb_cb->paddr = ti.paddr;
 221	skb_cb->vif = arvif->vif;
 222	skb_cb->ar = ar;
 223
 224	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
 225	tcl_ring = &ab->hal.srng_list[hal_ring_id];
 226
 227	spin_lock_bh(&tcl_ring->lock);
 228
 229	ath11k_hal_srng_access_begin(ab, tcl_ring);
 230
 231	hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
 232	if (unlikely(!hal_tcl_desc)) {
 233		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
 234		 * desc because the desc is directly enqueued onto hw queue.
 235		 */
 236		ath11k_hal_srng_access_end(ab, tcl_ring);
 237		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
 238		spin_unlock_bh(&tcl_ring->lock);
 239		ret = -ENOMEM;
 240
  241		/* When a tx fails because this tcl ring is full, checking
  242		 * another ring for free tcl descriptors now is better than
  243		 * checking this ring up front for each pkt tx.
 244		 * Restart ring selection if some rings are not checked yet.
 245		 */
 246		if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
 247		    ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
 248			tcl_ring_retry = true;
 249			ring_selector++;
 250		}
 251
 252		goto fail_unmap_dma;
 253	}
 254
 255	ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
 256					 sizeof(struct hal_tlv_hdr), &ti);
 257
 258	ath11k_hal_srng_access_end(ab, tcl_ring);
 259
 260	ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
 261
 262	spin_unlock_bh(&tcl_ring->lock);
 263
 264	ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
 265			skb->data, skb->len);
 266
 267	atomic_inc(&ar->dp.num_tx_pending);
 268
 269	return 0;
 270
 271fail_unmap_dma:
 272	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
 273
 274fail_remove_idr:
 275	spin_lock_bh(&tx_ring->tx_idr_lock);
 276	idr_remove(&tx_ring->txbuf_idr,
 277		   FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
 278	spin_unlock_bh(&tx_ring->tx_idr_lock);
 279
 280	if (tcl_ring_retry)
 281		goto tcl_ring_sel;
 282
 283	return ret;
 284}
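
/*
 * Editor's sketch (not part of dp_tx.c): how ti.desc_id packs three fields
 * into one 32-bit cookie that the completion path later unpacks with
 * FIELD_GET(). The macros and field layout below are simplified stand-ins,
 * not the driver's actual DP_TX_DESC_ID_* definitions.
 */
#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the kernel's FIELD_PREP()/FIELD_GET() */
#define FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)  (((uint32_t)(reg) & (mask)) >> __builtin_ctz(mask))

/* assumed layout, for illustration only */
#define DESC_ID_MAC_ID	0x00000003u
#define DESC_ID_MSDU_ID	0x000ffffcu
#define DESC_ID_POOL_ID	0x00300000u

int main(void)
{
	uint32_t desc_id = FIELD_PREP(DESC_ID_MAC_ID, 1) |
			   FIELD_PREP(DESC_ID_MSDU_ID, 0x1234) |
			   FIELD_PREP(DESC_ID_POOL_ID, 2);

	printf("msdu_id=0x%x mac_id=%u pool_id=%u\n",
	       (unsigned int)FIELD_GET(DESC_ID_MSDU_ID, desc_id),
	       (unsigned int)FIELD_GET(DESC_ID_MAC_ID, desc_id),
	       (unsigned int)FIELD_GET(DESC_ID_POOL_ID, desc_id));
	return 0;
}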
 285
 286static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
 287				    int msdu_id,
 288				    struct dp_tx_ring *tx_ring)
 289{
 290	struct ath11k *ar;
 291	struct sk_buff *msdu;
 292	struct ath11k_skb_cb *skb_cb;
 293
 294	spin_lock(&tx_ring->tx_idr_lock);
 295	msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
 296	spin_unlock(&tx_ring->tx_idr_lock);
 297
 298	if (unlikely(!msdu)) {
 299		ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
 300			    msdu_id);
 301		return;
 302	}
 303
 304	skb_cb = ATH11K_SKB_CB(msdu);
 305
 306	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 307	dev_kfree_skb_any(msdu);
 308
 309	ar = ab->pdevs[mac_id].ar;
 310	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
 311		wake_up(&ar->dp.tx_empty_waitq);
 312}
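
/*
 * Editor's sketch (not part of dp_tx.c): the IDR cookie pattern shared by
 * the tx and completion paths, rebuilt on a plain array. ath11k_dp_tx()
 * stores the skb and gets a small integer back; the completion handlers
 * recover (and release) the pointer from that integer. All names below
 * are invented for illustration.
 */
#include <stdio.h>

#define IDR_SIZE 8

static void *idr_slots[IDR_SIZE];

static int idr_alloc_sim(void *ptr)	/* returns an id, or -1 when full */
{
	for (int i = 0; i < IDR_SIZE; i++) {
		if (!idr_slots[i]) {
			idr_slots[i] = ptr;
			return i;
		}
	}
	return -1;
}

static void *idr_remove_sim(int id)	/* returns and clears the entry */
{
	void *p = idr_slots[id];

	idr_slots[id] = NULL;
	return p;
}

int main(void)
{
	char skb[] = "msdu";
	int msdu_id = idr_alloc_sim(skb);	/* at ath11k_dp_tx() time */
	char *back = idr_remove_sim(msdu_id);	/* at completion time */

	printf("msdu_id=%d recovered=\"%s\"\n", msdu_id, back);
	return 0;
}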
 313
 314static void
 315ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
 316				 struct dp_tx_ring *tx_ring,
 317				 struct ath11k_dp_htt_wbm_tx_status *ts)
 318{
 319	struct ieee80211_tx_status status = { 0 };
 320	struct sk_buff *msdu;
 321	struct ieee80211_tx_info *info;
 322	struct ath11k_skb_cb *skb_cb;
 323	struct ath11k *ar;
 324	struct ath11k_peer *peer;
 325
 326	spin_lock(&tx_ring->tx_idr_lock);
 327	msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
 328	spin_unlock(&tx_ring->tx_idr_lock);
 329
 330	if (unlikely(!msdu)) {
 331		ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
 332			    ts->msdu_id);
 333		return;
 334	}
 335
 336	skb_cb = ATH11K_SKB_CB(msdu);
 337	info = IEEE80211_SKB_CB(msdu);
 338
 339	ar = skb_cb->ar;
 340
 341	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
 342		wake_up(&ar->dp.tx_empty_waitq);
 343
 344	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 345
 346	if (!skb_cb->vif) {
 347		ieee80211_free_txskb(ar->hw, msdu);
 348		return;
 349	}
 350
 351	memset(&info->status, 0, sizeof(info->status));
 352
 353	if (ts->acked) {
 354		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 355			info->flags |= IEEE80211_TX_STAT_ACK;
 356			info->status.ack_signal = ts->ack_rssi;
 357
 358			if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
 359				      ab->wmi_ab.svc_map))
 360				info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;
 361
 362			info->status.flags |=
 363				IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
 364		} else {
 365			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 366		}
 367	}
 368
 369	spin_lock_bh(&ab->base_lock);
 370	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
 371	if (!peer || !peer->sta) {
 372		ath11k_dbg(ab, ATH11K_DBG_DATA,
 373			   "dp_tx: failed to find the peer with peer_id %d\n",
 374			    ts->peer_id);
 375		spin_unlock_bh(&ab->base_lock);
 376		ieee80211_free_txskb(ar->hw, msdu);
 377		return;
 378	}
 379	spin_unlock_bh(&ab->base_lock);
 380
 381	status.sta = peer->sta;
 382	status.info = info;
 383	status.skb = msdu;
 384
 385	ieee80211_tx_status_ext(ar->hw, &status);
 386}
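
/*
 * Editor's sketch (not part of dp_tx.c): the ack_signal adjustment above.
 * Firmware reports ack RSSI in dB above the noise floor; when the
 * HW_DB2DBM service bit is absent the host adds the default noise floor
 * to get dBm. The -95 value matches ATH11K_DEFAULT_NOISE_FLOOR in recent
 * mainline, but treat it as an assumption of this sketch.
 */
#include <stdio.h>

int main(void)
{
	int ack_rssi = 40;	/* dB above noise floor, as reported by fw */
	int noise_floor = -95;	/* assumed ATH11K_DEFAULT_NOISE_FLOOR */
	int hw_db2dbm = 0;	/* WMI service bit not advertised */
	int ack_signal = ack_rssi;

	if (!hw_db2dbm)
		ack_signal += noise_floor;

	printf("ack_signal = %d dBm\n", ack_signal);	/* -> -55 dBm */
	return 0;
}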
 387
 388static void
 389ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
 390				     void *desc, u8 mac_id,
 391				     u32 msdu_id, struct dp_tx_ring *tx_ring)
 392{
 393	struct htt_tx_wbm_completion *status_desc;
 394	struct ath11k_dp_htt_wbm_tx_status ts = {0};
 395	enum hal_wbm_htt_tx_comp_status wbm_status;
 396
 397	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
 398
 399	wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
 400			       status_desc->info0);
 401	switch (wbm_status) {
 402	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
 403	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
 404	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
 405		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
 406		ts.msdu_id = msdu_id;
 407		ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
 408					status_desc->info1);
 409
 410		if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
 411			ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
 412					       status_desc->info2);
 413		else
 414			ts.peer_id = HTT_INVALID_PEER_ID;
 415
 416		ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
 417
 418		break;
 419	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
 420	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
 421		ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
 422		break;
 423	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
 424		/* This event is to be handled only when the driver decides to
 425		 * use WDS offload functionality.
 426		 */
 427		break;
 428	default:
 429		ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
 430		break;
 431	}
 432}
 433
 434static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
 435					  struct sk_buff *msdu,
 436					  struct hal_tx_status *ts)
 437{
 438	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
 439
 440	if (ts->try_cnt > 1) {
 441		peer_stats->retry_pkts += ts->try_cnt - 1;
 442		peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;
 443
 444		if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
 445			peer_stats->failed_pkts += 1;
 446			peer_stats->failed_bytes += msdu->len;
 447		}
 448	}
 449}
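
/*
 * Editor's sketch (not part of dp_tx.c): the retry accounting above.
 * try_cnt counts every transmit attempt, so a frame that needed three
 * attempts contributes two retries, each of which resent the full MSDU.
 */
#include <stdio.h>

int main(void)
{
	unsigned int try_cnt = 3, msdu_len = 1500;

	if (try_cnt > 1) {
		printf("retry_pkts  += %u\n", try_cnt - 1);		 /* 2 */
		printf("retry_bytes += %u\n", (try_cnt - 1) * msdu_len); /* 3000 */
	}
	return 0;
}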
 450
 451void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
 452{
 453	struct ath11k_base *ab = ar->ab;
 454	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
 455	enum hal_tx_rate_stats_pkt_type pkt_type;
 456	enum hal_tx_rate_stats_sgi sgi;
 457	enum hal_tx_rate_stats_bw bw;
 458	struct ath11k_peer *peer;
 459	struct ath11k_sta *arsta;
 460	struct ieee80211_sta *sta;
 461	u16 rate, ru_tones;
 462	u8 mcs, rate_idx = 0, ofdma;
 463	int ret;
 464
 465	spin_lock_bh(&ab->base_lock);
 466	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
 467	if (!peer || !peer->sta) {
 468		ath11k_dbg(ab, ATH11K_DBG_DP_TX,
 469			   "failed to find the peer by id %u\n", ts->peer_id);
 470		goto err_out;
 471	}
 472
 473	sta = peer->sta;
 474	arsta = ath11k_sta_to_arsta(sta);
 475
 476	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
 477	pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
 478			     ts->rate_stats);
 479	mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
 480			ts->rate_stats);
 481	sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
 482			ts->rate_stats);
 483	bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
 484	ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats);
 485	ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats);
 486
  487	/* Prefer the real NSS value in arsta->last_txrate.nss; if it is
  488	 * invalid, fall back to the NSS value negotiated at assoc.
 489	 */
 490	if (arsta->last_txrate.nss)
 491		arsta->txrate.nss = arsta->last_txrate.nss;
 492	else
 493		arsta->txrate.nss = arsta->peer_nss;
 494
 495	if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
 496	    pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
 497		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
 498							    pkt_type,
 499							    &rate_idx,
 500							    &rate);
 501		if (ret < 0)
 502			goto err_out;
 503		arsta->txrate.legacy = rate;
 504	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
 505		if (mcs > 7) {
 506			ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
 507			goto err_out;
 508		}
 509
 510		if (arsta->txrate.nss != 0)
 511			arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1);
 512		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
 513		if (sgi)
 514			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
 515	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
 516		if (mcs > 9) {
 517			ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
 518			goto err_out;
 519		}
 520
 521		arsta->txrate.mcs = mcs;
 522		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
 523		if (sgi)
 524			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
 525	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
 526		if (mcs > 11) {
 527			ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs);
 528			goto err_out;
 529		}
 530
 531		arsta->txrate.mcs = mcs;
 532		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
 533		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
 534	}
 535
 536	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
 537	if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
 538		arsta->txrate.bw = RATE_INFO_BW_HE_RU;
 539		arsta->txrate.he_ru_alloc =
 540			ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
 541	}
 542
 543	if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
 544		ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
 545
 546err_out:
 547	spin_unlock_bh(&ab->base_lock);
 548}
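
/*
 * Editor's sketch (not part of dp_tx.c): the HT rate-index folding above.
 * 802.11n numbers spatial streams into the MCS index itself: MCS 0-7 are
 * one stream, 8-15 two streams, and so on, hence mcs + 8 * (nss - 1).
 */
#include <stdio.h>

int main(void)
{
	int mcs = 5, nss = 2;

	printf("HT rate index = %d\n", mcs + 8 * (nss - 1));	/* -> 13 */
	return 0;
}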
 549
 550static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
 551				       struct sk_buff *msdu,
 552				       struct hal_tx_status *ts)
 553{
 554	struct ieee80211_tx_status status = { 0 };
 555	struct ieee80211_rate_status status_rate = { 0 };
 556	struct ath11k_base *ab = ar->ab;
 557	struct ieee80211_tx_info *info;
 558	struct ath11k_skb_cb *skb_cb;
 559	struct ath11k_peer *peer;
 560	struct ath11k_sta *arsta;
 561	struct rate_info rate;
 562
 563	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
 564		/* Must not happen */
 565		return;
 566	}
 567
 568	skb_cb = ATH11K_SKB_CB(msdu);
 569
 570	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 571
 572	if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
 573		ieee80211_free_txskb(ar->hw, msdu);
 574		return;
 575	}
 576
 577	if (unlikely(!skb_cb->vif)) {
 578		ieee80211_free_txskb(ar->hw, msdu);
 579		return;
 580	}
 581
 582	info = IEEE80211_SKB_CB(msdu);
 583	memset(&info->status, 0, sizeof(info->status));
 584
  585	/* skip tx rate update from ieee80211_status */
 586	info->status.rates[0].idx = -1;
 587
 588	if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
 589	    !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 590		info->flags |= IEEE80211_TX_STAT_ACK;
 591		info->status.ack_signal = ts->ack_rssi;
 592
 593		if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
 594			      ab->wmi_ab.svc_map))
 595			info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;
 596
 597		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
 598	}
 599
 600	if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
 601	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
 602		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 603
 604	if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ||
 605	    ab->hw_params.single_pdev_only) {
 606		if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
 607			if (ar->last_ppdu_id == 0) {
 608				ar->last_ppdu_id = ts->ppdu_id;
 609			} else if (ar->last_ppdu_id == ts->ppdu_id ||
 610				   ar->cached_ppdu_id == ar->last_ppdu_id) {
 611				ar->cached_ppdu_id = ar->last_ppdu_id;
 612				ar->cached_stats.is_ampdu = true;
 613				ath11k_dp_tx_update_txcompl(ar, ts);
 614				memset(&ar->cached_stats, 0,
 615				       sizeof(struct ath11k_per_peer_tx_stats));
 616			} else {
 617				ar->cached_stats.is_ampdu = false;
 618				ath11k_dp_tx_update_txcompl(ar, ts);
 619				memset(&ar->cached_stats, 0,
 620				       sizeof(struct ath11k_per_peer_tx_stats));
 621			}
 622			ar->last_ppdu_id = ts->ppdu_id;
 623		}
 624
 625		ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
 626	}
 627
 628	spin_lock_bh(&ab->base_lock);
 629	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
 630	if (!peer || !peer->sta) {
 631		ath11k_dbg(ab, ATH11K_DBG_DATA,
 632			   "dp_tx: failed to find the peer with peer_id %d\n",
 633			    ts->peer_id);
 634		spin_unlock_bh(&ab->base_lock);
 635		ieee80211_free_txskb(ar->hw, msdu);
 636		return;
 637	}
 638	arsta = ath11k_sta_to_arsta(peer->sta);
 639	status.sta = peer->sta;
 640	status.skb = msdu;
 641	status.info = info;
 642	rate = arsta->last_txrate;
 643
 644	status_rate.rate_idx = rate;
 645	status_rate.try_count = 1;
 646
 647	status.rates = &status_rate;
 648	status.n_rates = 1;
 649
 650	spin_unlock_bh(&ab->base_lock);
 651
 652	ieee80211_tx_status_ext(ar->hw, &status);
 653}
 654
 655static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
 656					     struct hal_wbm_release_ring *desc,
 657					     struct hal_tx_status *ts)
 658{
 659	ts->buf_rel_source =
 660		FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
 661	if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
 662		     ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))
 663		return;
 664
 665	if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW))
 666		return;
 667
 668	ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
 669			       desc->info0);
 670	ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
 671				desc->info1);
 672	ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
 673				desc->info1);
 674	ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
 675				 desc->info2);
 676	if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
 677		ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
 678	ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
 679	ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
 680	if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
 681		ts->rate_stats = desc->rate_stats.info0;
 682	else
 683		ts->rate_stats = 0;
 684}
 685
 686void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
 687{
 688	struct ath11k *ar;
 689	struct ath11k_dp *dp = &ab->dp;
 690	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
 691	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
 692	struct sk_buff *msdu;
 693	struct hal_tx_status ts = { 0 };
 694	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
 695	u32 *desc;
 696	u32 msdu_id;
 697	u8 mac_id;
 698
 699	spin_lock_bh(&status_ring->lock);
 700
 701	ath11k_hal_srng_access_begin(ab, status_ring);
 702
 703	while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
 704		tx_ring->tx_status_tail) &&
 705	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
 706		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
 707		       desc, sizeof(struct hal_wbm_release_ring));
 708		tx_ring->tx_status_head =
 709			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
 710	}
 711
 712	if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
 713		     (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
 714		      tx_ring->tx_status_tail))) {
 715		/* TODO: Process pending tx_status messages when kfifo_is_full() */
 716		ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
 717	}
 718
 719	ath11k_hal_srng_access_end(ab, status_ring);
 720
 721	spin_unlock_bh(&status_ring->lock);
 722
 723	while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
 724		struct hal_wbm_release_ring *tx_status;
 725		u32 desc_id;
 726
 727		tx_ring->tx_status_tail =
 728			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
 729		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
 730		ath11k_dp_tx_status_parse(ab, tx_status, &ts);
 731
 732		desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
 733				    tx_status->buf_addr_info.info1);
 734		mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
 735		msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
 736
 737		if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
 738			ath11k_dp_tx_process_htt_tx_complete(ab,
 739							     (void *)tx_status,
 740							     mac_id, msdu_id,
 741							     tx_ring);
 742			continue;
 743		}
 744
 745		spin_lock(&tx_ring->tx_idr_lock);
 746		msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
 747		if (unlikely(!msdu)) {
 748			ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
 749				    msdu_id);
 750			spin_unlock(&tx_ring->tx_idr_lock);
 751			continue;
 752		}
 753
 754		spin_unlock(&tx_ring->tx_idr_lock);
 755
 756		ar = ab->pdevs[mac_id].ar;
 757
 758		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
 759			wake_up(&ar->dp.tx_empty_waitq);
 760
 761		ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
 762	}
 763}
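
/*
 * Editor's sketch (not part of dp_tx.c): the tx_status ring bookkeeping
 * above, assuming ATH11K_TX_COMPL_NEXT(i) is "(i + 1) % size" and that the
 * tail starts one slot behind the head so the ring reads as empty; both
 * are assumptions of this sketch, not quotes of the driver.
 */
#include <stdio.h>

#define RING_SIZE 4
#define NEXT(i) (((i) + 1) % RING_SIZE)

int main(void)
{
	int head = 0, tail = RING_SIZE - 1;

	/* producer: stop before head catches up with tail (ring "full") */
	int produced = 0;
	while (NEXT(head) != tail) {
		/* memcpy(&ring[head], desc, ...) would go here */
		head = NEXT(head);
		produced++;
	}

	/* consumer: advance tail, then process ring[tail], until it meets head */
	int consumed = 0;
	while (NEXT(tail) != head) {
		tail = NEXT(tail);
		/* process ring[tail] here */
		consumed++;
	}

	printf("produced=%d consumed=%d\n", produced, consumed); /* 2 and 2 */
	return 0;
}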
 764
 765int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
 766			      enum hal_reo_cmd_type type,
 767			      struct ath11k_hal_reo_cmd *cmd,
 768			      void (*cb)(struct ath11k_dp *, void *,
 769					 enum hal_reo_cmd_status))
 770{
 771	struct ath11k_dp *dp = &ab->dp;
 772	struct dp_reo_cmd *dp_cmd;
 773	struct hal_srng *cmd_ring;
 774	int cmd_num;
 775
 776	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
 777		return -ESHUTDOWN;
 778
 779	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 780	cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
 781
  782	/* cmd_num starts from 1; on failure, return the error code */
 783	if (cmd_num < 0)
 784		return cmd_num;
 785
  786	/* reo cmd ring descriptors have cmd_num starting from 1 */
 787	if (cmd_num == 0)
 788		return -EINVAL;
 789
 790	if (!cb)
 791		return 0;
 792
  793	/* Can this be optimized so that we keep the pending command list only
  794	 * for the tid delete command, freeing the resource on the command
  795	 * status indication?
 796	 */
 797	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
 798
 799	if (!dp_cmd)
 800		return -ENOMEM;
 801
 802	memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
 803	dp_cmd->cmd_num = cmd_num;
 804	dp_cmd->handler = cb;
 805
 806	spin_lock_bh(&dp->reo_cmd_lock);
 807	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
 808	spin_unlock_bh(&dp->reo_cmd_lock);
 809
 810	return 0;
 811}
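
/*
 * Editor's sketch (not part of dp_tx.c): the pending-command pattern above,
 * reduced to a userspace singly linked list. A command gets a number when
 * it is sent; the node carries the callback until the status indication
 * arrives. All names below are invented for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

struct pending_cmd {
	int cmd_num;
	void (*handler)(int cmd_num, int status);
	struct pending_cmd *next;
};

static struct pending_cmd *pending;

static void on_status(int cmd_num, int status)
{
	printf("cmd %d completed with status %d\n", cmd_num, status);
}

static void queue_cmd(int cmd_num, void (*cb)(int, int))
{
	struct pending_cmd *c = calloc(1, sizeof(*c));

	if (!c)
		return;
	c->cmd_num = cmd_num;
	c->handler = cb;
	c->next = pending;
	pending = c;
}

static void complete_cmd(int cmd_num, int status)
{
	for (struct pending_cmd **p = &pending; *p; p = &(*p)->next) {
		if ((*p)->cmd_num == cmd_num) {
			struct pending_cmd *c = *p;

			*p = c->next;	/* unlink, like the reo_cmd_list walk */
			c->handler(cmd_num, status);
			free(c);
			return;
		}
	}
}

int main(void)
{
	queue_cmd(1, on_status);	/* ath11k_dp_tx_send_reo_cmd() time */
	complete_cmd(1, 0);		/* status-indication time */
	return 0;
}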
 812
 813static int
 814ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
 815			      int mac_id, u32 ring_id,
 816			      enum hal_ring_type ring_type,
 817			      enum htt_srng_ring_type *htt_ring_type,
 818			      enum htt_srng_ring_id *htt_ring_id)
 819{
 820	int lmac_ring_id_offset = 0;
 821	int ret = 0;
 822
 823	switch (ring_type) {
 824	case HAL_RXDMA_BUF:
 825		lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
 826
  827		/* for QCA6390, the host hands rx buffers to the fw and the
  828		 * fw fills the rxbuf ring for each rxdma
 829		 */
 830		if (!ab->hw_params.rx_mac_buf_ring) {
 831			if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
 832					  lmac_ring_id_offset) ||
 833				ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
 834					lmac_ring_id_offset))) {
 835				ret = -EINVAL;
 836			}
 837			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
 838			*htt_ring_type = HTT_SW_TO_HW_RING;
 839		} else {
 840			if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
 841				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
 842				*htt_ring_type = HTT_SW_TO_SW_RING;
 843			} else {
 844				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
 845				*htt_ring_type = HTT_SW_TO_HW_RING;
 846			}
 847		}
 848		break;
 849	case HAL_RXDMA_DST:
 850		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
 851		*htt_ring_type = HTT_HW_TO_SW_RING;
 852		break;
 853	case HAL_RXDMA_MONITOR_BUF:
 854		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
 855		*htt_ring_type = HTT_SW_TO_HW_RING;
 856		break;
 857	case HAL_RXDMA_MONITOR_STATUS:
 858		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
 859		*htt_ring_type = HTT_SW_TO_HW_RING;
 860		break;
 861	case HAL_RXDMA_MONITOR_DST:
 862		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
 863		*htt_ring_type = HTT_HW_TO_SW_RING;
 864		break;
 865	case HAL_RXDMA_MONITOR_DESC:
 866		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
 867		*htt_ring_type = HTT_SW_TO_HW_RING;
 868		break;
 869	default:
  870		ath11k_warn(ab, "Unsupported ring type in DP: %d\n", ring_type);
 871		ret = -EINVAL;
 872	}
 873	return ret;
 874}
 875
 876int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
 877				int mac_id, enum hal_ring_type ring_type)
 878{
 879	struct htt_srng_setup_cmd *cmd;
 880	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
 881	struct hal_srng_params params;
 882	struct sk_buff *skb;
 883	u32 ring_entry_sz;
 884	int len = sizeof(*cmd);
 885	dma_addr_t hp_addr, tp_addr;
 886	enum htt_srng_ring_type htt_ring_type;
 887	enum htt_srng_ring_id htt_ring_id;
 888	int ret;
 889
 890	skb = ath11k_htc_alloc_skb(ab, len);
 891	if (!skb)
 892		return -ENOMEM;
 893
 894	memset(&params, 0, sizeof(params));
 895	ath11k_hal_srng_get_params(ab, srng, &params);
 896
 897	hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
 898	tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);
 899
 900	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
 901					    ring_type, &htt_ring_type,
 902					    &htt_ring_id);
 903	if (ret)
 904		goto err_free;
 905
 906	skb_put(skb, len);
 907	cmd = (struct htt_srng_setup_cmd *)skb->data;
 908	cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
 909				HTT_H2T_MSG_TYPE_SRING_SETUP);
 910	if (htt_ring_type == HTT_SW_TO_HW_RING ||
 911	    htt_ring_type == HTT_HW_TO_SW_RING)
 912		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
 913					 DP_SW2HW_MACID(mac_id));
 914	else
 915		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
 916					 mac_id);
 917	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
 918				 htt_ring_type);
 919	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);
 920
 921	cmd->ring_base_addr_lo = params.ring_base_paddr &
 922				 HAL_ADDR_LSB_REG_MASK;
 923
 924	cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
 925				 HAL_ADDR_MSB_REG_SHIFT;
 926
 927	ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
 928	if (ret < 0)
 929		goto err_free;
 930
 931	ring_entry_sz = ret;
 932
 933	ring_entry_sz >>= 2;
 934	cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
 935				ring_entry_sz);
 936	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
 937				 params.num_entries * ring_entry_sz);
 938	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
 939				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
 940	cmd->info1 |= FIELD_PREP(
 941			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
 942			!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
 943	cmd->info1 |= FIELD_PREP(
 944			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
 945			!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
 946	if (htt_ring_type == HTT_SW_TO_HW_RING)
 947		cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;
 948
 949	cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
 950	cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
 951					      HAL_ADDR_MSB_REG_SHIFT;
 952
 953	cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
 954	cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
 955					      HAL_ADDR_MSB_REG_SHIFT;
 956
 957	cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr);
 958	cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr);
 959	cmd->msi_data = params.msi_data;
 960
 961	cmd->intr_info = FIELD_PREP(
 962			HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
 963			params.intr_batch_cntr_thres_entries * ring_entry_sz);
 964	cmd->intr_info |= FIELD_PREP(
 965			HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
 966			params.intr_timer_thres_us >> 3);
 967
 968	cmd->info2 = 0;
 969	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
 970		cmd->info2 = FIELD_PREP(
 971				HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
 972				params.low_threshold);
 973	}
 974
 975	ath11k_dbg(ab, ATH11K_DBG_DP_TX,
 976		   "htt srng setup msi_addr_lo 0x%x msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d intr_info 0x%x flags 0x%x\n",
 977		   cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
 978		   cmd->msi_data, ring_id, ring_type, cmd->intr_info, cmd->info2);
 979
 980	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
 981	if (ret)
 982		goto err_free;
 983
 984	return 0;
 985
 986err_free:
 987	dev_kfree_skb_any(skb);
 988
 989	return ret;
 990}
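
/*
 * Editor's sketch (not part of dp_tx.c): the size and address encodings
 * used above. Ring sizes are programmed in 32-bit words (hence the >> 2),
 * and DMA addresses are split into 32-bit lo/hi halves, which is what the
 * HAL_ADDR_LSB_REG_MASK / HAL_ADDR_MSB_REG_SHIFT pair amounts to here;
 * treat that reading as an assumption of the sketch.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ring_base_paddr = 0x0000000fedcba980ull;
	uint32_t lo = (uint32_t)(ring_base_paddr & 0xffffffffu);
	uint32_t hi = (uint32_t)(ring_base_paddr >> 32);
	unsigned int entry_bytes = 32;	/* example entry size */
	unsigned int num_entries = 1024;

	printf("base lo=0x%08x hi=0x%x\n",
	       (unsigned int)lo, (unsigned int)hi);
	printf("entry = %u words, ring = %u words\n",
	       entry_bytes >> 2, num_entries * (entry_bytes >> 2));
	return 0;
}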
 991
 992#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
 993
 994int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
 995{
 996	struct ath11k_dp *dp = &ab->dp;
 997	struct sk_buff *skb;
 998	struct htt_ver_req_cmd *cmd;
 999	int len = sizeof(*cmd);
1000	int ret;
1001
1002	init_completion(&dp->htt_tgt_version_received);
1003
1004	skb = ath11k_htc_alloc_skb(ab, len);
1005	if (!skb)
1006		return -ENOMEM;
1007
1008	skb_put(skb, len);
1009	cmd = (struct htt_ver_req_cmd *)skb->data;
1010	cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
1011				       HTT_H2T_MSG_TYPE_VERSION_REQ);
1012
1013	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
1014	if (ret) {
1015		dev_kfree_skb_any(skb);
1016		return ret;
1017	}
1018
1019	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
1020					  HTT_TARGET_VERSION_TIMEOUT_HZ);
1021	if (ret == 0) {
1022		ath11k_warn(ab, "htt target version request timed out\n");
1023		return -ETIMEDOUT;
1024	}
1025
1026	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
 1027		ath11k_err(ab, "unsupported htt major version %d, supported version is %d\n",
1028			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
1029		return -EOPNOTSUPP;
1030	}
1031
1032	return 0;
1033}
1034
1035int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
1036{
1037	struct ath11k_base *ab = ar->ab;
1038	struct ath11k_dp *dp = &ab->dp;
1039	struct sk_buff *skb;
1040	struct htt_ppdu_stats_cfg_cmd *cmd;
1041	int len = sizeof(*cmd);
1042	u8 pdev_mask;
1043	int ret;
1044	int i;
1045
1046	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
1047		skb = ath11k_htc_alloc_skb(ab, len);
1048		if (!skb)
1049			return -ENOMEM;
1050
1051		skb_put(skb, len);
1052		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
1053		cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
1054				      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
1055
1056		pdev_mask = 1 << (ar->pdev_idx + i);
1057		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
1058		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
1059
1060		ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
1061		if (ret) {
1062			dev_kfree_skb_any(skb);
1063			return ret;
1064		}
1065	}
1066
1067	return 0;
1068}
1069
1070int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
1071				     int mac_id, enum hal_ring_type ring_type,
1072				     int rx_buf_size,
1073				     struct htt_rx_ring_tlv_filter *tlv_filter)
1074{
1075	struct htt_rx_ring_selection_cfg_cmd *cmd;
1076	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
1077	struct hal_srng_params params;
1078	struct sk_buff *skb;
1079	int len = sizeof(*cmd);
1080	enum htt_srng_ring_type htt_ring_type;
1081	enum htt_srng_ring_id htt_ring_id;
1082	int ret;
1083
1084	skb = ath11k_htc_alloc_skb(ab, len);
1085	if (!skb)
1086		return -ENOMEM;
1087
1088	memset(&params, 0, sizeof(params));
1089	ath11k_hal_srng_get_params(ab, srng, &params);
1090
1091	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
1092					    ring_type, &htt_ring_type,
1093					    &htt_ring_id);
1094	if (ret)
1095		goto err_free;
1096
1097	skb_put(skb, len);
1098	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
1099	cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
1100				HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
1101	if (htt_ring_type == HTT_SW_TO_HW_RING ||
1102	    htt_ring_type == HTT_HW_TO_SW_RING)
1103		cmd->info0 |=
1104			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
1105				   DP_SW2HW_MACID(mac_id));
1106	else
1107		cmd->info0 |=
1108			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
1109				   mac_id);
1110	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
1111				 htt_ring_id);
1112	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
1113				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
1114	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
1115				 !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
1116
1117	cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
1118				rx_buf_size);
1119	cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
1120	cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
1121	cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
1122	cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
1123	cmd->rx_filter_tlv = tlv_filter->rx_filter;
1124
1125	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
1126	if (ret)
1127		goto err_free;
1128
1129	return 0;
1130
1131err_free:
1132	dev_kfree_skb_any(skb);
1133
1134	return ret;
1135}
1136
1137int
1138ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
1139				   struct htt_ext_stats_cfg_params *cfg_params,
1140				   u64 cookie)
1141{
1142	struct ath11k_base *ab = ar->ab;
1143	struct ath11k_dp *dp = &ab->dp;
1144	struct sk_buff *skb;
1145	struct htt_ext_stats_cfg_cmd *cmd;
1146	u32 pdev_id;
1147	int len = sizeof(*cmd);
1148	int ret;
1149
1150	skb = ath11k_htc_alloc_skb(ab, len);
1151	if (!skb)
1152		return -ENOMEM;
1153
1154	skb_put(skb, len);
1155
1156	cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
1157	memset(cmd, 0, sizeof(*cmd));
1158	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
1159
1160	if (ab->hw_params.single_pdev_only)
1161		pdev_id = ath11k_mac_get_target_pdev_id(ar);
1162	else
1163		pdev_id = ar->pdev->pdev_id;
1164
1165	cmd->hdr.pdev_mask = 1 << pdev_id;
1166
1167	cmd->hdr.stats_type = type;
1168	cmd->cfg_param0 = cfg_params->cfg0;
1169	cmd->cfg_param1 = cfg_params->cfg1;
1170	cmd->cfg_param2 = cfg_params->cfg2;
1171	cmd->cfg_param3 = cfg_params->cfg3;
1172	cmd->cookie_lsb = lower_32_bits(cookie);
1173	cmd->cookie_msb = upper_32_bits(cookie);
1174
1175	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
1176	if (ret) {
1177		ath11k_warn(ab, "failed to send htt type stats request: %d",
1178			    ret);
1179		dev_kfree_skb_any(skb);
1180		return ret;
1181	}
1182
1183	return 0;
1184}
1185
1186int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
1187{
1188	struct ath11k_pdev_dp *dp = &ar->dp;
1189	struct ath11k_base *ab = ar->ab;
1190	struct htt_rx_ring_tlv_filter tlv_filter = {0};
1191	int ret = 0, ring_id = 0, i;
1192
1193	if (ab->hw_params.full_monitor_mode) {
1194		ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab,
1195							 dp->mac_id, !reset);
1196		if (ret < 0) {
1197			ath11k_err(ab, "failed to setup full monitor %d\n", ret);
1198			return ret;
1199		}
1200	}
1201
1202	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
1203
1204	if (!reset) {
1205		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
1206		tlv_filter.pkt_filter_flags0 =
1207					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
1208					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
1209		tlv_filter.pkt_filter_flags1 =
1210					HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
1211					HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
1212		tlv_filter.pkt_filter_flags2 =
1213					HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
1214					HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
1215		tlv_filter.pkt_filter_flags3 =
1216					HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
1217					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
1218					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
1219					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
1220	}
1221
1222	if (ab->hw_params.rxdma1_enable) {
1223		ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
1224						       HAL_RXDMA_MONITOR_BUF,
1225						       DP_RXDMA_REFILL_RING_SIZE,
1226						       &tlv_filter);
1227	} else if (!reset) {
1228		/* set in monitor mode only */
1229		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
1230			ring_id = dp->rx_mac_buf_ring[i].ring_id;
1231			ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
1232							       dp->mac_id + i,
1233							       HAL_RXDMA_BUF,
1234							       1024,
1235							       &tlv_filter);
1236		}
1237	}
1238
1239	if (ret)
1240		return ret;
1241
1242	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
1243		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
1244		if (!reset) {
1245			tlv_filter.rx_filter =
1246					HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
1247		} else {
1248			tlv_filter = ath11k_mac_mon_status_filter_default;
1249
1250			if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
1251				tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
1252		}
1253
1254		ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
1255						       dp->mac_id + i,
1256						       HAL_RXDMA_MONITOR_STATUS,
1257						       DP_RXDMA_REFILL_RING_SIZE,
1258						       &tlv_filter);
1259	}
1260
1261	if (!ar->ab->hw_params.rxdma1_enable)
1262		mod_timer(&ar->ab->mon_reap_timer, jiffies +
1263			  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
1264
1265	return ret;
1266}
1267
1268int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
1269				       bool config)
1270{
1271	struct htt_rx_full_monitor_mode_cfg_cmd *cmd;
1272	struct sk_buff *skb;
1273	int ret, len = sizeof(*cmd);
1274
1275	skb = ath11k_htc_alloc_skb(ab, len);
1276	if (!skb)
1277		return -ENOMEM;
1278
1279	skb_put(skb, len);
1280	cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data;
1281	memset(cmd, 0, sizeof(*cmd));
1282	cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE,
1283				HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
1284
1285	cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id);
1286
1287	cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE |
1288		   FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING,
1289			      HTT_RX_MON_RING_SW);
1290	if (config) {
1291		cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END |
1292			    HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END;
1293	}
1294
1295	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
1296	if (ret)
1297		goto err_free;
1298
1299	return 0;
1300
1301err_free:
1302	dev_kfree_skb_any(skb);
1303
1304	return ret;
1305}
v5.14.15
   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 
   4 */
   5
   6#include "core.h"
   7#include "dp_tx.h"
   8#include "debug.h"
   9#include "debugfs_sta.h"
  10#include "hw.h"
  11#include "peer.h"
 
  12
  13static enum hal_tcl_encap_type
  14ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
  15{
  16	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
  17	struct ath11k_base *ab = arvif->ar->ab;
  18
  19	if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
  20		return HAL_TCL_ENCAP_TYPE_RAW;
  21
  22	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
  23		return HAL_TCL_ENCAP_TYPE_ETHERNET;
  24
  25	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
  26}
  27
  28static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
  29{
  30	struct ieee80211_hdr *hdr = (void *)skb->data;
  31	u8 *qos_ctl;
  32
  33	if (!ieee80211_is_data_qos(hdr->frame_control))
  34		return;
  35
  36	qos_ctl = ieee80211_get_qos_ctl(hdr);
  37	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
  38		skb->data, (void *)qos_ctl - (void *)skb->data);
  39	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
  40
  41	hdr = (void *)skb->data;
  42	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
  43}
  44
  45static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
  46{
  47	struct ieee80211_hdr *hdr = (void *)skb->data;
  48	struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);
  49
  50	if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
  51		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
  52	else if (!ieee80211_is_data_qos(hdr->frame_control))
  53		return HAL_DESC_REO_NON_QOS_TID;
  54	else
  55		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
  56}
  57
  58enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
  59{
  60	switch (cipher) {
  61	case WLAN_CIPHER_SUITE_WEP40:
  62		return HAL_ENCRYPT_TYPE_WEP_40;
  63	case WLAN_CIPHER_SUITE_WEP104:
  64		return HAL_ENCRYPT_TYPE_WEP_104;
  65	case WLAN_CIPHER_SUITE_TKIP:
  66		return HAL_ENCRYPT_TYPE_TKIP_MIC;
  67	case WLAN_CIPHER_SUITE_CCMP:
  68		return HAL_ENCRYPT_TYPE_CCMP_128;
  69	case WLAN_CIPHER_SUITE_CCMP_256:
  70		return HAL_ENCRYPT_TYPE_CCMP_256;
  71	case WLAN_CIPHER_SUITE_GCMP:
  72		return HAL_ENCRYPT_TYPE_GCMP_128;
  73	case WLAN_CIPHER_SUITE_GCMP_256:
  74		return HAL_ENCRYPT_TYPE_AES_GCMP_256;
  75	default:
  76		return HAL_ENCRYPT_TYPE_OPEN;
  77	}
  78}
  79
  80int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
  81		 struct sk_buff *skb)
  82{
  83	struct ath11k_base *ab = ar->ab;
  84	struct ath11k_dp *dp = &ab->dp;
  85	struct hal_tx_info ti = {0};
  86	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  87	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
  88	struct hal_srng *tcl_ring;
  89	struct ieee80211_hdr *hdr = (void *)skb->data;
  90	struct dp_tx_ring *tx_ring;
  91	void *hal_tcl_desc;
  92	u8 pool_id;
  93	u8 hal_ring_id;
  94	int ret;
  95	u8 ring_selector = 0, ring_map = 0;
 
  96	bool tcl_ring_retry;
  97
  98	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
  99		return -ESHUTDOWN;
 100
 101	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
 102	    !ieee80211_is_data(hdr->frame_control))
 103		return -ENOTSUPP;
 104
 105	pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
 106
 107	/* Let the default ring selection be based on current processor
 108	 * number, where one of the 3 tcl rings are selected based on
 109	 * the smp_processor_id(). In case that ring
 110	 * is full/busy, we resort to other available rings.
 111	 * If all rings are full, we drop the packet.
 112	 * //TODO Add throttling logic when all rings are full
 113	 */
 114	ring_selector = smp_processor_id();
 115
 116tcl_ring_sel:
 117	tcl_ring_retry = false;
 118	/* For some chip, it can only use tcl0 to tx */
 119	if (ar->ab->hw_params.tcl_0_only)
 120		ti.ring_id = 0;
 121	else
 122		ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX;
 123
 124	ring_map |= BIT(ti.ring_id);
 125
 126	tx_ring = &dp->tx_ring[ti.ring_id];
 127
 128	spin_lock_bh(&tx_ring->tx_idr_lock);
 129	ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
 130			DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
 131	spin_unlock_bh(&tx_ring->tx_idr_lock);
 132
 133	if (ret < 0) {
 134		if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1)) {
 
 135			atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 136			return -ENOSPC;
 137		}
 138
 139		/* Check if the next ring is available */
 140		ring_selector++;
 141		goto tcl_ring_sel;
 142	}
 143
 144	ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
 145		     FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
 146		     FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
 147	ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
 148	ti.meta_data_flags = arvif->tcl_metadata;
 149
 150	if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
 
 
 
 
 
 
 
 
 
 151		if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
 152			ti.encrypt_type =
 153				ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
 154
 155			if (ieee80211_has_protected(hdr->frame_control))
 156				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
 157		} else {
 158			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
 159		}
 160	}
 161
 162	ti.addr_search_flags = arvif->hal_addr_search_flags;
 163	ti.search_type = arvif->search_type;
 164	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
 165	ti.pkt_offset = 0;
 166	ti.lmac_id = ar->lmac_id;
 167	ti.bss_ast_hash = arvif->ast_hash;
 168	ti.bss_ast_idx = arvif->ast_idx;
 169	ti.dscp_tid_tbl_idx = 0;
 170
 171	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 172	    ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
 173		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
 174			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
 175			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
 176			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
 177			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
 178	}
 179
 180	if (ieee80211_vif_is_mesh(arvif->vif))
 181		ti.enable_mesh = true;
 182
 183	ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
 184
 185	ti.tid = ath11k_dp_tx_get_tid(skb);
 186
 187	switch (ti.encap_type) {
 188	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
 189		ath11k_dp_tx_encap_nwifi(skb);
 190		break;
 191	case HAL_TCL_ENCAP_TYPE_RAW:
 192		if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
 193			ret = -EINVAL;
 194			goto fail_remove_idr;
 195		}
 196		break;
 197	case HAL_TCL_ENCAP_TYPE_ETHERNET:
 198		/* no need to encap */
 199		break;
 200	case HAL_TCL_ENCAP_TYPE_802_3:
 201	default:
 202		/* TODO: Take care of other encap modes as well */
 203		ret = -EINVAL;
 204		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 205		goto fail_remove_idr;
 206	}
 207
 208	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
 209	if (dma_mapping_error(ab->dev, ti.paddr)) {
 210		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 211		ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
 212		ret = -ENOMEM;
 213		goto fail_remove_idr;
 214	}
 215
 216	ti.data_len = skb->len;
 217	skb_cb->paddr = ti.paddr;
 218	skb_cb->vif = arvif->vif;
 219	skb_cb->ar = ar;
 220
 221	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
 222	tcl_ring = &ab->hal.srng_list[hal_ring_id];
 223
 224	spin_lock_bh(&tcl_ring->lock);
 225
 226	ath11k_hal_srng_access_begin(ab, tcl_ring);
 227
 228	hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
 229	if (!hal_tcl_desc) {
 230		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
 231		 * desc because the desc is directly enqueued onto hw queue.
 232		 */
 233		ath11k_hal_srng_access_end(ab, tcl_ring);
 234		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
 235		spin_unlock_bh(&tcl_ring->lock);
 236		ret = -ENOMEM;
 237
 238		/* Checking for available tcl descritors in another ring in
 239		 * case of failure due to full tcl ring now, is better than
 240		 * checking this ring earlier for each pkt tx.
 241		 * Restart ring selection if some rings are not checked yet.
 242		 */
 243		if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1) &&
 244		    !ar->ab->hw_params.tcl_0_only) {
 245			tcl_ring_retry = true;
 246			ring_selector++;
 247		}
 248
 249		goto fail_unmap_dma;
 250	}
 251
 252	ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
 253					 sizeof(struct hal_tlv_hdr), &ti);
 254
 255	ath11k_hal_srng_access_end(ab, tcl_ring);
 256
 257	ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
 258
 259	spin_unlock_bh(&tcl_ring->lock);
 260
 261	ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
 262			skb->data, skb->len);
 263
 264	atomic_inc(&ar->dp.num_tx_pending);
 265
 266	return 0;
 267
 268fail_unmap_dma:
 269	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
 270
 271fail_remove_idr:
 272	spin_lock_bh(&tx_ring->tx_idr_lock);
 273	idr_remove(&tx_ring->txbuf_idr,
 274		   FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
 275	spin_unlock_bh(&tx_ring->tx_idr_lock);
 276
 277	if (tcl_ring_retry)
 278		goto tcl_ring_sel;
 279
 280	return ret;
 281}
 282
 283static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
 284				    int msdu_id,
 285				    struct dp_tx_ring *tx_ring)
 286{
 287	struct ath11k *ar;
 288	struct sk_buff *msdu;
 289	struct ath11k_skb_cb *skb_cb;
 290
 291	spin_lock_bh(&tx_ring->tx_idr_lock);
 292	msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
 293	if (!msdu) {
 
 
 294		ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
 295			    msdu_id);
 296		spin_unlock_bh(&tx_ring->tx_idr_lock);
 297		return;
 298	}
 299
 300	skb_cb = ATH11K_SKB_CB(msdu);
 301
 302	idr_remove(&tx_ring->txbuf_idr, msdu_id);
 303	spin_unlock_bh(&tx_ring->tx_idr_lock);
 304
 305	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 306	dev_kfree_skb_any(msdu);
 307
 308	ar = ab->pdevs[mac_id].ar;
 309	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
 310		wake_up(&ar->dp.tx_empty_waitq);
 311}
 312
 313static void
 314ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
 315				 struct dp_tx_ring *tx_ring,
 316				 struct ath11k_dp_htt_wbm_tx_status *ts)
 317{
 
 318	struct sk_buff *msdu;
 319	struct ieee80211_tx_info *info;
 320	struct ath11k_skb_cb *skb_cb;
 321	struct ath11k *ar;
 
 
 
 
 
 322
 323	spin_lock_bh(&tx_ring->tx_idr_lock);
 324	msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id);
 325	if (!msdu) {
 326		ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
 327			    ts->msdu_id);
 328		spin_unlock_bh(&tx_ring->tx_idr_lock);
 329		return;
 330	}
 331
 332	skb_cb = ATH11K_SKB_CB(msdu);
 333	info = IEEE80211_SKB_CB(msdu);
 334
 335	ar = skb_cb->ar;
 336
 337	idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
 338	spin_unlock_bh(&tx_ring->tx_idr_lock);
 339
 340	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
 341		wake_up(&ar->dp.tx_empty_waitq);
 342
 343	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 344
 
 
 
 
 
 345	memset(&info->status, 0, sizeof(info->status));
 346
 347	if (ts->acked) {
 348		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 349			info->flags |= IEEE80211_TX_STAT_ACK;
 350			info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
 351						  ts->ack_rssi;
 352			info->status.is_valid_ack_signal = true;
 
 
 
 
 
 353		} else {
 354			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 355		}
 356	}
 357
 358	ieee80211_tx_status(ar->hw, msdu);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 359}
 360
 361static void
 362ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
 363				     void *desc, u8 mac_id,
 364				     u32 msdu_id, struct dp_tx_ring *tx_ring)
 365{
 366	struct htt_tx_wbm_completion *status_desc;
 367	struct ath11k_dp_htt_wbm_tx_status ts = {0};
 368	enum hal_wbm_htt_tx_comp_status wbm_status;
 369
 370	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
 371
 372	wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
 373			       status_desc->info0);
 374	switch (wbm_status) {
 375	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
 376	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
 377	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
 378		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
 379		ts.msdu_id = msdu_id;
 380		ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
 381					status_desc->info1);
 
 
 
 
 
 
 
 382		ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
 
 383		break;
 384	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
 385	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
 386		ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
 387		break;
 388	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
 389		/* This event is to be handled only when the driver decides to
 390		 * use WDS offload functionality.
 391		 */
 392		break;
 393	default:
 394		ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
 395		break;
 396	}
 397}
 398
 399static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
 400					  struct sk_buff *msdu,
 401					  struct hal_tx_status *ts)
 402{
 403	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
 404
 405	if (ts->try_cnt > 1) {
 406		peer_stats->retry_pkts += ts->try_cnt - 1;
 407		peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;
 408
 409		if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
 410			peer_stats->failed_pkts += 1;
 411			peer_stats->failed_bytes += msdu->len;
 412		}
 413	}
 414}
 415
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 416static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
 417				       struct sk_buff *msdu,
 418				       struct hal_tx_status *ts)
 419{
 
 
 420	struct ath11k_base *ab = ar->ab;
 421	struct ieee80211_tx_info *info;
 422	struct ath11k_skb_cb *skb_cb;
 
 
 
 423
 424	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
 425		/* Must not happen */
 426		return;
 427	}
 428
 429	skb_cb = ATH11K_SKB_CB(msdu);
 430
 431	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 432
 433	rcu_read_lock();
 434
 435	if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
 436		dev_kfree_skb_any(msdu);
 437		goto exit;
 438	}
 439
 440	if (!skb_cb->vif) {
 441		dev_kfree_skb_any(msdu);
 442		goto exit;
 443	}
 444
 445	info = IEEE80211_SKB_CB(msdu);
 446	memset(&info->status, 0, sizeof(info->status));
 447
 448	/* skip tx rate update from ieee80211_status */
 449	info->status.rates[0].idx = -1;
 450
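    	/* The target reports ack_rssi relative to its noise floor; adding
    	 * the default noise floor hands mac80211 an absolute signal level
    	 * in dBm.
    	 */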
 451	if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
 452	    !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 453		info->flags |= IEEE80211_TX_STAT_ACK;
 454		info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
 455					  ts->ack_rssi;
 456		info->status.is_valid_ack_signal = true;
 457	}
 458
 459	if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
 460	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
 461		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 462
 463	if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
 464		if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
 465			if (ar->last_ppdu_id == 0) {
 466				ar->last_ppdu_id = ts->ppdu_id;
 467			} else if (ar->last_ppdu_id == ts->ppdu_id ||
 468				   ar->cached_ppdu_id == ar->last_ppdu_id) {
 469				ar->cached_ppdu_id = ar->last_ppdu_id;
 470				ar->cached_stats.is_ampdu = true;
 471				ath11k_debugfs_sta_update_txcompl(ar, msdu, ts);
 472				memset(&ar->cached_stats, 0,
 473				       sizeof(struct ath11k_per_peer_tx_stats));
 474			} else {
 475				ar->cached_stats.is_ampdu = false;
 476				ath11k_debugfs_sta_update_txcompl(ar, msdu, ts);
 477				memset(&ar->cached_stats, 0,
 478				       sizeof(struct ath11k_per_peer_tx_stats));
 479			}
 480			ar->last_ppdu_id = ts->ppdu_id;
 481		}
 482
 483		ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
 484	}
 485
 486	/* NOTE: Tx rate status reporting. Tx completion status does not have
 487	 * necessary information (for example nss) to build the tx rate.
 488	 * Might end up reporting it out-of-band from HTT stats.
 489	 */
 490
 491	ieee80211_tx_status(ar->hw, msdu);
 492
 493exit:
 494	rcu_read_unlock();
 495}
 496
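    /* Extract the tx status from a WBM release descriptor. Only TQM
     * releases are parsed here; FW (HTT) releases are handled in
     * ath11k_dp_tx_process_htt_tx_complete().
     */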
 497static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
 498					     struct hal_wbm_release_ring *desc,
 499					     struct hal_tx_status *ts)
 500{
 501	ts->buf_rel_source =
 502		FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
 503	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
 504	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
 505		return;
 506
 507	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
 508		return;
 509
 510	ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
 511			       desc->info0);
 512	ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
 513				desc->info1);
 514	ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
 515				desc->info1);
 516	ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
 517				 desc->info2);
 518	if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
 519		ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
 520	ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
 521	ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
 522	if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
 523		ts->rate_stats = desc->rate_stats.info0;
 524	else
 525		ts->rate_stats = 0;
 526}
 527
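    /* Drain the TCL completion (WBM release) ring: descriptors are first
     * copied into the tx_status FIFO under the srng lock, then parsed and
     * completed afterwards so that mac80211 status callbacks do not run
     * with the ring lock held.
     */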
 528void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
 529{
 530	struct ath11k *ar;
 531	struct ath11k_dp *dp = &ab->dp;
 532	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
 533	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
 534	struct sk_buff *msdu;
 535	struct hal_tx_status ts = { 0 };
 536	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
 537	u32 *desc;
 538	u32 msdu_id;
 539	u8 mac_id;
 540
 541	spin_lock_bh(&status_ring->lock);
 542
 543	ath11k_hal_srng_access_begin(ab, status_ring);
 544
 545	while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
 546		tx_ring->tx_status_tail) &&
 547	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
 548		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
 549		       desc, sizeof(struct hal_wbm_release_ring));
 550		tx_ring->tx_status_head =
 551			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
 552	}
 553
 554	if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
 555	    (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
 556		/* TODO: Process pending tx_status messages when kfifo_is_full() */
 557		ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
 558	}
 559
 560	ath11k_hal_srng_access_end(ab, status_ring);
 561
 562	spin_unlock_bh(&status_ring->lock);
 563
 564	while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
 565		struct hal_wbm_release_ring *tx_status;
 566		u32 desc_id;
 567
 568		tx_ring->tx_status_tail =
 569			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
 570		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
 571		ath11k_dp_tx_status_parse(ab, tx_status, &ts);
 572
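    		/* The SW cookie carries the mac_id and msdu_id that were
    		 * encoded into the tx descriptor when the frame was queued.
    		 */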
 573		desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
 574				    tx_status->buf_addr_info.info1);
 575		mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
 576		msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
 577
 578		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
 579			ath11k_dp_tx_process_htt_tx_complete(ab,
 580							     (void *)tx_status,
 581							     mac_id, msdu_id,
 582							     tx_ring);
 583			continue;
 584		}
 585
 586		spin_lock_bh(&tx_ring->tx_idr_lock);
 587		msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
 588		if (!msdu) {
 589			ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
 590				    msdu_id);
 591			spin_unlock_bh(&tx_ring->tx_idr_lock);
 592			continue;
 593		}
 594		idr_remove(&tx_ring->txbuf_idr, msdu_id);
 595		spin_unlock_bh(&tx_ring->tx_idr_lock);
 596
 597		ar = ab->pdevs[mac_id].ar;
 598
 599		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
 600			wake_up(&ar->dp.tx_empty_waitq);
 601
 602		ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
 603	}
 604}
 605
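    /* Post a REO command to the target and, when a callback is supplied,
     * park it on reo_cmd_list so that the command status event can
     * complete it.
     *
     * Usage sketch (hypothetical caller and handler names):
     *
     *	struct ath11k_hal_reo_cmd cmd = {0};
     *
     *	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
     *	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
     *					HAL_REO_CMD_UPDATE_RX_QUEUE,
     *					&cmd, tid_update_handler);
     */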
 606int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
 607			      enum hal_reo_cmd_type type,
 608			      struct ath11k_hal_reo_cmd *cmd,
 609			      void (*cb)(struct ath11k_dp *, void *,
 610					 enum hal_reo_cmd_status))
 611{
 612	struct ath11k_dp *dp = &ab->dp;
 613	struct dp_reo_cmd *dp_cmd;
 614	struct hal_srng *cmd_ring;
 615	int cmd_num;
 616
 617	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 618	cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
 619
 620	/* cmd_num starts from 1; on failure, return the error code */
 621	if (cmd_num < 0)
 622		return cmd_num;
 623
 624	/* REO cmd ring descriptors have cmd_num starting from 1 */
 625	if (cmd_num == 0)
 626		return -EINVAL;
 627
 628	if (!cb)
 629		return 0;
 630
 631	/* Can this be optimized so that we keep the pending command list only
 632	 * for tid delete command to free up the resource on the command status
 633	 * indication?
 634	 */
 635	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
 636
 637	if (!dp_cmd)
 638		return -ENOMEM;
 639
 640	memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
 641	dp_cmd->cmd_num = cmd_num;
 642	dp_cmd->handler = cb;
 643
 644	spin_lock_bh(&dp->reo_cmd_lock);
 645	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
 646	spin_unlock_bh(&dp->reo_cmd_lock);
 647
 648	return 0;
 649}
 650
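    /* Translate a HAL ring type (plus mac_id/ring_id) into the HTT ring
     * id/type pair the firmware expects in H2T ring setup messages.
     */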
 651static int
 652ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
 653			      int mac_id, u32 ring_id,
 654			      enum hal_ring_type ring_type,
 655			      enum htt_srng_ring_type *htt_ring_type,
 656			      enum htt_srng_ring_id *htt_ring_id)
 657{
 658	int lmac_ring_id_offset = 0;
 659	int ret = 0;
 660
 661	switch (ring_type) {
 662	case HAL_RXDMA_BUF:
 663		lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
 664
 665		/* for QCA6390, host fills rx buffer to fw and fw fills to
 666		 * rxbuf ring for each rxdma
 667		 */
 668		if (!ab->hw_params.rx_mac_buf_ring) {
 669			if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
 670					  lmac_ring_id_offset) ||
 671				ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
 672					lmac_ring_id_offset))) {
 673				ret = -EINVAL;
 674			}
 675			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
 676			*htt_ring_type = HTT_SW_TO_HW_RING;
 677		} else {
 678			if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
 679				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
 680				*htt_ring_type = HTT_SW_TO_SW_RING;
 681			} else {
 682				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
 683				*htt_ring_type = HTT_SW_TO_HW_RING;
 684			}
 685		}
 686		break;
 687	case HAL_RXDMA_DST:
 688		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
 689		*htt_ring_type = HTT_HW_TO_SW_RING;
 690		break;
 691	case HAL_RXDMA_MONITOR_BUF:
 692		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
 693		*htt_ring_type = HTT_SW_TO_HW_RING;
 694		break;
 695	case HAL_RXDMA_MONITOR_STATUS:
 696		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
 697		*htt_ring_type = HTT_SW_TO_HW_RING;
 698		break;
 699	case HAL_RXDMA_MONITOR_DST:
 700		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
 701		*htt_ring_type = HTT_HW_TO_SW_RING;
 702		break;
 703	case HAL_RXDMA_MONITOR_DESC:
 704		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
 705		*htt_ring_type = HTT_SW_TO_HW_RING;
 706		break;
 707	default:
 708		ath11k_warn(ab, "Unsupported ring type in DP: %d\n", ring_type);
 709		ret = -EINVAL;
 710	}
 711	return ret;
 712}
 713
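    /* Describe a host SRNG to the firmware via an HTT H2T SRING_SETUP
     * message: base address, entry and ring size, byte-swap flags,
     * head/tail pointer addresses, MSI target and interrupt thresholds.
     */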
 714int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
 715				int mac_id, enum hal_ring_type ring_type)
 716{
 717	struct htt_srng_setup_cmd *cmd;
 718	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
 719	struct hal_srng_params params;
 720	struct sk_buff *skb;
 721	u32 ring_entry_sz;
 722	int len = sizeof(*cmd);
 723	dma_addr_t hp_addr, tp_addr;
 724	enum htt_srng_ring_type htt_ring_type;
 725	enum htt_srng_ring_id htt_ring_id;
 726	int ret;
 727
 728	skb = ath11k_htc_alloc_skb(ab, len);
 729	if (!skb)
 730		return -ENOMEM;
 731
 732	memset(&params, 0, sizeof(params));
 733	ath11k_hal_srng_get_params(ab, srng, &params);
 734
 735	hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
 736	tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);
 737
 738	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
 739					    ring_type, &htt_ring_type,
 740					    &htt_ring_id);
 741	if (ret)
 742		goto err_free;
 743
 744	skb_put(skb, len);
 745	cmd = (struct htt_srng_setup_cmd *)skb->data;
 746	cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
 747				HTT_H2T_MSG_TYPE_SRING_SETUP);
 748	if (htt_ring_type == HTT_SW_TO_HW_RING ||
 749	    htt_ring_type == HTT_HW_TO_SW_RING)
 750		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
 751					 DP_SW2HW_MACID(mac_id));
 752	else
 753		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
 754					 mac_id);
 755	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
 756				 htt_ring_type);
 757	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);
 758
 759	cmd->ring_base_addr_lo = params.ring_base_paddr &
 760				 HAL_ADDR_LSB_REG_MASK;
 761
 762	cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
 763				 HAL_ADDR_MSB_REG_SHIFT;
 764
 765	ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
 766	if (ret < 0)
 767		goto err_free;
 768
 769	ring_entry_sz = ret;
 770
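    	/* HTT programs entry and ring sizes in units of 32-bit words */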
 771	ring_entry_sz >>= 2;
 772	cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
 773				ring_entry_sz);
 774	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
 775				 params.num_entries * ring_entry_sz);
 776	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
 777				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
 778	cmd->info1 |= FIELD_PREP(
 779			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
 780			!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
 781	cmd->info1 |= FIELD_PREP(
 782			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
 783			!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
 784	if (htt_ring_type == HTT_SW_TO_HW_RING)
 785		cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;
 786
 787	cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
 788	cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
 789					      HAL_ADDR_MSB_REG_SHIFT;
 790
 791	cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
 792	cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
 793					      HAL_ADDR_MSB_REG_SHIFT;
 794
 795	cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr);
 796	cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr);
 797	cmd->msi_data = params.msi_data;
 798
 799	cmd->intr_info = FIELD_PREP(
 800			HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
 801			params.intr_batch_cntr_thres_entries * ring_entry_sz);
 802	cmd->intr_info |= FIELD_PREP(
 803			HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
 804			params.intr_timer_thres_us >> 3);
 805
 806	cmd->info2 = 0;
 807	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
 808		cmd->info2 = FIELD_PREP(
 809				HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
 810				params.low_threshold);
 811	}
 812
 813	ath11k_dbg(ab, ATH11K_DBG_HAL,
 814		   "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
 815		   __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
 816		   cmd->msi_data);
 817
 818	ath11k_dbg(ab, ATH11K_DBG_HAL,
 819		   "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
 820		   ring_id, ring_type, cmd->intr_info, cmd->info2);
 821
 822	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
 823	if (ret)
 824		goto err_free;
 825
 826	return 0;
 827
 828err_free:
 829	dev_kfree_skb_any(skb);
 830
 831	return ret;
 832}
 833
 834#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
 835
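    /* Query the HTT interface version from the target and check that the
     * major version matches what the driver supports.
     */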
 836int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
 837{
 838	struct ath11k_dp *dp = &ab->dp;
 839	struct sk_buff *skb;
 840	struct htt_ver_req_cmd *cmd;
 841	int len = sizeof(*cmd);
 842	int ret;
 843
 844	init_completion(&dp->htt_tgt_version_received);
 845
 846	skb = ath11k_htc_alloc_skb(ab, len);
 847	if (!skb)
 848		return -ENOMEM;
 849
 850	skb_put(skb, len);
 851	cmd = (struct htt_ver_req_cmd *)skb->data;
 852	cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
 853				       HTT_H2T_MSG_TYPE_VERSION_REQ);
 854
 855	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
 856	if (ret) {
 857		dev_kfree_skb_any(skb);
 858		return ret;
 859	}
 860
 861	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
 862					  HTT_TARGET_VERSION_TIMEOUT_HZ);
 863	if (ret == 0) {
 864		ath11k_warn(ab, "htt target version request timed out\n");
 865		return -ETIMEDOUT;
 866	}
 867
 868	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
 869		ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
 870			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
 871		return -EOPNOTSUPP;
 872	}
 873
 874	return 0;
 875}
 876
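    /* Enable PPDU stats for each rxdma on this pdev. Target pdev ids start
     * at 1, hence the (i + 1) shift when building the pdev mask.
     */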
 877int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
 878{
 879	struct ath11k_base *ab = ar->ab;
 880	struct ath11k_dp *dp = &ab->dp;
 881	struct sk_buff *skb;
 882	struct htt_ppdu_stats_cfg_cmd *cmd;
 883	int len = sizeof(*cmd);
 884	u8 pdev_mask;
 885	int ret;
 886	int i;
 887
 888	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
 889		skb = ath11k_htc_alloc_skb(ab, len);
 890		if (!skb)
 891			return -ENOMEM;
 892
 893		skb_put(skb, len);
 894		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
 895		cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
 896				      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
 897
 898		pdev_mask = 1 << (i + 1);
 899		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
 900		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
 901
 902		ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
 903		if (ret) {
 904			dev_kfree_skb_any(skb);
 905			return ret;
 906		}
 907	}
 908
 909	return 0;
 910}
 911
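    /* Program the RX packet and TLV filters of a ring via an HTT H2T
     * RX_RING_SELECTION_CFG message.
     */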
 912int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
 913				     int mac_id, enum hal_ring_type ring_type,
 914				     int rx_buf_size,
 915				     struct htt_rx_ring_tlv_filter *tlv_filter)
 916{
 917	struct htt_rx_ring_selection_cfg_cmd *cmd;
 918	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
 919	struct hal_srng_params params;
 920	struct sk_buff *skb;
 921	int len = sizeof(*cmd);
 922	enum htt_srng_ring_type htt_ring_type;
 923	enum htt_srng_ring_id htt_ring_id;
 924	int ret;
 925
 926	skb = ath11k_htc_alloc_skb(ab, len);
 927	if (!skb)
 928		return -ENOMEM;
 929
 930	memset(&params, 0, sizeof(params));
 931	ath11k_hal_srng_get_params(ab, srng, &params);
 932
 933	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
 934					    ring_type, &htt_ring_type,
 935					    &htt_ring_id);
 936	if (ret)
 937		goto err_free;
 938
 939	skb_put(skb, len);
 940	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
 941	cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
 942				HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
 943	if (htt_ring_type == HTT_SW_TO_HW_RING ||
 944	    htt_ring_type == HTT_HW_TO_SW_RING)
 945		cmd->info0 |=
 946			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
 947				   DP_SW2HW_MACID(mac_id));
 948	else
 949		cmd->info0 |=
 950			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
 951				   mac_id);
 952	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
 953				 htt_ring_id);
 954	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
 955				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
 956	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
 957				 !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
 958
 959	cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
 960				rx_buf_size);
 961	cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
 962	cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
 963	cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
 964	cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
 965	cmd->rx_filter_tlv = tlv_filter->rx_filter;
 966
 967	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
 968	if (ret)
 969		goto err_free;
 970
 971	return 0;
 972
 973err_free:
 974	dev_kfree_skb_any(skb);
 975
 976	return ret;
 977}
 978
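    /* Request extended target stats of the given type; the cookie is
     * echoed back in the stats confirmation event so that replies can be
     * matched to requests.
     */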
 979int
 980ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
 981				   struct htt_ext_stats_cfg_params *cfg_params,
 982				   u64 cookie)
 983{
 984	struct ath11k_base *ab = ar->ab;
 985	struct ath11k_dp *dp = &ab->dp;
 986	struct sk_buff *skb;
 987	struct htt_ext_stats_cfg_cmd *cmd;
 988	int len = sizeof(*cmd);
 989	int ret;
 990
 991	skb = ath11k_htc_alloc_skb(ab, len);
 992	if (!skb)
 993		return -ENOMEM;
 994
 995	skb_put(skb, len);
 996
 997	cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
 998	memset(cmd, 0, sizeof(*cmd));
 999	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
1000
1001	cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
1002
1003	cmd->hdr.stats_type = type;
1004	cmd->cfg_param0 = cfg_params->cfg0;
1005	cmd->cfg_param1 = cfg_params->cfg1;
1006	cmd->cfg_param2 = cfg_params->cfg2;
1007	cmd->cfg_param3 = cfg_params->cfg3;
1008	cmd->cookie_lsb = lower_32_bits(cookie);
1009	cmd->cookie_msb = upper_32_bits(cookie);
1010
1011	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
1012	if (ret) {
1013		ath11k_warn(ab, "failed to send htt type stats request: %d",
1014			    ret);
1015		dev_kfree_skb_any(skb);
1016		return ret;
1017	}
1018
1019	return 0;
1020}
1021
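    /* Configure (or, with reset, restore) the rxdma filter setup for
     * monitor mode. When rxdma1 is not available, the per-mac buf rings
     * are used instead and a reap timer keeps the monitor status ring
     * drained.
     */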
1022int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
1023{
1024	struct ath11k_pdev_dp *dp = &ar->dp;
1025	struct ath11k_base *ab = ar->ab;
1026	struct htt_rx_ring_tlv_filter tlv_filter = {0};
1027	int ret = 0, ring_id = 0, i;
1028
1029	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
1030
1031	if (!reset) {
1032		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
1033		tlv_filter.pkt_filter_flags0 =
1034					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
1035					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
1036		tlv_filter.pkt_filter_flags1 =
1037					HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
1038					HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
1039		tlv_filter.pkt_filter_flags2 =
1040					HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
1041					HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
1042		tlv_filter.pkt_filter_flags3 =
1043					HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
1044					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
1045					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
1046					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
1047	}
1048
1049	if (ab->hw_params.rxdma1_enable) {
1050		ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
1051						       HAL_RXDMA_MONITOR_BUF,
1052						       DP_RXDMA_REFILL_RING_SIZE,
1053						       &tlv_filter);
1054	} else if (!reset) {
1055		/* set in monitor mode only */
1056		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
1057			ring_id = dp->rx_mac_buf_ring[i].ring_id;
1058			ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
1059							       dp->mac_id + i,
1060							       HAL_RXDMA_BUF,
1061							       1024,
1062							       &tlv_filter);
1063		}
1064	}
1065
1066	if (ret)
1067		return ret;
1068
1069	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
1070		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
1071		if (!reset)
1072			tlv_filter.rx_filter =
1073					HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
1074		else
1075			tlv_filter = ath11k_mac_mon_status_filter_default;
1076
1077		ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
1078						       dp->mac_id + i,
1079						       HAL_RXDMA_MONITOR_STATUS,
1080						       DP_RXDMA_REFILL_RING_SIZE,
1081						       &tlv_filter);
1082	}
1083
1084	if (!ar->ab->hw_params.rxdma1_enable)
1085		mod_timer(&ar->ab->mon_reap_timer, jiffies +
1086			  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
1087
1088	return ret;
1089}