/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

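/* The in-order RX path identifies completed buffers by their DMA address,
 * so map the address reported by the firmware back to its sk_buff via the
 * paddr-keyed hash of outstanding rx buffers.
 */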
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}

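/* Fill the rx ring with up to @num fresh buffers: allocate and align each
 * skb, clear the descriptor's attention word, DMA-map the buffer and
 * publish its address at the current alloc index (plus the paddr hash when
 * in-order RX is active). The alloc index is written back even on partial
 * failure so the firmware sees whatever was successfully posted.
 */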
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure the ring is always half empty.
	 * This guarantees that no replenishment overruns are possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up a significant amount of CPU cycles and
	 * starve other tasks, e.g. TX on an ethernet device while acting as
	 * a bridge with an ath10k wlan interface. This ended up with very
	 * poor performance once the host system's CPU was overwhelmed with
	 * RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets
	 * are processed in FIFO order. This means actual RX processing can
	 * starve out refilling. If there are not enough buffers on the RX
	 * ring the FW will not report RX until it is refilled with enough
	 * buffers. This automatically balances load with respect to CPU
	 * power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	ath10k_htt_rx_ring_free(htt);

	dma_free_coherent(htt->ar->dev,
			  htt->rx_ops->htt_get_rx_ring_size(htt),
			  htt->rx_ops->htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

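/* Pop the oldest filled buffer at the software read index and unmap it for
 * CPU access. This assumes the firmware consumes buffers strictly in ring
 * order, which is the case on the non in-order RX path.
 */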
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ops->htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

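/* Gather one A-MSDU's worth of buffers off the rx ring. Head buffers carry
 * an HTT rx descriptor followed by payload; buffers chained via
 * ring2_more_count carry payload only. The MSDU_DONE attention bit is the
 * only confirmation that hardware DMA has actually completed.
 */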
/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what the
		 * caller expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain an rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

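/* Allocate everything the rx ring needs: the host-side sk_buff pointer
 * array, the DMA-coherent paddr ring shared with the firmware and the
 * coherent alloc index word, and set up the refill retry timer. Undone by
 * ath10k_htt_rx_free().
 */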
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ops->htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  htt->rx_ops->htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

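/* The following helpers return per-cipher lengths of the security header
 * (IV/PN), MIC and ICV so the undecap code can strip or preserve crypto
 * parameters without knowing cipher details.
 */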
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}

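/* Derive the rx rate info for mac80211 from the PPDU start descriptor:
 * legacy rates are mapped through the current band's rate table, HT rates
 * are taken from HT-SIG and VHT rates from VHT-SIG-A. For MU-MIMO group
 * frames MCS/NSS cannot be recovered (see the comment below), so only
 * bandwidth and SGI are reported for them.
 */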
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* The band is required to look up the legacy rate index.
		 * Since the band can't be undefined, check that freq is
		 * non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into the Rx
			 * descriptor so it's impossible to decode the MCS.
			 * Also, since the firmware consumes Group Id
			 * Management frames, the host has no knowledge
			 * regarding the group/user position mapping so it's
			 * impossible to pick the correct Nsts from
			 * VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on a best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    __le32_to_cpu(rxd->ppdu_start.info0),
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

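/* The rx descriptor doesn't carry the frequency, so guess the channel from
 * progressively weaker hints: scan channel, rx channel, the peer's or
 * vdev's channel context, any active channel context, and finally the
 * target's operating channel.
 */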
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

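/* PPDU-level status (channel, signal, rates, TSF) is derived from the rx
 * descriptors of the first and last MPDU in a PPDU: reset and re-derive
 * the shared status when a new PPDU starts, finalize it on the last MPDU.
 */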
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	__skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

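/* Undecap a raw 802.11 frame: trim the FCS and, if hardware already
 * decrypted the payload, strip the MIC/ICV from the tail and the crypto
 * parameters from the head according to the RX_FLAG_*_STRIPPED flags set
 * by ath10k_htt_rx_h_mpdu().
 */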
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from the
	 * tail since the header is still needed to compute the lengths
	 * stripped from the head.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

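/* Undecap a native-wifi frame: the stripped-down 3-address header that
 * firmware delivered is pulled (preserving DA/SA) and the original 802.11
 * header saved from the first MSDU's rx descriptor is pushed back, along
 * with the crypto parameters when the IV wasn't stripped.
 */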
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
			ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
			ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
			ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}

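/* Translate the hardware's checksum attention bits into ip_summed values:
 * only IPv4/IPv6 TCP or UDP frames whose IP and TCP/UDP checksums both
 * verified are marked CHECKSUM_UNNECESSARY; everything else falls back to
 * CHECKSUM_NONE for software verification.
 */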
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

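/* Post-process one MPDU (a queue of MSDUs): decide from the attention
 * flags whether hardware decrypted the frame, translate error bits into
 * mac80211 rx flags, undecap every MSDU against the first MSDU's original
 * 802.11 header and clear the protected bit where decryption is confirmed.
 */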
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}

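/* A chained MSDU spans several rx ring buffers. Coalesce them into the
 * first sk_buff (expanding its tailroom once for the total length) so the
 * rest of the rx path sees a single linear buffer.
 */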
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long int *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or a similar method to decrease
	 * copying, or maybe get mac80211 to provide a way to
	 * just receive a list of skbs?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long int *drop_cnt,
				    unsigned long int *unchain_cnt)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long int *drop_cnt)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	if (drop_cnt)
		*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
}

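/* One iteration of the non in-order rx path: pop an A-MSDU off the ring,
 * derive PPDU status, unchain raw fragments if needed, filter,
 * post-process the MPDU and queue the resulting MSDUs towards mac80211.
 */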
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	unsigned long int drop_cnt = 0;
	unsigned long int unchain_cnt = 0;
	unsigned long int drop_cnt_filter = 0;
	unsigned long int msdus_to_queue, num_msdus;
	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* a return value of 1 indicates chained msdus */
1830	if (ret > 0)
1831		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
1832
1833	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
1834	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
1835	msdus_to_queue = skb_queue_len(&amsdu);
1836	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
1837
1838	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
1839				       unchain_cnt, drop_cnt, drop_cnt_filter,
1840				       msdus_to_queue);
1841
1842	return 0;
1843}
1844
1845static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
1846				      struct htt_rx_indication *rx)
1847{
1848	struct ath10k *ar = htt->ar;
1849	struct htt_rx_indication_mpdu_range *mpdu_ranges;
1850	int num_mpdu_ranges;
1851	int i, mpdu_count = 0;
1852	u16 peer_id;
1853	u8 tid;
1854
1855	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
1856			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
1857	peer_id = __le16_to_cpu(rx->hdr.peer_id);
1858	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
1859
1860	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
1861
1862	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
1863			rx, sizeof(*rx) +
1864			(sizeof(struct htt_rx_indication_mpdu_range) *
1865				num_mpdu_ranges));
1866
1867	for (i = 0; i < num_mpdu_ranges; i++)
1868		mpdu_count += mpdu_ranges[i].mpdu_count;
1869
1870	atomic_add(mpdu_count, &htt->num_mpdus_ready);
1871
1872	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
1873					     num_mpdu_ranges);
1874}
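
/* MS() and SM() used throughout this file are ath10k's mask-shift helpers.
 * A sketch of their usual shape (hw.h has the authoritative macros):
 *
 *	#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
 *	#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
 *
 * e.g. MS(info1, HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES) masks the
 * NUM_MPDU_RANGES field out of info1 and shifts it down to bit 0.
 */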
1875
1876static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
1877				       struct sk_buff *skb)
1878{
1879	struct ath10k_htt *htt = &ar->htt;
1880	struct htt_resp *resp = (struct htt_resp *)skb->data;
1881	struct htt_tx_done tx_done = {};
1882	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
1883	__le16 msdu_id;
1884	int i;
1885
1886	switch (status) {
1887	case HTT_DATA_TX_STATUS_NO_ACK:
1888		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
1889		break;
1890	case HTT_DATA_TX_STATUS_OK:
1891		tx_done.status = HTT_TX_COMPL_STATE_ACK;
1892		break;
1893	case HTT_DATA_TX_STATUS_DISCARD:
1894	case HTT_DATA_TX_STATUS_POSTPONE:
1895	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
1896		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1897		break;
1898	default:
1899		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
1900		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1901		break;
1902	}
1903
1904	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
1905		   resp->data_tx_completion.num_msdus);
1906
1907	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1908		msdu_id = resp->data_tx_completion.msdus[i];
1909		tx_done.msdu_id = __le16_to_cpu(msdu_id);
1910
1911		/* kfifo_put: In practice firmware shouldn't fire off per-CE
1912		 * interrupt and main interrupt (MSI/MSI-X) for the same
1913		 * HTC service so it should be safe to use kfifo_put w/o lock.
1914		 *
1915		 * From kfifo_put() documentation:
1916		 *  Note that with only one concurrent reader and one concurrent
1917		 *  writer, you don't need extra locking to use these macros.
1918		 */
1919		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
1920			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
1921				    tx_done.msdu_id, tx_done.status);
1922			ath10k_txrx_tx_unref(htt, &tx_done);
1923		}
1924	}
1925}
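
/* A minimal sketch (illustrative names, not part of the driver) of the
 * single-producer/single-consumer kfifo pattern relied on above.  With
 * exactly one writer and one reader no locking is needed; any additional
 * writer or reader would call for kfifo_in_spinlocked() or an outer lock.
 */
static DECLARE_KFIFO(example_fifo, struct htt_tx_done, 256);

static void example_producer(const struct htt_tx_done *tx_done)
{
	/* kfifo_put() returns 0 if the fifo is full; INIT_KFIFO() must have
	 * been called once beforehand.
	 */
	if (!kfifo_put(&example_fifo, *tx_done))
		pr_warn("example fifo full, completion dropped\n");
}

static void example_consumer(void)
{
	struct htt_tx_done tx_done;

	while (kfifo_get(&example_fifo, &tx_done))
		; /* hand tx_done to ath10k_txrx_tx_unref() or similar */
}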
1926
1927static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
1928{
1929	struct htt_rx_addba *ev = &resp->rx_addba;
1930	struct ath10k_peer *peer;
1931	struct ath10k_vif *arvif;
1932	u16 info0, tid, peer_id;
1933
1934	info0 = __le16_to_cpu(ev->info0);
1935	tid = MS(info0, HTT_RX_BA_INFO0_TID);
1936	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1937
1938	ath10k_dbg(ar, ATH10K_DBG_HTT,
1939		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
1940		   tid, peer_id, ev->window_size);
1941
1942	spin_lock_bh(&ar->data_lock);
1943	peer = ath10k_peer_find_by_id(ar, peer_id);
1944	if (!peer) {
1945		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
1946			    peer_id);
1947		spin_unlock_bh(&ar->data_lock);
1948		return;
1949	}
1950
1951	arvif = ath10k_get_arvif(ar, peer->vdev_id);
1952	if (!arvif) {
1953		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
1954			    peer->vdev_id);
1955		spin_unlock_bh(&ar->data_lock);
1956		return;
1957	}
1958
1959	ath10k_dbg(ar, ATH10K_DBG_HTT,
1960		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
1961		   peer->addr, tid, ev->window_size);
1962
1963	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1964	spin_unlock_bh(&ar->data_lock);
1965}
1966
1967static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1968{
1969	struct htt_rx_delba *ev = &resp->rx_delba;
1970	struct ath10k_peer *peer;
1971	struct ath10k_vif *arvif;
1972	u16 info0, tid, peer_id;
1973
1974	info0 = __le16_to_cpu(ev->info0);
1975	tid = MS(info0, HTT_RX_BA_INFO0_TID);
1976	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1977
1978	ath10k_dbg(ar, ATH10K_DBG_HTT,
1979		   "htt rx delba tid %hu peer_id %hu\n",
1980		   tid, peer_id);
1981
1982	spin_lock_bh(&ar->data_lock);
1983	peer = ath10k_peer_find_by_id(ar, peer_id);
1984	if (!peer) {
1985		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
1986			    peer_id);
1987		spin_unlock_bh(&ar->data_lock);
1988		return;
1989	}
1990
1991	arvif = ath10k_get_arvif(ar, peer->vdev_id);
1992	if (!arvif) {
1993		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
1994			    peer->vdev_id);
1995		spin_unlock_bh(&ar->data_lock);
1996		return;
1997	}
1998
1999	ath10k_dbg(ar, ATH10K_DBG_HTT,
2000		   "htt rx stop rx ba session sta %pM tid %hu\n",
2001		   peer->addr, tid);
2002
2003	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
2004	spin_unlock_bh(&ar->data_lock);
2005}
2006
2007static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
2008				       struct sk_buff_head *amsdu)
2009{
2010	struct sk_buff *msdu;
2011	struct htt_rx_desc *rxd;
2012
2013	if (skb_queue_empty(list))
2014		return -ENOBUFS;
2015
2016	if (WARN_ON(!skb_queue_empty(amsdu)))
2017		return -EINVAL;
2018
2019	while ((msdu = __skb_dequeue(list))) {
2020		__skb_queue_tail(amsdu, msdu);
2021
2022		rxd = (void *)msdu->data - sizeof(*rxd);
2023		if (rxd->msdu_end.common.info0 &
2024		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
2025			break;
2026	}
2027
2028	msdu = skb_peek_tail(amsdu);
2029	rxd = (void *)msdu->data - sizeof(*rxd);
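	/* A tail lacking LAST_MSDU means the A-MSDU is truncated: restore
	 * the frames to the list and report -EAGAIN rather than deliver a
	 * partial aggregate.
	 */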
2030	if (!(rxd->msdu_end.common.info0 &
2031	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
2032		skb_queue_splice_init(amsdu, list);
2033		return -EAGAIN;
2034	}
2035
2036	return 0;
2037}
2038
2039static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
2040					    struct sk_buff *skb)
2041{
2042	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2043
2044	if (!ieee80211_has_protected(hdr->frame_control))
2045		return;
2046
2047	/* Offloaded frames are already decrypted but firmware insists they are
2048	 * protected in the 802.11 header. Strip the flag.  Otherwise mac80211
2049	 * will drop the frame.
2050	 */
2051
2052	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2053	status->flag |= RX_FLAG_DECRYPTED |
2054			RX_FLAG_IV_STRIPPED |
2055			RX_FLAG_MMIC_STRIPPED;
2056}
2057
2058static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
2059				       struct sk_buff_head *list)
2060{
2061	struct ath10k_htt *htt = &ar->htt;
2062	struct ieee80211_rx_status *status = &htt->rx_status;
2063	struct htt_rx_offload_msdu *rx;
2064	struct sk_buff *msdu;
2065	size_t offset;
2066
2067	while ((msdu = __skb_dequeue(list))) {
2068		/* Offloaded frames don't have Rx descriptor. Instead they have
2069		 * a short meta information header.
2070		 */
2071
2072		rx = (void *)msdu->data;
2073
2074		skb_put(msdu, sizeof(*rx));
2075		skb_pull(msdu, sizeof(*rx));
2076
2077		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
2078			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
2079			dev_kfree_skb_any(msdu);
2080			continue;
2081		}
2082
2083		skb_put(msdu, __le16_to_cpu(rx->msdu_len));
2084
2085		/* The offloaded rx header length isn't a multiple of 2 or 4, so
2086		 * the actual payload is unaligned. Align the frame; otherwise
2087		 * mac80211 complains.  This shouldn't reduce performance much
2088		 * because these offloaded frames are rare.
2089		 */
2090		offset = 4 - ((unsigned long)msdu->data & 3);
2091		skb_put(msdu, offset);
2092		memmove(msdu->data + offset, msdu->data, msdu->len);
2093		skb_pull(msdu, offset);
2094
2095		/* FIXME: The frame is NWifi. Re-construct QoS Control
2096		 * if possible later.
2097		 */
2098
2099		memset(status, 0, sizeof(*status));
2100		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2101
2102		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
2103		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
2104		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
2105	}
2106}
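
/* A self-contained sketch (not a driver helper) of the 4-byte alignment
 * fix-up performed above: grow the tail, shift the payload forward, then
 * advance the data pointer onto the aligned boundary.  Assumes enough
 * tailroom, as the offloaded rx path above does.
 */
static void example_align_payload(struct sk_buff *skb)
{
	size_t misalign = (unsigned long)skb->data & 3;
	size_t offset = 4 - misalign;
	size_t len = skb->len;

	if (!misalign)
		return;

	skb_put(skb, offset);				/* reserve room at the tail */
	memmove(skb->data + offset, skb->data, len);	/* shift payload forward */
	skb_pull(skb, offset);				/* skb->data now aligned */
}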
2107
2108static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
2109{
2110	struct ath10k_htt *htt = &ar->htt;
2111	struct htt_resp *resp = (void *)skb->data;
2112	struct ieee80211_rx_status *status = &htt->rx_status;
2113	struct sk_buff_head list;
2114	struct sk_buff_head amsdu;
2115	u16 peer_id;
2116	u16 msdu_count;
2117	u8 vdev_id;
2118	u8 tid;
2119	bool offload;
2120	bool frag;
2121	int ret;
2122
2123	lockdep_assert_held(&htt->rx_ring.lock);
2124
2125	if (htt->rx_confused)
2126		return -EIO;
2127
2128	skb_pull(skb, sizeof(resp->hdr));
2129	skb_pull(skb, sizeof(resp->rx_in_ord_ind));
2130
2131	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
2132	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
2133	vdev_id = resp->rx_in_ord_ind.vdev_id;
2134	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
2135	offload = !!(resp->rx_in_ord_ind.info &
2136			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
2137	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
2138
2139	ath10k_dbg(ar, ATH10K_DBG_HTT,
2140		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
2141		   vdev_id, peer_id, tid, offload, frag, msdu_count);
2142
2143	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
2144		ath10k_warn(ar, "dropping invalid in order rx indication\n");
2145		return -EINVAL;
2146	}
2147
2148	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
2149	 * extracted and processed.
2150	 */
2151	__skb_queue_head_init(&list);
2152	if (ar->hw_params.target_64bit)
2153		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
2154						     &list);
2155	else
2156		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
2157						     &list);
2158
2159	if (ret < 0) {
2160		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
2161		htt->rx_confused = true;
2162		return -EIO;
2163	}
2164
2165	/* Offloaded frames are very different and need to be handled
2166	 * separately.
2167	 */
2168	if (offload)
2169		ath10k_htt_rx_h_rx_offload(ar, &list);
2170
2171	while (!skb_queue_empty(&list)) {
2172		__skb_queue_head_init(&amsdu);
2173		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
2174		switch (ret) {
2175		case 0:
2176			/* Note: The in-order indication may report interleaved
2177			 * frames from different PPDUs meaning reported rx rate
2178			 * to mac80211 isn't accurate/reliable. It's still
2179			 * better to report something than nothing though. This
2180			 * should still give an idea about rx rate to the user.
2181			 */
2182			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
2183			ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
2184			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
2185					     NULL);
2186			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
2187			break;
2188		case -EAGAIN:
2189			/* fall through */
2190		default:
2191			/* Should not happen. */
2192			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
2193			htt->rx_confused = true;
2194			__skb_queue_purge(&list);
2195			return -EIO;
2196		}
2197	}
2198	return ret;
2199}
2200
2201static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
2202						   const __le32 *resp_ids,
2203						   int num_resp_ids)
2204{
2205	int i;
2206	u32 resp_id;
2207
2208	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
2209		   num_resp_ids);
2210
2211	for (i = 0; i < num_resp_ids; i++) {
2212		resp_id = le32_to_cpu(resp_ids[i]);
2213
2214		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
2215			   resp_id);
2216
2217		/* TODO: free resp_id */
2218	}
2219}
2220
2221static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
2222{
2223	struct ieee80211_hw *hw = ar->hw;
2224	struct ieee80211_txq *txq;
2225	struct htt_resp *resp = (struct htt_resp *)skb->data;
2226	struct htt_tx_fetch_record *record;
2227	size_t len;
2228	size_t max_num_bytes;
2229	size_t max_num_msdus;
2230	size_t num_bytes;
2231	size_t num_msdus;
2232	const __le32 *resp_ids;
2233	u16 num_records;
2234	u16 num_resp_ids;
2235	u16 peer_id;
2236	u8 tid;
2237	int ret;
2238	int i;
2239
2240	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
2241
2242	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
2243	if (unlikely(skb->len < len)) {
2244		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
2245		return;
2246	}
2247
2248	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
2249	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
2250
2251	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
2252	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
2253
2254	if (unlikely(skb->len < len)) {
2255		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
2256		return;
2257	}
2258
2259	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
2260		   num_records, num_resp_ids,
2261		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
2262
2263	if (!ar->htt.tx_q_state.enabled) {
2264		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
2265		return;
2266	}
2267
2268	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
2269		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
2270		return;
2271	}
2272
2273	rcu_read_lock();
2274
2275	for (i = 0; i < num_records; i++) {
2276		record = &resp->tx_fetch_ind.records[i];
2277		peer_id = MS(le16_to_cpu(record->info),
2278			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
2279		tid = MS(le16_to_cpu(record->info),
2280			 HTT_TX_FETCH_RECORD_INFO_TID);
2281		max_num_msdus = le16_to_cpu(record->num_msdus);
2282		max_num_bytes = le32_to_cpu(record->num_bytes);
2283
2284		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
2285			   i, peer_id, tid, max_num_msdus, max_num_bytes);
2286
2287		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2288		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2289			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2290				    peer_id, tid);
2291			continue;
2292		}
2293
2294		spin_lock_bh(&ar->data_lock);
2295		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2296		spin_unlock_bh(&ar->data_lock);
2297
2298		/* It is okay to release the lock and use txq because RCU read
2299		 * lock is held.
2300		 */
2301
2302		if (unlikely(!txq)) {
2303			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2304				    peer_id, tid);
2305			continue;
2306		}
2307
2308		num_msdus = 0;
2309		num_bytes = 0;
2310
2311		while (num_msdus < max_num_msdus &&
2312		       num_bytes < max_num_bytes) {
2313			ret = ath10k_mac_tx_push_txq(hw, txq);
2314			if (ret < 0)
2315				break;
2316
2317			num_msdus++;
2318			num_bytes += ret;
2319		}
2320
2321		record->num_msdus = cpu_to_le16(num_msdus);
2322		record->num_bytes = cpu_to_le32(num_bytes);
2323
2324		ath10k_htt_tx_txq_recalc(hw, txq);
2325	}
2326
2327	rcu_read_unlock();
2328
2329	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
2330	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
2331
2332	ret = ath10k_htt_tx_fetch_resp(ar,
2333				       resp->tx_fetch_ind.token,
2334				       resp->tx_fetch_ind.fetch_seq_num,
2335				       resp->tx_fetch_ind.records,
2336				       num_records);
2337	if (unlikely(ret)) {
2338		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
2339			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
2340		/* FIXME: request fw restart */
2341	}
2342
2343	ath10k_htt_tx_txq_sync(ar);
2344}
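
/* The two-stage length validation above generalizes to any variable-length
 * firmware event: check the fixed header first, then extend the expected
 * length by each flexible array before dereferencing it.  A sketch with a
 * made-up event layout:
 */
struct example_fw_event {
	__le16 num_records;
	__le16 num_resp_ids;
	__le32 payload[];
} __packed;

static bool example_fw_event_len_ok(const struct sk_buff *skb)
{
	const struct example_fw_event *ev = (const void *)skb->data;
	size_t len = sizeof(*ev);

	if (skb->len < len)		/* fixed part must fit before reading it */
		return false;

	len += le16_to_cpu(ev->num_records) * sizeof(ev->payload[0]);
	len += le16_to_cpu(ev->num_resp_ids) * sizeof(ev->payload[0]);

	return skb->len >= len;		/* flexible data must fit as well */
}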
2345
2346static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
2347					   struct sk_buff *skb)
2348{
2349	const struct htt_resp *resp = (void *)skb->data;
2350	size_t len;
2351	int num_resp_ids;
2352
2353	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
2354
2355	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
2356	if (unlikely(skb->len < len)) {
2357		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
2358		return;
2359	}
2360
2361	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
2362	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
2363
2364	if (unlikely(skb->len < len)) {
2365		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
2366		return;
2367	}
2368
2369	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
2370					       resp->tx_fetch_confirm.resp_ids,
2371					       num_resp_ids);
2372}
2373
2374static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
2375					     struct sk_buff *skb)
2376{
2377	const struct htt_resp *resp = (void *)skb->data;
2378	const struct htt_tx_mode_switch_record *record;
2379	struct ieee80211_txq *txq;
2380	struct ath10k_txq *artxq;
2381	size_t len;
2382	size_t num_records;
2383	enum htt_tx_mode_switch_mode mode;
2384	bool enable;
2385	u16 info0;
2386	u16 info1;
2387	u16 threshold;
2388	u16 peer_id;
2389	u8 tid;
2390	int i;
2391
2392	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
2393
2394	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
2395	if (unlikely(skb->len < len)) {
2396		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
2397		return;
2398	}
2399
2400	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
2401	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
2402
2403	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
2404	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
2405	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
2406	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
2407
2408	ath10k_dbg(ar, ATH10K_DBG_HTT,
2409		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
2410		   info0, info1, enable, num_records, mode, threshold);
2411
2412	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
2413
2414	if (unlikely(skb->len < len)) {
2415		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: too many records\n");
2416		return;
2417	}
2418
2419	switch (mode) {
2420	case HTT_TX_MODE_SWITCH_PUSH:
2421	case HTT_TX_MODE_SWITCH_PUSH_PULL:
2422		break;
2423	default:
2424		ath10k_warn(ar, "received invalid tx_mode_switch_ind mode %d, ignoring\n",
2425			    mode);
2426		return;
2427	}
2428
2429	if (!enable)
2430		return;
2431
2432	ar->htt.tx_q_state.enabled = enable;
2433	ar->htt.tx_q_state.mode = mode;
2434	ar->htt.tx_q_state.num_push_allowed = threshold;
2435
2436	rcu_read_lock();
2437
2438	for (i = 0; i < num_records; i++) {
2439		record = &resp->tx_mode_switch_ind.records[i];
2440		info0 = le16_to_cpu(record->info0);
2441		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
2442		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
2443
2444		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2445		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2446			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2447				    peer_id, tid);
2448			continue;
2449		}
2450
2451		spin_lock_bh(&ar->data_lock);
2452		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2453		spin_unlock_bh(&ar->data_lock);
2454
2455		/* It is okay to release the lock and use txq because RCU read
2456		 * lock is held.
2457		 */
2458
2459		if (unlikely(!txq)) {
2460			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2461				    peer_id, tid);
2462			continue;
2463		}
2464
2465		spin_lock_bh(&ar->htt.tx_lock);
2466		artxq = (void *)txq->drv_priv;
2467		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
2468		spin_unlock_bh(&ar->htt.tx_lock);
2469	}
2470
2471	rcu_read_unlock();
2472
2473	ath10k_mac_tx_push_pending(ar);
2474}
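
/* For context: in PUSH mode the host pushes tx frames on its own, while in
 * PUSH_PULL mode it may push only up to num_push_allowed frames per queue
 * and relies on tx_fetch indications to pull the rest.  That is why the
 * handler above re-seeds num_push_allowed for each txq and then kicks
 * ath10k_mac_tx_push_pending().
 */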
2475
2476void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2477{
2478	bool release;
2479
2480	release = ath10k_htt_t2h_msg_handler(ar, skb);
2481
2482	/* Free the indication buffer */
2483	if (release)
2484		dev_kfree_skb_any(skb);
2485}
2486
2487static inline bool is_valid_legacy_rate(u8 rate)
2488{
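	/* Legacy rates in Mbit/s: CCK (1, 2, 5.5, 11) and OFDM (6-54); see
	 * the CCK 5.5 fix-up in ath10k_update_per_peer_tx_stats() below.
	 */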
2489	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
2490					  18, 24, 36, 48, 54};
2491	int i;
2492
2493	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
2494		if (rate == legacy_rates[i])
2495			return true;
2496	}
2497
2498	return false;
2499}
2500
2501static void
2502ath10k_update_per_peer_tx_stats(struct ath10k *ar,
2503				struct ieee80211_sta *sta,
2504				struct ath10k_per_peer_tx_stats *peer_stats)
2505{
2506	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
2507	u8 rate = 0, sgi;
2508	struct rate_info txrate;
2509
2510	lockdep_assert_held(&ar->data_lock);
2511
2512	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
2513	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
2514	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
2515	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
2516	sgi = ATH10K_HW_GI(peer_stats->flags);
2517
2518	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
2519		ath10k_warn(ar, "Invalid VHT mcs %hhd in peer stats", txrate.mcs);
2520		return;
2521	}
2522
2523	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
2524	    (txrate.mcs > 7 || txrate.nss < 1)) {
2525		ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd in peer stats",
2526			    txrate.mcs, txrate.nss);
2527		return;
2528	}
2529
2530	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
2531
2532	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
2533	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
2534		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
2535
2536		if (!is_valid_legacy_rate(rate)) {
2537			ath10k_warn(ar, "Invalid legacy rate %hhd in peer stats",
2538				    rate);
2539			return;
2540		}
2541
2542		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
2543		rate *= 10;
2544		if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
2545			rate = rate - 5;
2546		arsta->txrate.legacy = rate;
2547	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
2548		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
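		/* HT MCS is reported per stream, e.g. nss 2 with per-stream
		 * mcs 7 becomes HT MCS 15.
		 */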
2549		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
2550	} else {
2551		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
2552		arsta->txrate.mcs = txrate.mcs;
2553	}
2554
2555	if (sgi)
2556		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
2557
2558	arsta->txrate.nss = txrate.nss;
2559	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
2560}
2561
2562static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
2563					struct sk_buff *skb)
2564{
2565	struct htt_resp *resp = (struct htt_resp *)skb->data;
2566	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
2567	struct htt_per_peer_tx_stats_ind *tx_stats;
2568	struct ieee80211_sta *sta;
2569	struct ath10k_peer *peer;
2570	int peer_id, i;
2571	u8 ppdu_len, num_ppdu;
2572
2573	num_ppdu = resp->peer_tx_stats.num_ppdu;
2574	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
2575
2576	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
2577		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
2578		return;
2579	}
2580
2581	tx_stats = (struct htt_per_peer_tx_stats_ind *)
2582			(resp->peer_tx_stats.payload);
2583	peer_id = __le16_to_cpu(tx_stats->peer_id);
2584
2585	rcu_read_lock();
2586	spin_lock_bh(&ar->data_lock);
2587	peer = ath10k_peer_find_by_id(ar, peer_id);
2588	if (!peer) {
2589		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
2590			    peer_id);
2591		goto out;
2592	}
2593
2594	sta = peer->sta;
2595	for (i = 0; i < num_ppdu; i++) {
2596		tx_stats = (struct htt_per_peer_tx_stats_ind *)
2597			   (resp->peer_tx_stats.payload + i * ppdu_len);
2598
2599		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
2600		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
2601		p_tx_stats->failed_bytes =
2602				__le32_to_cpu(tx_stats->failed_bytes);
2603		p_tx_stats->ratecode = tx_stats->ratecode;
2604		p_tx_stats->flags = tx_stats->flags;
2605		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
2606		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
2607		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
2608
2609		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
2610	}
2611
2612out:
2613	spin_unlock_bh(&ar->data_lock);
2614	rcu_read_unlock();
2615}
2616
2617static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
2618{
2619	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
2620	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
2621	struct ath10k_10_2_peer_tx_stats *tx_stats;
2622	struct ieee80211_sta *sta;
2623	struct ath10k_peer *peer;
2624	u16 log_type = __le16_to_cpu(hdr->log_type);
2625	u32 peer_id = 0, i;
2626
2627	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
2628		return;
2629
2630	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
2631		    ATH10K_10_2_TX_STATS_OFFSET);
2632
2633	if (!tx_stats->tx_ppdu_cnt)
2634		return;
2635
2636	peer_id = tx_stats->peer_id;
2637
2638	rcu_read_lock();
2639	spin_lock_bh(&ar->data_lock);
2640	peer = ath10k_peer_find_by_id(ar, peer_id);
2641	if (!peer) {
2642		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
2643			    peer_id);
2644		goto out;
2645	}
2646
2647	sta = peer->sta;
2648	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
2649		p_tx_stats->succ_bytes =
2650			__le16_to_cpu(tx_stats->success_bytes[i]);
2651		p_tx_stats->retry_bytes =
2652			__le16_to_cpu(tx_stats->retry_bytes[i]);
2653		p_tx_stats->failed_bytes =
2654			__le16_to_cpu(tx_stats->failed_bytes[i]);
2655		p_tx_stats->ratecode = tx_stats->ratecode[i];
2656		p_tx_stats->flags = tx_stats->flags[i];
2657		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
2658		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
2659		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
2660
2661		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
2662	}
2663
2664out:
2665	spin_unlock_bh(&ar->data_lock);
2666	rcu_read_unlock();
2671}
2672
2673bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2674{
2675	struct ath10k_htt *htt = &ar->htt;
2676	struct htt_resp *resp = (struct htt_resp *)skb->data;
2677	enum htt_t2h_msg_type type;
2678
2679	/* confirm alignment */
2680	if (!IS_ALIGNED((unsigned long)skb->data, 4))
2681		ath10k_warn(ar, "unaligned htt message, expect trouble\n");
2682
2683	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
2684		   resp->hdr.msg_type);
2685
2686	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
2687		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
2688			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
2689		return true;
2690	}
2691	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
2692
2693	switch (type) {
2694	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
2695		htt->target_version_major = resp->ver_resp.major;
2696		htt->target_version_minor = resp->ver_resp.minor;
2697		complete(&htt->target_version_received);
2698		break;
2699	}
2700	case HTT_T2H_MSG_TYPE_RX_IND:
2701		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
2702		break;
2703	case HTT_T2H_MSG_TYPE_PEER_MAP: {
2704		struct htt_peer_map_event ev = {
2705			.vdev_id = resp->peer_map.vdev_id,
2706			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
2707		};
2708		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
2709		ath10k_peer_map_event(htt, &ev);
2710		break;
2711	}
2712	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
2713		struct htt_peer_unmap_event ev = {
2714			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
2715		};
2716		ath10k_peer_unmap_event(htt, &ev);
2717		break;
2718	}
2719	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
2720		struct htt_tx_done tx_done = {};
2721		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
2722
2723		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
2724
2725		switch (status) {
2726		case HTT_MGMT_TX_STATUS_OK:
2727			tx_done.status = HTT_TX_COMPL_STATE_ACK;
2728			break;
2729		case HTT_MGMT_TX_STATUS_RETRY:
2730			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2731			break;
2732		case HTT_MGMT_TX_STATUS_DROP:
2733			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2734			break;
2735		}
2736
2737		status = ath10k_txrx_tx_unref(htt, &tx_done);
2738		if (!status) {
2739			spin_lock_bh(&htt->tx_lock);
2740			ath10k_htt_tx_mgmt_dec_pending(htt);
2741			spin_unlock_bh(&htt->tx_lock);
2742		}
2743		break;
2744	}
2745	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
2746		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
2747		break;
2748	case HTT_T2H_MSG_TYPE_SEC_IND: {
2749		struct ath10k *ar = htt->ar;
2750		struct htt_security_indication *ev = &resp->security_indication;
2751
2752		ath10k_dbg(ar, ATH10K_DBG_HTT,
2753			   "sec ind peer_id %d unicast %d type %d\n",
2754			  __le16_to_cpu(ev->peer_id),
2755			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
2756			  MS(ev->flags, HTT_SECURITY_TYPE));
2757		complete(&ar->install_key_done);
2758		break;
2759	}
2760	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
2761		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2762				skb->data, skb->len);
2763		atomic_inc(&htt->num_mpdus_ready);
2764		break;
2765	}
2766	case HTT_T2H_MSG_TYPE_TEST:
2767		break;
2768	case HTT_T2H_MSG_TYPE_STATS_CONF:
2769		trace_ath10k_htt_stats(ar, skb->data, skb->len);
2770		break;
2771	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
2772		/* Firmware can return tx frames if it's unable to fully
2773		 * process them and suspects host may be able to fix it. ath10k
2774		 * sends all tx frames as already inspected so this shouldn't
2775		 * happen unless fw has a bug.
2776		 */
2777		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
2778		break;
2779	case HTT_T2H_MSG_TYPE_RX_ADDBA:
2780		ath10k_htt_rx_addba(ar, resp);
2781		break;
2782	case HTT_T2H_MSG_TYPE_RX_DELBA:
2783		ath10k_htt_rx_delba(ar, resp);
2784		break;
2785	case HTT_T2H_MSG_TYPE_PKTLOG: {
2786		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
2787					skb->len -
2788					offsetof(struct htt_resp,
2789						 pktlog_msg.payload));
2790
2791		if (ath10k_peer_stats_enabled(ar))
2792			ath10k_fetch_10_2_tx_stats(ar,
2793						   resp->pktlog_msg.payload);
2794		break;
2795	}
2796	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
2797		/* Ignore this event because mac80211 takes care of Rx
2798		 * aggregation reordering.
2799		 */
2800		break;
2801	}
2802	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
2803		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
2804		return false;
2805	}
2806	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
2807		break;
2808	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
2809		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
2810		u32 freq = __le32_to_cpu(resp->chan_change.freq);
2811
2812		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
2813		ath10k_dbg(ar, ATH10K_DBG_HTT,
2814			   "htt chan change freq %u phymode %s\n",
2815			   freq, ath10k_wmi_phymode_str(phymode));
2816		break;
2817	}
2818	case HTT_T2H_MSG_TYPE_AGGR_CONF:
2819		break;
2820	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
2821		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
2822
2823		if (!tx_fetch_ind) {
2824			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
2825			break;
2826		}
2827		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
2828		break;
2829	}
2830	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
2831		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
2832		break;
2833	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
2834		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
2835		break;
2836	case HTT_T2H_MSG_TYPE_PEER_STATS:
2837		ath10k_htt_fetch_peer_stats(ar, skb);
2838		break;
2839	case HTT_T2H_MSG_TYPE_EN_STATS:
2840	default:
2841		ath10k_warn(ar, "htt event (%d) not handled\n",
2842			    resp->hdr.msg_type);
2843		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2844				skb->data, skb->len);
2845		break;
2846	}
2847	return true;
2848}
2849EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
2850
2851void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
2852					     struct sk_buff *skb)
2853{
2854	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
2855	dev_kfree_skb_any(skb);
2856}
2857EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
2858
2859static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
2860{
2861	struct sk_buff *skb;
2862
2863	while (quota < budget) {
2864		if (skb_queue_empty(&ar->htt.rx_msdus_q))
2865			break;
2866
2867		skb = __skb_dequeue(&ar->htt.rx_msdus_q);
2868		if (!skb)
2869			break;
2870		ath10k_process_rx(ar, skb);
2871		quota++;
2872	}
2873
2874	return quota;
2875}
2876
2877int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
2878{
2879	struct ath10k_htt *htt = &ar->htt;
2880	struct htt_tx_done tx_done = {};
2881	struct sk_buff_head tx_ind_q;
2882	struct sk_buff *skb;
2883	unsigned long flags;
2884	int quota = 0, done, ret;
2885	bool resched_napi = false;
2886
2887	__skb_queue_head_init(&tx_ind_q);
2888
2889	/* Process pending frames before dequeuing more data
2890	 * from hardware.
2891	 */
2892	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
2893	if (quota == budget) {
2894		resched_napi = true;
2895		goto exit;
2896	}
2897
2898	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
2899		spin_lock_bh(&htt->rx_ring.lock);
2900		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
2901		spin_unlock_bh(&htt->rx_ring.lock);
2902
2903		dev_kfree_skb_any(skb);
2904		if (ret == -EIO) {
2905			resched_napi = true;
2906			goto exit;
2907		}
2908	}
2909
2910	while (atomic_read(&htt->num_mpdus_ready)) {
2911		ret = ath10k_htt_rx_handle_amsdu(htt);
2912		if (ret == -EIO) {
2913			resched_napi = true;
2914			goto exit;
2915		}
2916		atomic_dec(&htt->num_mpdus_ready);
2917	}
2918
2919	/* Deliver received data after processing data from hardware */
2920	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
2921
2922	/* From NAPI documentation:
2923	 *  The napi poll() function may also process TX completions, in which
2924	 *  case if it processes the entire TX ring then it should count that
2925	 *  work as the rest of the budget.
2926	 */
2927	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
2928		quota = budget;
2929
2930	/* kfifo_get: called only from this NAPI poll context so it's neatly serialized.
2931	 * From kfifo_get() documentation:
2932	 *  Note that with only one concurrent reader and one concurrent writer,
2933	 *  you don't need extra locking to use these macros.
2934	 */
2935	while (kfifo_get(&htt->txdone_fifo, &tx_done))
2936		ath10k_txrx_tx_unref(htt, &tx_done);
2937
2938	ath10k_mac_tx_push_pending(ar);
2939
2940	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
2941	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
2942	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
2943
2944	while ((skb = __skb_dequeue(&tx_ind_q))) {
2945		ath10k_htt_rx_tx_fetch_ind(ar, skb);
2946		dev_kfree_skb_any(skb);
2947	}
2948
2949exit:
2950	ath10k_htt_rx_msdu_buff_replenish(htt);
2951	/* In case of rx failure or more data to read, report budget
2952	 * to reschedule NAPI poll
2953	 */
2954	done = resched_napi ? budget : quota;
2955
2956	return done;
2957}
2958EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
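
/* A sketch of how a NAPI poll callback consumes the budget accounting
 * implemented above (illustrative; the real wiring lives in the bus code,
 * e.g. the PCI layer's poll handler).
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath10k *ar = container_of(napi, struct ath10k, napi);
	int done = ath10k_htt_txrx_compl_task(ar, budget);

	/* Returning less than the budget lets NAPI idle the poll;
	 * returning the full budget keeps it scheduled.
	 */
	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}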
2959
2960static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
2961	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
2962	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
2963	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
2964	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
2965	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
2966};
2967
2968static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
2969	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
2970	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
2971	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
2972	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
2973	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
2974};
2975
2976void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
2977{
2978	struct ath10k *ar = htt->ar;
2979
2980	if (ar->hw_params.target_64bit)
2981		htt->rx_ops = &htt_rx_ops_64;
2982	else
2983		htt->rx_ops = &htt_rx_ops_32;
2984}