v6.8
   1/*
   2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
   3 *
   4 * Permission to use, copy, modify, and/or distribute this software for any
   5 * purpose with or without fee is hereby granted, provided that the above
   6 * copyright notice and this permission notice appear in all copies.
   7 *
   8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15 */
  16
  17/* DXE - DMA transfer engine
  18 * We have two channels (high prio and low prio) for TX and two channels for RX.
  19 * Data packets are transferred through the low-priority channels;
  20 * management packets are transferred through the high-priority channels.
  21 */
  22
  23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  24
  25#include <linux/interrupt.h>
  26#include <linux/soc/qcom/smem_state.h>
  27#include "wcn36xx.h"
  28#include "txrx.h"
  29
  30static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
  31{
  32	wcn36xx_dbg(WCN36XX_DBG_DXE,
  33		    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
  34		    addr, data);
  35
  36	writel(data, wcn->ccu_base + addr);
  37}
  38
  39static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
  40{
  41	wcn36xx_dbg(WCN36XX_DBG_DXE,
  42		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
  43		    addr, data);
  44
  45	writel(data, wcn->dxe_base + addr);
  46}
  47
  48static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
  49{
  50	*data = readl(wcn->dxe_base + addr);
  51
  52	wcn36xx_dbg(WCN36XX_DBG_DXE,
  53		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
  54		    addr, *data);
  55}
  56
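/* The control blocks are linked into a ring, so free at most desc_num
 * entries; the NULL check covers a partially built list after a failed
 * allocation.
 */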
  57static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
  58{
  59	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
  60	int i;
  61
  62	for (i = 0; i < ch->desc_num && ctl; i++) {
  63		next = ctl->next;
  64		kfree(ctl);
  65		ctl = next;
  66	}
  67}
  68
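/* Allocate desc_num control blocks and link them into a ring: the last
 * block's next pointer wraps back to the head so the TX/RX paths can walk
 * the list circularly.
 */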
  69static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
  70{
  71	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
  72	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
  73	int i;
  74
  75	spin_lock_init(&ch->lock);
  76	for (i = 0; i < ch->desc_num; i++) {
  77		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
  78		if (!cur_ctl)
  79			goto out_fail;
  80
  81		cur_ctl->ctl_blk_order = i;
  82		if (i == 0) {
  83			ch->head_blk_ctl = cur_ctl;
  84			ch->tail_blk_ctl = cur_ctl;
  85		} else if (ch->desc_num - 1 == i) {
  86			prev_ctl->next = cur_ctl;
  87			cur_ctl->next = ch->head_blk_ctl;
  88		} else {
  89			prev_ctl->next = cur_ctl;
  90		}
  91		prev_ctl = cur_ctl;
  92	}
  93
  94	return 0;
  95
  96out_fail:
  97	wcn36xx_dxe_free_ctl_block(ch);
  98	return -ENOMEM;
  99}
 100
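/* Fill in the static per-channel configuration (channel type, ring size,
 * TX work-queue index and control words), allocate the control-block rings
 * for all four channels and program the initial SMSM TX state.
 */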
 101int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
 102{
 103	int ret;
 104
 105	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
 106	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
 107	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
 108	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
 109
 110	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
 111	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
 112	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
 113	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
 114
 115	wcn->dxe_tx_l_ch.dxe_wq =  WCN36XX_DXE_WQ_TX_L(wcn);
 116	wcn->dxe_tx_h_ch.dxe_wq =  WCN36XX_DXE_WQ_TX_H(wcn);
 117
 118	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
 119	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
 120
 121	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
 122	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
 123
 124	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
 125	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
 126
 127	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
 128	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
 129
 130	/* DXE control block allocation */
 131	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
 132	if (ret)
 133		goto out_err;
 134	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
 135	if (ret)
 136		goto out_err;
 137	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
 138	if (ret)
 139		goto out_err;
 140	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
 141	if (ret)
 142		goto out_err;
 143
  144	/* Initialize SMSM state: clear TX Enable and set TX Rings Empty */
 145	ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
 146					  WCN36XX_SMSM_WLAN_TX_ENABLE |
 147					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
 148					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
 149	if (ret)
 150		goto out_err;
 151
 152	return 0;
 153
 154out_err:
 155	wcn36xx_err("Failed to allocate DXE control blocks\n");
 156	wcn36xx_dxe_free_ctl_blks(wcn);
 157	return -ENOMEM;
 158}
 159
 160void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
 161{
 162	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
 163	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
 164	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
 165	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
 166}
 167
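/* Allocate a coherent DMA array of hardware descriptors for the channel and
 * chain them through phy_next_l into a ring that mirrors the control-block
 * ring, pairing each control block with its descriptor.
 */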
 168static int wcn36xx_dxe_init_descs(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *wcn_ch)
 169{
 170	struct device *dev = wcn->dev;
 171	struct wcn36xx_dxe_desc *cur_dxe = NULL;
 172	struct wcn36xx_dxe_desc *prev_dxe = NULL;
 173	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
 174	size_t size;
 175	int i;
 176
 177	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
 178	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
 179					      GFP_KERNEL);
 180	if (!wcn_ch->cpu_addr)
 181		return -ENOMEM;
 182
 183	cur_dxe = wcn_ch->cpu_addr;
 184	cur_ctl = wcn_ch->head_blk_ctl;
 185
 186	for (i = 0; i < wcn_ch->desc_num; i++) {
 187		cur_ctl->desc = cur_dxe;
 188		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
 189			i * sizeof(struct wcn36xx_dxe_desc);
 190
 191		switch (wcn_ch->ch_type) {
 192		case WCN36XX_DXE_CH_TX_L:
 193			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
 194			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L(wcn);
 195			break;
 196		case WCN36XX_DXE_CH_TX_H:
 197			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
 198			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H(wcn);
 199			break;
 200		case WCN36XX_DXE_CH_RX_L:
 201			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
 202			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
 203			break;
 204		case WCN36XX_DXE_CH_RX_H:
 205			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
 206			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
 207			break;
 208		}
 209		if (0 == i) {
 210			cur_dxe->phy_next_l = 0;
 211		} else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
 212			prev_dxe->phy_next_l =
 213				cur_ctl->desc_phy_addr;
 214		} else if (i == (wcn_ch->desc_num - 1)) {
 215			prev_dxe->phy_next_l =
 216				cur_ctl->desc_phy_addr;
 217			cur_dxe->phy_next_l =
 218				wcn_ch->head_blk_ctl->desc_phy_addr;
 219		}
 220		cur_ctl = cur_ctl->next;
 221		prev_dxe = cur_dxe;
 222		cur_dxe++;
 223	}
 224
 225	return 0;
 226}
 227
 228static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
 229{
 230	size_t size;
 231
 232	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
  233	dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
 234}
 235
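/* TX rings alternate BD and skb descriptors: every even entry gets a chunk
 * from the preallocated BD pool, every odd entry will later point at the skb
 * payload.
 */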
 236static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
 237				   struct wcn36xx_dxe_mem_pool *pool)
 238{
 239	int i, chunk_size = pool->chunk_size;
 240	dma_addr_t bd_phy_addr = pool->phy_addr;
 241	void *bd_cpu_addr = pool->virt_addr;
 242	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
 243
 244	for (i = 0; i < ch->desc_num; i++) {
  245		/* Only every second DXE descriptor needs a BD pointer;
  246		 * the others will point to the skb data. */
 247		if (!(i & 1)) {
 248			cur->bd_phy_addr = bd_phy_addr;
 249			cur->bd_cpu_addr = bd_cpu_addr;
 250			bd_phy_addr += chunk_size;
 251			bd_cpu_addr += chunk_size;
 252		} else {
 253			cur->bd_phy_addr = 0;
 254			cur->bd_cpu_addr = NULL;
 255		}
 256		cur = cur->next;
 257	}
 258}
 259
 260static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
 261{
 262	int reg_data = 0;
 263
 264	wcn36xx_dxe_read_register(wcn,
 265				  WCN36XX_DXE_INT_MASK_REG,
 266				  &reg_data);
 267
 268	reg_data |= wcn_ch;
 269
 270	wcn36xx_dxe_write_register(wcn,
 271				   WCN36XX_DXE_INT_MASK_REG,
 272				   (int)reg_data);
 273	return 0;
 274}
 275
 276static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
 277{
 278	int reg_data = 0;
 279
 280	wcn36xx_dxe_read_register(wcn,
 281				  WCN36XX_DXE_INT_MASK_REG,
 282				  &reg_data);
 283
 284	reg_data &= ~wcn_ch;
 285
 286	wcn36xx_dxe_write_register(wcn,
 287				   WCN36XX_DXE_INT_MASK_REG,
 288				   (int)reg_data);
 289}
 290
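/* Allocate a receive skb and DMA-map its data area so the hardware can write
 * an incoming frame straight into it; the mapping is stored in the
 * descriptor's dst_addr_l.
 */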
 291static int wcn36xx_dxe_fill_skb(struct device *dev,
 292				struct wcn36xx_dxe_ctl *ctl,
 293				gfp_t gfp)
 294{
 295	struct wcn36xx_dxe_desc *dxe = ctl->desc;
 296	struct sk_buff *skb;
 297
 298	skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
 299	if (skb == NULL)
 300		return -ENOMEM;
 301
 302	dxe->dst_addr_l = dma_map_single(dev,
 303					 skb_tail_pointer(skb),
 304					 WCN36XX_PKT_SIZE,
 305					 DMA_FROM_DEVICE);
 306	if (dma_mapping_error(dev, dxe->dst_addr_l)) {
 307		dev_err(dev, "unable to map skb\n");
 308		kfree_skb(skb);
 309		return -ENOMEM;
 310	}
 311	ctl->skb = skb;
 312
 313	return 0;
 314}
 315
 316static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
 317				    struct wcn36xx_dxe_ch *wcn_ch)
 318{
 319	int i;
 320	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
 321
 322	cur_ctl = wcn_ch->head_blk_ctl;
 323
 324	for (i = 0; i < wcn_ch->desc_num; i++) {
 325		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
 326		cur_ctl = cur_ctl->next;
 327	}
 328
 329	return 0;
 330}
 331
 332static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
 333				     struct wcn36xx_dxe_ch *wcn_ch)
 334{
 335	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
 336	int i;
 337
 338	for (i = 0; i < wcn_ch->desc_num; i++) {
 339		kfree_skb(cur->skb);
 340		cur = cur->next;
 341	}
 342}
 343
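/* Called when the firmware reports the TX status for the frame parked in
 * tx_ack_skb: cancel the timeout timer, report ACK/no-ACK to mac80211 and
 * wake the queues.
 */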
 344void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
 345{
 346	struct ieee80211_tx_info *info;
 347	struct sk_buff *skb;
 348	unsigned long flags;
 349
 350	spin_lock_irqsave(&wcn->dxe_lock, flags);
 351	skb = wcn->tx_ack_skb;
 352	wcn->tx_ack_skb = NULL;
 353	del_timer(&wcn->tx_ack_timer);
 354	spin_unlock_irqrestore(&wcn->dxe_lock, flags);
 355
 356	if (!skb) {
 357		wcn36xx_warn("Spurious TX complete indication\n");
 358		return;
 359	}
 360
 361	info = IEEE80211_SKB_CB(skb);
 362
 363	if (status == 1)
 364		info->flags |= IEEE80211_TX_STAT_ACK;
 365	else
 366		info->flags &= ~IEEE80211_TX_STAT_ACK;
 367
 368	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
 369
 370	ieee80211_tx_status_irqsafe(wcn->hw, skb);
 371	ieee80211_wake_queues(wcn->hw);
 372}
 373
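/* TX ack timer expiry: no TX status indication arrived in time, so complete
 * the pending frame towards mac80211 without the ACK flag set.
 */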
 374static void wcn36xx_dxe_tx_timer(struct timer_list *t)
 375{
 376	struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);
 377	struct ieee80211_tx_info *info;
 378	unsigned long flags;
 379	struct sk_buff *skb;
 380
 381	/* TX Timeout */
 382	wcn36xx_dbg(WCN36XX_DBG_DXE, "TX timeout\n");
 383
 384	spin_lock_irqsave(&wcn->dxe_lock, flags);
 385	skb = wcn->tx_ack_skb;
 386	wcn->tx_ack_skb = NULL;
 387	spin_unlock_irqrestore(&wcn->dxe_lock, flags);
 388
 389	if (!skb)
 390		return;
 391
 392	info = IEEE80211_SKB_CB(skb);
 393	info->flags &= ~IEEE80211_TX_STAT_ACK;
 394	info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 395
 396	ieee80211_tx_status_irqsafe(wcn->hw, skb);
 397	ieee80211_wake_queues(wcn->hw);
 398}
 399
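/* Walk the TX ring from the tail and complete every descriptor the hardware
 * has released (VLD bit cleared): unmap the frame, report its status to
 * mac80211 (directly, or via tx_ack_skb and the ack timer when a TX status
 * indication was requested) and wake the queues if they were stopped.
 */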
 400static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
 401{
 402	struct wcn36xx_dxe_ctl *ctl;
 403	struct ieee80211_tx_info *info;
 404	unsigned long flags;
 405
 406	/*
  407	 * Use a do-while loop so at least one iteration runs: when the ring
  408	 * is completely full, head and tail point to the same element and a
  409	 * plain while loop would not iterate at all.
 410	 */
 411	spin_lock_irqsave(&ch->lock, flags);
 412	ctl = ch->tail_blk_ctl;
 413	do {
 414		if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
 415			break;
 416
 417		if (ctl->skb &&
 418		    READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
 419			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
 420					 ctl->skb->len, DMA_TO_DEVICE);
 421			info = IEEE80211_SKB_CB(ctl->skb);
 422			if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
 423				if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
 424					info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 425					ieee80211_tx_status_irqsafe(wcn->hw, ctl->skb);
 426				} else {
 427					/* Wait for the TX ack indication or timeout... */
 428					spin_lock(&wcn->dxe_lock);
 429					if (WARN_ON(wcn->tx_ack_skb))
 430						ieee80211_free_txskb(wcn->hw, wcn->tx_ack_skb);
 431					wcn->tx_ack_skb = ctl->skb; /* Tracking ref */
 432					mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
 433					spin_unlock(&wcn->dxe_lock);
 434				}
 435				/* do not free, ownership transferred to mac80211 status cb */
 436			} else {
 437				ieee80211_free_txskb(wcn->hw, ctl->skb);
 438			}
 439
 440			if (wcn->queues_stopped) {
 441				wcn->queues_stopped = false;
 442				ieee80211_wake_queues(wcn->hw);
 443			}
 444
 445			ctl->skb = NULL;
 446		}
 447		ctl = ctl->next;
 448	} while (ctl != ch->head_blk_ctl);
 449
 450	ch->tail_blk_ctl = ctl;
 451	spin_unlock_irqrestore(&ch->lock, flags);
 452}
 453
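/* TX completion interrupt: for each TX channel that raised it, acknowledge
 * the error/done/ed status bits and reap the finished descriptors.
 */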
 454static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
 455{
 456	struct wcn36xx *wcn = dev;
 457	int int_src, int_reason;
 458
 459	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
 460
 461	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
 462		wcn36xx_dxe_read_register(wcn,
 463					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
 464					  &int_reason);
 465
 466		wcn36xx_dxe_write_register(wcn,
 467					   WCN36XX_DXE_0_INT_CLR,
 468					   WCN36XX_INT_MASK_CHAN_TX_H);
 469
  470		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
 471			wcn36xx_dxe_write_register(wcn,
 472						   WCN36XX_DXE_0_INT_ERR_CLR,
 473						   WCN36XX_INT_MASK_CHAN_TX_H);
 474
 475			wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
 476					int_src);
 477		}
 478
 479		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
 480			wcn36xx_dxe_write_register(wcn,
 481						   WCN36XX_DXE_0_INT_DONE_CLR,
 482						   WCN36XX_INT_MASK_CHAN_TX_H);
 483		}
 484
 485		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
 486			wcn36xx_dxe_write_register(wcn,
 487						   WCN36XX_DXE_0_INT_ED_CLR,
 488						   WCN36XX_INT_MASK_CHAN_TX_H);
 489		}
 490
 491		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
 492			    int_reason);
 493
 494		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
 495				  WCN36XX_CH_STAT_INT_ED_MASK)) {
 496			reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
 497		}
 498	}
 499
 500	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
 501		wcn36xx_dxe_read_register(wcn,
 502					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
 503					  &int_reason);
 504
 505		wcn36xx_dxe_write_register(wcn,
 506					   WCN36XX_DXE_0_INT_CLR,
 507					   WCN36XX_INT_MASK_CHAN_TX_L);
  508
  509		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
 510			wcn36xx_dxe_write_register(wcn,
 511						   WCN36XX_DXE_0_INT_ERR_CLR,
 512						   WCN36XX_INT_MASK_CHAN_TX_L);
 513
 514			wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
 515					int_src);
 516		}
 517
 518		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
 519			wcn36xx_dxe_write_register(wcn,
 520						   WCN36XX_DXE_0_INT_DONE_CLR,
 521						   WCN36XX_INT_MASK_CHAN_TX_L);
 522		}
 523
 524		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
 525			wcn36xx_dxe_write_register(wcn,
 526						   WCN36XX_DXE_0_INT_ED_CLR,
 527						   WCN36XX_INT_MASK_CHAN_TX_L);
 528		}
 529
 530		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
 531			    int_reason);
 532
 533		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
 534				  WCN36XX_CH_STAT_INT_ED_MASK)) {
 535			reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
 536		}
 537	}
 538
 539	return IRQ_HANDLED;
 540}
 541
 542static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
 543{
 544	struct wcn36xx *wcn = dev;
 545
 546	wcn36xx_dxe_rx_frame(wcn);
 547
 548	return IRQ_HANDLED;
 549}
 550
 551static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
 552{
 553	int ret;
 554
 555	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
 556			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
 557	if (ret) {
 558		wcn36xx_err("failed to alloc tx irq\n");
 559		goto out_err;
 560	}
 561
 562	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
 563			  "wcn36xx_rx", wcn);
 564	if (ret) {
 565		wcn36xx_err("failed to alloc rx irq\n");
 566		goto out_txirq;
 567	}
 568
 569	enable_irq_wake(wcn->rx_irq);
 570
 571	return 0;
 572
 573out_txirq:
 574	free_irq(wcn->tx_irq, wcn);
 575out_err:
 576	return ret;
 577
 578}
 579
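/* Service one RX channel: acknowledge its interrupt status, then hand every
 * descriptor the hardware has filled (VLD cleared) to mac80211, replacing the
 * consumed skb with a freshly mapped one before returning the descriptor to
 * the hardware.
 */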
 580static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
 581				     struct wcn36xx_dxe_ch *ch,
 582				     u32 ctrl,
 583				     u32 en_mask,
 584				     u32 int_mask,
 585				     u32 status_reg)
 586{
 587	struct wcn36xx_dxe_desc *dxe;
 588	struct wcn36xx_dxe_ctl *ctl;
 589	dma_addr_t  dma_addr;
 590	struct sk_buff *skb;
 591	u32 int_reason;
 592	int ret;
 593
 594	wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
 595	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);
 596
 597	if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
 598		wcn36xx_dxe_write_register(wcn,
 599					   WCN36XX_DXE_0_INT_ERR_CLR,
 600					   int_mask);
 601
 602		wcn36xx_err("DXE IRQ reported error on RX channel\n");
 603	}
 604
 605	if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
 606		wcn36xx_dxe_write_register(wcn,
 607					   WCN36XX_DXE_0_INT_DONE_CLR,
 608					   int_mask);
 609
 610	if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
 611		wcn36xx_dxe_write_register(wcn,
 612					   WCN36XX_DXE_0_INT_ED_CLR,
 613					   int_mask);
 614
 615	if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
 616			    WCN36XX_CH_STAT_INT_ED_MASK)))
 617		return 0;
 618
 619	spin_lock(&ch->lock);
 620
 621	ctl = ch->head_blk_ctl;
 622	dxe = ctl->desc;
 623
 624	while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
 625		/* do not read until we own DMA descriptor */
 626		dma_rmb();
 627
 628		/* read/modify DMA descriptor */
 629		skb = ctl->skb;
 630		dma_addr = dxe->dst_addr_l;
 631		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
 632		if (0 == ret) {
  633			/* New skb allocation succeeded: keep the new one for DMA
  634			 * and hand the old one up to the network stack.
 635			 */
 636			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
 637					DMA_FROM_DEVICE);
 638			wcn36xx_rx_skb(wcn, skb);
 639		}
  640		/* Otherwise keep the old skb (it was never handed up) and reuse
  641		 * it for RX DMA, dropping the packet it contained.
 642		 */
 643
 644		/* flush descriptor changes before re-marking as valid */
 645		dma_wmb();
 646		dxe->ctrl = ctrl;
 647
 648		ctl = ctl->next;
 649		dxe = ctl->desc;
 650	}
 651	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);
 652
 653	ch->head_blk_ctl = ctl;
 654
 655	spin_unlock(&ch->lock);
 656
 657	return 0;
 658}
 659
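/* RX entry point, called from the RX IRQ handler: check which RX channels
 * raised the interrupt and service them.
 */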
 660void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
 661{
 662	int int_src;
 663
 664	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
 665
 666	/* RX_LOW_PRI */
 667	if (int_src & WCN36XX_DXE_INT_CH1_MASK)
 668		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
 669					  WCN36XX_DXE_CTRL_RX_L,
 670					  WCN36XX_DXE_INT_CH1_MASK,
 671					  WCN36XX_INT_MASK_CHAN_RX_L,
 672					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);
 673
 674	/* RX_HIGH_PRI */
 675	if (int_src & WCN36XX_DXE_INT_CH3_MASK)
 676		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
 677					  WCN36XX_DXE_CTRL_RX_H,
 678					  WCN36XX_DXE_INT_CH3_MASK,
 679					  WCN36XX_INT_MASK_CHAN_RX_H,
 680					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);
 681
 682	if (!int_src)
 683		wcn36xx_warn("No DXE interrupt pending\n");
 684}
 685
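/* Preallocate one coherent BD (buffer descriptor) pool per TX channel: a
 * padded chunk for every descriptor on the high-priority (management) and
 * low-priority (data) rings.
 */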
 686int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
 687{
 688	size_t s;
 689	void *cpu_addr;
 690
 691	/* Allocate BD headers for MGMT frames */
 692
  693	/* Where this comes from, ask QC */
 694	wcn->mgmt_mem_pool.chunk_size =	WCN36XX_BD_CHUNK_SIZE +
 695		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 696
 697	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
 698	cpu_addr = dma_alloc_coherent(wcn->dev, s,
 699				      &wcn->mgmt_mem_pool.phy_addr,
 700				      GFP_KERNEL);
 701	if (!cpu_addr)
 702		goto out_err;
 703
 704	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
 705
 706	/* Allocate BD headers for DATA frames */
 707
  708	/* Where this comes from, ask QC */
 709	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
 710		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 711
 712	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
 713	cpu_addr = dma_alloc_coherent(wcn->dev, s,
 714				      &wcn->data_mem_pool.phy_addr,
 715				      GFP_KERNEL);
 716	if (!cpu_addr)
 717		goto out_err;
 718
 719	wcn->data_mem_pool.virt_addr = cpu_addr;
 720
 721	return 0;
 722
 723out_err:
 724	wcn36xx_dxe_free_mem_pools(wcn);
 725	wcn36xx_err("Failed to allocate BD mempool\n");
 726	return -ENOMEM;
 727}
 728
 729void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
 730{
 731	if (wcn->mgmt_mem_pool.virt_addr)
 732		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
 733				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
 734				  wcn->mgmt_mem_pool.virt_addr,
 735				  wcn->mgmt_mem_pool.phy_addr);
 736
 737	if (wcn->data_mem_pool.virt_addr) {
 738		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
 739				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
 740				  wcn->data_mem_pool.virt_addr,
 741				  wcn->data_mem_pool.phy_addr);
 742	}
 743}
 744
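/* Queue one frame for transmission: consume two consecutive ring entries, one
 * carrying the buffer descriptor (BD) and the next carrying the skb payload.
 * The skb descriptor is marked valid before the BD descriptor so the hardware
 * never sees a half-initialised pair.
 */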
 745int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
 746			 struct wcn36xx_vif *vif_priv,
 747			 struct wcn36xx_tx_bd *bd,
 748			 struct sk_buff *skb,
 749			 bool is_low)
 750{
 751	struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
 752	struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
 753	struct wcn36xx_dxe_ch *ch = NULL;
 754	unsigned long flags;
 755	int ret;
 756
 757	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
 758
 759	spin_lock_irqsave(&ch->lock, flags);
 760	ctl_bd = ch->head_blk_ctl;
 761	ctl_skb = ctl_bd->next;
 762
 763	/*
  764	 * If the skb is not NULL we have caught up with the tail of the ring,
  765	 * i.e. the ring is full. Stop the queues to let mac80211 back off
  766	 * until the ring has an empty slot again.
 767	 */
 768	if (NULL != ctl_skb->skb) {
 769		ieee80211_stop_queues(wcn->hw);
 770		wcn->queues_stopped = true;
 771		spin_unlock_irqrestore(&ch->lock, flags);
 772		return -EBUSY;
 773	}
 774
 775	if (unlikely(ctl_skb->bd_cpu_addr)) {
  776		wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
 777		ret = -EINVAL;
 778		goto unlock;
 779	}
 780
 781	desc_bd = ctl_bd->desc;
 782	desc_skb = ctl_skb->desc;
 783
 784	ctl_bd->skb = NULL;
 785
 786	/* write buffer descriptor */
 787	memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));
 788
 789	/* Set source address of the BD we send */
 790	desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
 791	desc_bd->dst_addr_l = ch->dxe_wq;
 792	desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);
 793
 794	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
 795
 796	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
 797			 (char *)desc_bd, sizeof(*desc_bd));
 798	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
 799			 "BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
 800			 sizeof(struct wcn36xx_tx_bd));
 801
 802	desc_skb->src_addr_l = dma_map_single(wcn->dev,
 803					      skb->data,
 804					      skb->len,
 805					      DMA_TO_DEVICE);
 806	if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
 807		dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
 808		ret = -ENOMEM;
 809		goto unlock;
 810	}
 811
 812	ctl_skb->skb = skb;
 813	desc_skb->dst_addr_l = ch->dxe_wq;
 814	desc_skb->fr_len = ctl_skb->skb->len;
 815
 816	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
 817			 (char *)desc_skb, sizeof(*desc_skb));
 818	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
 819			 (char *)ctl_skb->skb->data, ctl_skb->skb->len);
 820
 821	/* Move the head of the ring to the next empty descriptor */
 822	ch->head_blk_ctl = ctl_skb->next;
 823
 824	/* Commit all previous writes and set descriptors to VALID */
 825	wmb();
 826	desc_skb->ctrl = ch->ctrl_skb;
 827	wmb();
 828	desc_bd->ctrl = ch->ctrl_bd;
 829
 830	/*
  831	 * When connected and sending a data frame the chip can be in sleep
  832	 * mode, and writing to the register will not wake it up. Instead,
  833	 * notify the chip about the new frame through the SMSM bus.
 834	 */
  835	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
 836		qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
 837					    WCN36XX_SMSM_WLAN_TX_ENABLE,
 838					    WCN36XX_SMSM_WLAN_TX_ENABLE);
 839	} else {
 840		/* indicate End Of Packet and generate interrupt on descriptor
 841		 * done.
 842		 */
 843		wcn36xx_dxe_write_register(wcn,
 844			ch->reg_ctrl, ch->def_ctrl);
 845	}
 846
 847	ret = 0;
 848unlock:
 849	spin_unlock_irqrestore(&ch->lock, flags);
 850	return ret;
 851}
 852
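/* Return true when no skb descriptor in the TX ring still holds a frame,
 * i.e. the hardware has drained everything that was queued.
 */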
 853static bool _wcn36xx_dxe_tx_channel_is_empty(struct wcn36xx_dxe_ch *ch)
 854{
 855	unsigned long flags;
 856	struct wcn36xx_dxe_ctl *ctl_bd_start, *ctl_skb_start;
 857	struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
 858	bool ret = true;
 859
 860	spin_lock_irqsave(&ch->lock, flags);
 861
 862	/* Loop through ring buffer looking for nonempty entries. */
 863	ctl_bd_start = ch->head_blk_ctl;
 864	ctl_bd = ctl_bd_start;
 865	ctl_skb_start = ctl_bd_start->next;
 866	ctl_skb = ctl_skb_start;
 867	do {
 868		if (ctl_skb->skb) {
 869			ret = false;
 870			goto unlock;
 871		}
 872		ctl_bd = ctl_skb->next;
 873		ctl_skb = ctl_bd->next;
 874	} while (ctl_skb != ctl_skb_start);
 875
 876unlock:
 877	spin_unlock_irqrestore(&ch->lock, flags);
 878	return ret;
 879}
 880
 881int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn)
 882{
 883	int i = 0;
 884
 885	/* Called with mac80211 queues stopped. Wait for empty HW queues. */
 886	do {
 887		if (_wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_l_ch) &&
 888		    _wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_h_ch)) {
 889			return 0;
 890		}
 891		/* This ieee80211_ops callback is specifically allowed to
 892		 * sleep.
 893		 */
 894		usleep_range(1000, 1100);
 895	} while (++i < 100);
 896
 897	return -EBUSY;
 898}
 899
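/* Bring the DXE block up: reset it, route the channel interrupts, set up the
 * descriptor rings and preallocated buffers for all four channels, request
 * the TX/RX IRQs and finally unmask the per-channel interrupts.
 */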
 900int wcn36xx_dxe_init(struct wcn36xx *wcn)
 901{
 902	int reg_data = 0, ret;
 903
 904	reg_data = WCN36XX_DXE_REG_RESET;
 905	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
 906
 907	/* Select channels for rx avail and xfer done interrupts... */
 908	reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
 909		    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
 910	if (wcn->is_pronto)
 911		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
 912	else
 913		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);
 914
 915	/***************************************/
 916	/* Init descriptors for TX LOW channel */
 917	/***************************************/
 918	ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_tx_l_ch);
 919	if (ret) {
 920		dev_err(wcn->dev, "Error allocating descriptor\n");
 921		return ret;
 922	}
 923	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
 924
 925	/* Write channel head to a NEXT register */
 926	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
 927		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
 928
 929	/* Program DMA destination addr for TX LOW */
 930	wcn36xx_dxe_write_register(wcn,
 931		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
 932		WCN36XX_DXE_WQ_TX_L(wcn));
 933
  934	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
  935
 936	/***************************************/
 937	/* Init descriptors for TX HIGH channel */
 938	/***************************************/
 939	ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_tx_h_ch);
 940	if (ret) {
 941		dev_err(wcn->dev, "Error allocating descriptor\n");
 942		goto out_err_txh_ch;
 943	}
 944
 945	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
 946
 947	/* Write channel head to a NEXT register */
 948	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
 949		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
 950
 951	/* Program DMA destination addr for TX HIGH */
 952	wcn36xx_dxe_write_register(wcn,
 953		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
 954		WCN36XX_DXE_WQ_TX_H(wcn));
 955
 956	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
  957
  958	/***************************************/
 959	/* Init descriptors for RX LOW channel */
 960	/***************************************/
 961	ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_rx_l_ch);
 962	if (ret) {
 963		dev_err(wcn->dev, "Error allocating descriptor\n");
 964		goto out_err_rxl_ch;
 965	}
  966
  967	/* For RX we need to preallocate buffers */
 968	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
 969
 970	/* Write channel head to a NEXT register */
 971	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
 972		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
 973
 974	/* Write DMA source address */
 975	wcn36xx_dxe_write_register(wcn,
 976		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
 977		WCN36XX_DXE_WQ_RX_L);
 978
 979	/* Program preallocated destination address */
 980	wcn36xx_dxe_write_register(wcn,
 981		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
 982		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
 983
 984	/* Enable default control registers */
 985	wcn36xx_dxe_write_register(wcn,
 986		WCN36XX_DXE_REG_CTL_RX_L,
 987		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
  988
  989	/***************************************/
 990	/* Init descriptors for RX HIGH channel */
 991	/***************************************/
 992	ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_rx_h_ch);
 993	if (ret) {
 994		dev_err(wcn->dev, "Error allocating descriptor\n");
 995		goto out_err_rxh_ch;
 996	}
 997
  998	/* For RX we need to preallocate buffers */
 999	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
1000
 1001	/* Write channel head to a NEXT register */
1002	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
1003		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
1004
1005	/* Write DMA source address */
1006	wcn36xx_dxe_write_register(wcn,
1007		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
1008		WCN36XX_DXE_WQ_RX_H);
1009
1010	/* Program preallocated destination address */
1011	wcn36xx_dxe_write_register(wcn,
1012		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
 1013		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
1014
1015	/* Enable default control registers */
1016	wcn36xx_dxe_write_register(wcn,
1017		WCN36XX_DXE_REG_CTL_RX_H,
1018		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
 1019
 1020	ret = wcn36xx_dxe_request_irqs(wcn);
1021	if (ret < 0)
1022		goto out_err_irq;
1023
1024	timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);
1025
1026	/* Enable channel interrupts */
1027	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
1028	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
1029	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
1030	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
1031
1032	return 0;
1033
1034out_err_irq:
1035	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
1036out_err_rxh_ch:
1037	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
1038out_err_rxl_ch:
1039	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
1040out_err_txh_ch:
1041	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
1042
1043	return ret;
1044}
1045
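/* Tear the DXE block down in reverse order of init: mask the channel
 * interrupts, free the IRQs, stop the ack timer, complete any pending TX
 * status frame, put the block back into reset and release the RX skbs and
 * descriptor rings.
 */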
1046void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
1047{
1048	int reg_data = 0;
1049
1050	/* Disable channel interrupts */
1051	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
1052	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
1053	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
1054	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
1055
1056	free_irq(wcn->tx_irq, wcn);
1057	free_irq(wcn->rx_irq, wcn);
1058	del_timer(&wcn->tx_ack_timer);
1059
1060	if (wcn->tx_ack_skb) {
1061		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
1062		wcn->tx_ack_skb = NULL;
1063	}
1064
1065	/* Put the DXE block into reset before freeing memory */
1066	reg_data = WCN36XX_DXE_REG_RESET;
1067	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
1068
1069	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
1070	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
1071
1072	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
1073	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
1074	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
1075	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
1076}