v6.8
   1/*
   2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
   3 *
   4 * Permission to use, copy, modify, and/or distribute this software for any
   5 * purpose with or without fee is hereby granted, provided that the above
   6 * copyright notice and this permission notice appear in all copies.
   7 *
   8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15 */
  16
  17/* DXE - DMA transfer engine
  18 * we have 2 channels (high prio and low prio) for TX and 2 channels for RX.
  19 * data packets are transferred through the low-priority channels,
  20 * management packets are transferred through the high-priority channels.
  21 */
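/* Illustrative sketch (editor's assumption, not part of this file): a
 * mac80211 TX path would typically route frames by type, e.g.
 *
 *	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 *	bool is_low = !ieee80211_is_mgmt(hdr->frame_control);
 *
 *	wcn36xx_dxe_tx_frame(wcn, vif_priv, bd, skb, is_low);
 *
 * so data frames go through the low-priority channel and management
 * frames through the high-priority one.
 */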
  22
  23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  24
  25#include <linux/interrupt.h>
  26#include <linux/soc/qcom/smem_state.h>
  27#include "wcn36xx.h"
  28#include "txrx.h"
  29
  30static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
  31{
  32	wcn36xx_dbg(WCN36XX_DBG_DXE,
  33		    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
  34		    addr, data);
  35
  36	writel(data, wcn->ccu_base + addr);
  37}
  38
  39static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
  40{
  41	wcn36xx_dbg(WCN36XX_DBG_DXE,
  42		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
  43		    addr, data);
  44
  45	writel(data, wcn->dxe_base + addr);
  46}
  47
  48static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
  49{
  50	*data = readl(wcn->dxe_base + addr);
  51
  52	wcn36xx_dbg(WCN36XX_DBG_DXE,
  53		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
  54		    addr, *data);
  55}
  56
  57static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
  58{
  59	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
  60	int i;
  61
  62	for (i = 0; i < ch->desc_num && ctl; i++) {
  63		next = ctl->next;
  64		kfree(ctl);
  65		ctl = next;
  66	}
  67}
  68
  69static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
  70{
  71	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
  72	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
  73	int i;
  74
  75	spin_lock_init(&ch->lock);
  76	for (i = 0; i < ch->desc_num; i++) {
  77		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
  78		if (!cur_ctl)
  79			goto out_fail;
  80
  81		cur_ctl->ctl_blk_order = i;
  82		if (i == 0) {
  83			ch->head_blk_ctl = cur_ctl;
  84			ch->tail_blk_ctl = cur_ctl;
  85		} else if (ch->desc_num - 1 == i) {
  86			prev_ctl->next = cur_ctl;
  87			cur_ctl->next = ch->head_blk_ctl;
  88		} else {
  89			prev_ctl->next = cur_ctl;
  90		}
  91		prev_ctl = cur_ctl;
  92	}
  93
  94	return 0;
  95
  96out_fail:
  97	wcn36xx_dxe_free_ctl_block(ch);
  98	return -ENOMEM;
  99}
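/* Sketch of the resulting structure (not in the original source): the
 * control blocks allocated above form a circular singly linked list, so
 * a full walk of one channel looks like
 *
 *	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
 *
 *	do {
 *		... inspect ctl ...
 *		ctl = ctl->next;
 *	} while (ctl != ch->head_blk_ctl);
 *
 * which is the pattern reap_tx_dxes() below relies on.
 */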
 100
 101int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
 102{
 103	int ret;
 104
 105	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
 106	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
 107	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
 108	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
 109
 110	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
 111	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
 112	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
 113	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
 114
 115	wcn->dxe_tx_l_ch.dxe_wq =  WCN36XX_DXE_WQ_TX_L(wcn);
 116	wcn->dxe_tx_h_ch.dxe_wq =  WCN36XX_DXE_WQ_TX_H(wcn);
 117
 118	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
 119	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
 120
 121	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
 122	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
 123
 124	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
 125	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
 126
 127	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
 128	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
 129
 130	/* DXE control block allocation */
 131	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
 132	if (ret)
 133		goto out_err;
 134	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
 135	if (ret)
 136		goto out_err;
 137	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
 138	if (ret)
 139		goto out_err;
 140	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
 141	if (ret)
 142		goto out_err;
 143
 144	/* Initialize SMSM state: clear WLAN_TX_ENABLE, set WLAN_TX_RINGS_EMPTY */
 145	ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
 146					  WCN36XX_SMSM_WLAN_TX_ENABLE |
 147					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
 148					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
 149	if (ret)
 150		goto out_err;
 151
 152	return 0;
 153
 154out_err:
 155	wcn36xx_err("Failed to allocate DXE control blocks\n");
 156	wcn36xx_dxe_free_ctl_blks(wcn);
 157	return -ENOMEM;
 158}
 159
 160void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
 161{
 162	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
 163	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
 164	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
 165	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
 166}
 167
 168static int wcn36xx_dxe_init_descs(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *wcn_ch)
 169{
 170	struct device *dev = wcn->dev;
 171	struct wcn36xx_dxe_desc *cur_dxe = NULL;
 172	struct wcn36xx_dxe_desc *prev_dxe = NULL;
 173	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
 174	size_t size;
 175	int i;
 176
 177	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
 178	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
 179					      GFP_KERNEL);
 180	if (!wcn_ch->cpu_addr)
 181		return -ENOMEM;
 182
 183	cur_dxe = wcn_ch->cpu_addr;
 184	cur_ctl = wcn_ch->head_blk_ctl;
 185
 186	for (i = 0; i < wcn_ch->desc_num; i++) {
 187		cur_ctl->desc = cur_dxe;
 188		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
 189			i * sizeof(struct wcn36xx_dxe_desc);
 190
 191		switch (wcn_ch->ch_type) {
 192		case WCN36XX_DXE_CH_TX_L:
 193			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
 194			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L(wcn);
 195			break;
 196		case WCN36XX_DXE_CH_TX_H:
 197			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
 198			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H(wcn);
 199			break;
 200		case WCN36XX_DXE_CH_RX_L:
 201			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
 202			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
 203			break;
 204		case WCN36XX_DXE_CH_RX_H:
 205			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
 206			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
 207			break;
 208		}
 209		if (0 == i) {
 210			cur_dxe->phy_next_l = 0;
 211		} else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
 212			prev_dxe->phy_next_l =
 213				cur_ctl->desc_phy_addr;
 214		} else if (i == (wcn_ch->desc_num - 1)) {
 215			prev_dxe->phy_next_l =
 216				cur_ctl->desc_phy_addr;
 217			cur_dxe->phy_next_l =
 218				wcn_ch->head_blk_ctl->desc_phy_addr;
 219		}
 220		cur_ctl = cur_ctl->next;
 221		prev_dxe = cur_dxe;
 222		cur_dxe++;
 223	}
 224
 225	return 0;
 226}
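/* Resulting descriptor layout (sketch derived from the loop above):
 * desc[i] sits at dma_addr + i * sizeof(struct wcn36xx_dxe_desc) and its
 * phy_next_l field points at desc[i + 1], with the last descriptor
 * pointing back at desc[0], so the hardware sees one circular descriptor
 * ring per channel.
 */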
 227
 228static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
 229{
 230	size_t size;
 231
 232	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
 233	dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
 234}
 235
 236static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
 237				   struct wcn36xx_dxe_mem_pool *pool)
 238{
 239	int i, chunk_size = pool->chunk_size;
 240	dma_addr_t bd_phy_addr = pool->phy_addr;
 241	void *bd_cpu_addr = pool->virt_addr;
 242	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
 243
 244	for (i = 0; i < ch->desc_num; i++) {
 245		/* Only every second dxe needs a bd pointer,
 246		 * the others point to the skb data. */
 247		if (!(i & 1)) {
 248			cur->bd_phy_addr = bd_phy_addr;
 249			cur->bd_cpu_addr = bd_cpu_addr;
 250			bd_phy_addr += chunk_size;
 251			bd_cpu_addr += chunk_size;
 252		} else {
 253			cur->bd_phy_addr = 0;
 254			cur->bd_cpu_addr = NULL;
 255		}
 256		cur = cur->next;
 257	}
 258}
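/* Resulting TX ring layout (sketch based on the loop above): control
 * blocks alternate between BD and skb roles,
 *
 *	ctl[0]: bd_cpu_addr = pool + 0 * chunk_size  (carries the BD)
 *	ctl[1]: bd_cpu_addr = NULL                   (carries the skb)
 *	ctl[2]: bd_cpu_addr = pool + 1 * chunk_size  (carries the BD)
 *	ctl[3]: bd_cpu_addr = NULL                   (carries the skb)
 *	...
 *
 * and wcn36xx_dxe_tx_frame() consumes one such BD/skb pair per frame.
 */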
 259
 260static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
 261{
 262	int reg_data = 0;
 263
 264	wcn36xx_dxe_read_register(wcn,
 265				  WCN36XX_DXE_INT_MASK_REG,
 266				  &reg_data);
 267
 268	reg_data |= wcn_ch;
 269
 270	wcn36xx_dxe_write_register(wcn,
 271				   WCN36XX_DXE_INT_MASK_REG,
 272				   (int)reg_data);
 273	return 0;
 274}
 275
 276static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
 277{
 278	int reg_data = 0;
 279
 280	wcn36xx_dxe_read_register(wcn,
 281				  WCN36XX_DXE_INT_MASK_REG,
 282				  &reg_data);
 283
 284	reg_data &= ~wcn_ch;
 285
 286	wcn36xx_dxe_write_register(wcn,
 287				   WCN36XX_DXE_INT_MASK_REG,
 288				   (int)reg_data);
 289}
 290
 291static int wcn36xx_dxe_fill_skb(struct device *dev,
 292				struct wcn36xx_dxe_ctl *ctl,
 293				gfp_t gfp)
 294{
 295	struct wcn36xx_dxe_desc *dxe = ctl->desc;
 296	struct sk_buff *skb;
 297
 298	skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
 299	if (skb == NULL)
 300		return -ENOMEM;
 301
 302	dxe->dst_addr_l = dma_map_single(dev,
 303					 skb_tail_pointer(skb),
 304					 WCN36XX_PKT_SIZE,
 305					 DMA_FROM_DEVICE);
 306	if (dma_mapping_error(dev, dxe->dst_addr_l)) {
 307		dev_err(dev, "unable to map skb\n");
 308		kfree_skb(skb);
 309		return -ENOMEM;
 310	}
 311	ctl->skb = skb;
 312
 313	return 0;
 314}
 315
 316static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
 317				    struct wcn36xx_dxe_ch *wcn_ch)
 318{
 319	int i;
 320	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
 321
 322	cur_ctl = wcn_ch->head_blk_ctl;
 323
 324	for (i = 0; i < wcn_ch->desc_num; i++) {
 325		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
 326		cur_ctl = cur_ctl->next;
 327	}
 328
 329	return 0;
 330}
 331
 332static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
 333				     struct wcn36xx_dxe_ch *wcn_ch)
 334{
 335	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
 336	int i;
 337
 338	for (i = 0; i < wcn_ch->desc_num; i++) {
 339		kfree_skb(cur->skb);
 340		cur = cur->next;
 341	}
 342}
 343
 344void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
 345{
 346	struct ieee80211_tx_info *info;
 347	struct sk_buff *skb;
 348	unsigned long flags;
 349
 350	spin_lock_irqsave(&wcn->dxe_lock, flags);
 351	skb = wcn->tx_ack_skb;
 352	wcn->tx_ack_skb = NULL;
 353	del_timer(&wcn->tx_ack_timer);
 354	spin_unlock_irqrestore(&wcn->dxe_lock, flags);
 355
 356	if (!skb) {
 357		wcn36xx_warn("Spurious TX complete indication\n");
 358		return;
 359	}
 360
 361	info = IEEE80211_SKB_CB(skb);
 362
 363	if (status == 1)
 364		info->flags |= IEEE80211_TX_STAT_ACK;
 365	else
 366		info->flags &= ~IEEE80211_TX_STAT_ACK;
 367
 368	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
 369
 370	ieee80211_tx_status_irqsafe(wcn->hw, skb);
 371	ieee80211_wake_queues(wcn->hw);
 372}
 373
 374static void wcn36xx_dxe_tx_timer(struct timer_list *t)
 375{
 376	struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);
 377	struct ieee80211_tx_info *info;
 378	unsigned long flags;
 379	struct sk_buff *skb;
 380
 381	/* TX Timeout */
 382	wcn36xx_dbg(WCN36XX_DBG_DXE, "TX timeout\n");
 383
 384	spin_lock_irqsave(&wcn->dxe_lock, flags);
 385	skb = wcn->tx_ack_skb;
 386	wcn->tx_ack_skb = NULL;
 387	spin_unlock_irqrestore(&wcn->dxe_lock, flags);
 388
 389	if (!skb)
 390		return;
 391
 392	info = IEEE80211_SKB_CB(skb);
 393	info->flags &= ~IEEE80211_TX_STAT_ACK;
 394	info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 395
 396	ieee80211_tx_status_irqsafe(wcn->hw, skb);
 397	ieee80211_wake_queues(wcn->hw);
 398}
 399
 400static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
 401{
 402	struct wcn36xx_dxe_ctl *ctl;
 403	struct ieee80211_tx_info *info;
 404	unsigned long flags;
 405
 406	/*
 407	 * Make at least one loop of do-while because in case ring is
 408	 * completely full head and tail are pointing to the same element
 409	 * and while-do will not make any cycles.
 410	 */
 411	spin_lock_irqsave(&ch->lock, flags);
 412	ctl = ch->tail_blk_ctl;
 413	do {
 414		if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
 415			break;
 416
 417		if (ctl->skb &&
 418		    READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
 419			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
 420					 ctl->skb->len, DMA_TO_DEVICE);
 421			info = IEEE80211_SKB_CB(ctl->skb);
 422			if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
 423				if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
 424					info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 425					ieee80211_tx_status_irqsafe(wcn->hw, ctl->skb);
 426				} else {
 427					/* Wait for the TX ack indication or timeout... */
 428					spin_lock(&wcn->dxe_lock);
 429					if (WARN_ON(wcn->tx_ack_skb))
 430						ieee80211_free_txskb(wcn->hw, wcn->tx_ack_skb);
 431					wcn->tx_ack_skb = ctl->skb; /* Tracking ref */
 432					mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
 433					spin_unlock(&wcn->dxe_lock);
 434				}
 435				/* do not free, ownership transferred to mac80211 status cb */
 436			} else {
 437				ieee80211_free_txskb(wcn->hw, ctl->skb);
 438			}
 439
 440			if (wcn->queues_stopped) {
 441				wcn->queues_stopped = false;
 442				ieee80211_wake_queues(wcn->hw);
 443			}
 444
 445			ctl->skb = NULL;
 446		}
 447		ctl = ctl->next;
 448	} while (ctl != ch->head_blk_ctl);
 449
 450	ch->tail_blk_ctl = ctl;
 451	spin_unlock_irqrestore(&ch->lock, flags);
 452}
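/* Descriptor ownership, as implied by the loop above (editor's sketch):
 * wcn36xx_dxe_tx_frame() marks a descriptor valid (WCN36xx_DXE_CTRL_VLD)
 * when handing it to the hardware, and the hardware is expected to clear
 * that bit once the transfer has completed, so reaping stops at the
 * first descriptor that is still marked valid.
 */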
 453
 454static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
 455{
 456	struct wcn36xx *wcn = dev;
 457	int int_src, int_reason;
 458
 459	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
 460
 461	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
 462		wcn36xx_dxe_read_register(wcn,
 463					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
 464					  &int_reason);
 465
 466		wcn36xx_dxe_write_register(wcn,
 467					   WCN36XX_DXE_0_INT_CLR,
 468					   WCN36XX_INT_MASK_CHAN_TX_H);
 469
 470		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
 471			wcn36xx_dxe_write_register(wcn,
 472						   WCN36XX_DXE_0_INT_ERR_CLR,
 473						   WCN36XX_INT_MASK_CHAN_TX_H);
 474
 475			wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
 476					int_src);
 477		}
 478
 479		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
 480			wcn36xx_dxe_write_register(wcn,
 481						   WCN36XX_DXE_0_INT_DONE_CLR,
 482						   WCN36XX_INT_MASK_CHAN_TX_H);
 483		}
 484
 485		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
 486			wcn36xx_dxe_write_register(wcn,
 487						   WCN36XX_DXE_0_INT_ED_CLR,
 488						   WCN36XX_INT_MASK_CHAN_TX_H);
 489		}
 490
 491		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
 492			    int_reason);
 493
 494		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
 495				  WCN36XX_CH_STAT_INT_ED_MASK)) {
 496			reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
 497		}
 498	}
 499
 500	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
 501		wcn36xx_dxe_read_register(wcn,
 502					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
 503					  &int_reason);
 504
 505		wcn36xx_dxe_write_register(wcn,
 506					   WCN36XX_DXE_0_INT_CLR,
 507					   WCN36XX_INT_MASK_CHAN_TX_L);
 508
 509		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
 510			wcn36xx_dxe_write_register(wcn,
 511						   WCN36XX_DXE_0_INT_ERR_CLR,
 512						   WCN36XX_INT_MASK_CHAN_TX_L);
 513
 514			wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
 515					int_src);
 516		}
 517
 518		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
 519			wcn36xx_dxe_write_register(wcn,
 520						   WCN36XX_DXE_0_INT_DONE_CLR,
 521						   WCN36XX_INT_MASK_CHAN_TX_L);
 522		}
 523
 524		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
 525			wcn36xx_dxe_write_register(wcn,
 526						   WCN36XX_DXE_0_INT_ED_CLR,
 527						   WCN36XX_INT_MASK_CHAN_TX_L);
 528		}
 529
 530		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
 531			    int_reason);
 532
 533		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
 534				  WCN36XX_CH_STAT_INT_ED_MASK)) {
 535			reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
 536		}
 537	}
 538
 539	return IRQ_HANDLED;
 540}
 541
 542static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
 543{
 544	struct wcn36xx *wcn = dev;
 545
 546	wcn36xx_dxe_rx_frame(wcn);
 547
 548	return IRQ_HANDLED;
 549}
 550
 551static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
 552{
 553	int ret;
 554
 555	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
 556			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
 557	if (ret) {
 558		wcn36xx_err("failed to alloc tx irq\n");
 559		goto out_err;
 560	}
 561
 562	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
 563			  "wcn36xx_rx", wcn);
 564	if (ret) {
 565		wcn36xx_err("failed to alloc rx irq\n");
 566		goto out_txirq;
 567	}
 568
 569	enable_irq_wake(wcn->rx_irq);
 570
 571	return 0;
 572
 573out_txirq:
 574	free_irq(wcn->tx_irq, wcn);
 575out_err:
 576	return ret;
 577
 578}
 579
 580static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
 581				     struct wcn36xx_dxe_ch *ch,
 582				     u32 ctrl,
 583				     u32 en_mask,
 584				     u32 int_mask,
 585				     u32 status_reg)
 586{
 587	struct wcn36xx_dxe_desc *dxe;
 588	struct wcn36xx_dxe_ctl *ctl;
 589	dma_addr_t  dma_addr;
 590	struct sk_buff *skb;
 591	u32 int_reason;
 592	int ret;
 593
 594	wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
 595	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);
 596
 597	if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
 598		wcn36xx_dxe_write_register(wcn,
 599					   WCN36XX_DXE_0_INT_ERR_CLR,
 600					   int_mask);
 601
 602		wcn36xx_err("DXE IRQ reported error on RX channel\n");
 603	}
 604
 605	if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
 606		wcn36xx_dxe_write_register(wcn,
 607					   WCN36XX_DXE_0_INT_DONE_CLR,
 608					   int_mask);
 609
 610	if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
 611		wcn36xx_dxe_write_register(wcn,
 612					   WCN36XX_DXE_0_INT_ED_CLR,
 613					   int_mask);
 614
 615	if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
 616			    WCN36XX_CH_STAT_INT_ED_MASK)))
 617		return 0;
 618
 619	spin_lock(&ch->lock);
 620
 621	ctl = ch->head_blk_ctl;
 622	dxe = ctl->desc;
 623
 624	while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
 625		/* do not read until we own DMA descriptor */
 626		dma_rmb();
 627
 628		/* read/modify DMA descriptor */
 629		skb = ctl->skb;
 630		dma_addr = dxe->dst_addr_l;
 631		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
 632		if (0 == ret) {
 633			/* new skb allocation ok. Use the new one and queue
 634			 * the old one to network system.
 635			 */
 636			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
 637					DMA_FROM_DEVICE);
 638			wcn36xx_rx_skb(wcn, skb);
 639		}
 640		/* else keep old skb not submitted and reuse it for rx DMA
 641		 * (dropping the packet that it contained)
 642		 */
 643
 644		/* flush descriptor changes before re-marking as valid */
 645		dma_wmb();
 646		dxe->ctrl = ctrl;
 647
 648		ctl = ctl->next;
 649		dxe = ctl->desc;
 650	}
 651	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);
 652
 653	ch->head_blk_ctl = ctl;
 654
 655	spin_unlock(&ch->lock);
 656
 657	return 0;
 658}
 659
 660void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
 661{
 662	int int_src;
 663
 664	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
 665
 666	/* RX_LOW_PRI */
 667	if (int_src & WCN36XX_DXE_INT_CH1_MASK)
 668		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
 669					  WCN36XX_DXE_CTRL_RX_L,
 670					  WCN36XX_DXE_INT_CH1_MASK,
 671					  WCN36XX_INT_MASK_CHAN_RX_L,
 672					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);
 673
 674	/* RX_HIGH_PRI */
 675	if (int_src & WCN36XX_DXE_INT_CH3_MASK)
 676		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
 677					  WCN36XX_DXE_CTRL_RX_H,
 678					  WCN36XX_DXE_INT_CH3_MASK,
 679					  WCN36XX_INT_MASK_CHAN_RX_H,
 680					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);
 681
 682	if (!int_src)
 683		wcn36xx_warn("No DXE interrupt pending\n");
 684}
 685
 686int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
 687{
 688	size_t s;
 689	void *cpu_addr;
 690
 691	/* Allocate BD headers for MGMT frames */
 692
 693	/* Where this value comes from, ask QC */
 694	wcn->mgmt_mem_pool.chunk_size =	WCN36XX_BD_CHUNK_SIZE +
 695		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 696
 697	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
 698	cpu_addr = dma_alloc_coherent(wcn->dev, s,
 699				      &wcn->mgmt_mem_pool.phy_addr,
 700				      GFP_KERNEL);
 701	if (!cpu_addr)
 702		goto out_err;
 703
 704	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
 705
 706	/* Allocate BD headers for DATA frames */
 707
 708	/* Where this value comes from, ask QC */
 709	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
 710		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 711
 712	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
 713	cpu_addr = dma_alloc_coherent(wcn->dev, s,
 714				      &wcn->data_mem_pool.phy_addr,
 715				      GFP_KERNEL);
 716	if (!cpu_addr)
 717		goto out_err;
 718
 719	wcn->data_mem_pool.virt_addr = cpu_addr;
 720
 721	return 0;
 722
 723out_err:
 724	wcn36xx_dxe_free_mem_pools(wcn);
 725	wcn36xx_err("Failed to allocate BD mempool\n");
 726	return -ENOMEM;
 727}
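/* Worked example of the rounding above (hypothetical chunk size; the real
 * WCN36XX_BD_CHUNK_SIZE is defined elsewhere in the driver): for a raw
 * size of 100 bytes,
 *
 *	chunk_size = 100 + 16 - (100 % 8) = 100 + 16 - 4 = 112
 *
 * i.e. every BD slot gets at least 8 bytes of padding and ends up
 * 8-byte aligned.
 */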
 728
 729void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
 730{
 731	if (wcn->mgmt_mem_pool.virt_addr)
 732		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
 733				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
 734				  wcn->mgmt_mem_pool.virt_addr,
 735				  wcn->mgmt_mem_pool.phy_addr);
 736
 737	if (wcn->data_mem_pool.virt_addr) {
 738		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
 739				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
 740				  wcn->data_mem_pool.virt_addr,
 741				  wcn->data_mem_pool.phy_addr);
 742	}
 743}
 744
 745int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
 746			 struct wcn36xx_vif *vif_priv,
 747			 struct wcn36xx_tx_bd *bd,
 748			 struct sk_buff *skb,
 749			 bool is_low)
 750{
 751	struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
 752	struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
 753	struct wcn36xx_dxe_ch *ch = NULL;
 754	unsigned long flags;
 755	int ret;
 756
 757	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
 758
 759	spin_lock_irqsave(&ch->lock, flags);
 760	ctl_bd = ch->head_blk_ctl;
 761	ctl_skb = ctl_bd->next;
 762
 763	/*
 764	 * If skb is not null that means that we reached the tail of the ring
 765	 * hence ring is full. Stop queues to let mac80211 back off until ring
 766	 * has an empty slot again.
 767	 */
 768	if (NULL != ctl_skb->skb) {
 769		ieee80211_stop_queues(wcn->hw);
 770		wcn->queues_stopped = true;
 771		spin_unlock_irqrestore(&ch->lock, flags);
 772		return -EBUSY;
 773	}
 774
 775	if (unlikely(ctl_skb->bd_cpu_addr)) {
 776		wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
 777		ret = -EINVAL;
 778		goto unlock;
 779	}
 780
 781	desc_bd = ctl_bd->desc;
 782	desc_skb = ctl_skb->desc;
 783
 784	ctl_bd->skb = NULL;
 785
 786	/* write buffer descriptor */
 787	memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));
 788
 789	/* Set source address of the BD we send */
 790	desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
 791	desc_bd->dst_addr_l = ch->dxe_wq;
 792	desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);
 793
 794	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
 795
 796	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
 797			 (char *)desc_bd, sizeof(*desc_bd));
 798	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
 799			 "BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
 800			 sizeof(struct wcn36xx_tx_bd));
 801
 802	desc_skb->src_addr_l = dma_map_single(wcn->dev,
 803					      skb->data,
 804					      skb->len,
 805					      DMA_TO_DEVICE);
 806	if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
 807		dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
 808		ret = -ENOMEM;
 809		goto unlock;
 810	}
 811
 812	ctl_skb->skb = skb;
 813	desc_skb->dst_addr_l = ch->dxe_wq;
 814	desc_skb->fr_len = ctl_skb->skb->len;
 815
 816	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
 817			 (char *)desc_skb, sizeof(*desc_skb));
 818	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
 819			 (char *)ctl_skb->skb->data, ctl_skb->skb->len);
 820
 821	/* Move the head of the ring to the next empty descriptor */
 822	ch->head_blk_ctl = ctl_skb->next;
 823
 824	/* Commit all previous writes and set descriptors to VALID */
 825	wmb();
 826	desc_skb->ctrl = ch->ctrl_skb;
 827	wmb();
 828	desc_bd->ctrl = ch->ctrl_bd;
 829
 830	/*
 831	 * When connected and trying to send a data frame, the chip can be in
 832	 * sleep mode and writing to the register will not wake it up. Instead,
 833	 * notify the chip about the new frame through the SMSM bus.
 834	 */
 835	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
 836		qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
 837					    WCN36XX_SMSM_WLAN_TX_ENABLE,
 838					    WCN36XX_SMSM_WLAN_TX_ENABLE);
 839	} else {
 840		/* indicate End Of Packet and generate interrupt on descriptor
 841		 * done.
 842		 */
 843		wcn36xx_dxe_write_register(wcn,
 844			ch->reg_ctrl, ch->def_ctrl);
 845	}
 846
 847	ret = 0;
 848unlock:
 849	spin_unlock_irqrestore(&ch->lock, flags);
 850	return ret;
 851}
 852
 853static bool _wcn36xx_dxe_tx_channel_is_empty(struct wcn36xx_dxe_ch *ch)
 854{
 855	unsigned long flags;
 856	struct wcn36xx_dxe_ctl *ctl_bd_start, *ctl_skb_start;
 857	struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
 858	bool ret = true;
 859
 860	spin_lock_irqsave(&ch->lock, flags);
 861
 862	/* Loop through ring buffer looking for nonempty entries. */
 863	ctl_bd_start = ch->head_blk_ctl;
 864	ctl_bd = ctl_bd_start;
 865	ctl_skb_start = ctl_bd_start->next;
 866	ctl_skb = ctl_skb_start;
 867	do {
 868		if (ctl_skb->skb) {
 869			ret = false;
 870			goto unlock;
 871		}
 872		ctl_bd = ctl_skb->next;
 873		ctl_skb = ctl_bd->next;
 874	} while (ctl_skb != ctl_skb_start);
 875
 876unlock:
 877	spin_unlock_irqrestore(&ch->lock, flags);
 878	return ret;
 879}
 880
 881int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn)
 882{
 883	int i = 0;
 884
 885	/* Called with mac80211 queues stopped. Wait for empty HW queues. */
 886	do {
 887		if (_wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_l_ch) &&
 888		    _wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_h_ch)) {
 889			return 0;
 890		}
 891		/* This ieee80211_ops callback is specifically allowed to
 892		 * sleep.
 893		 */
 894		usleep_range(1000, 1100);
 895	} while (++i < 100);
 896
 897	return -EBUSY;
 898}
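/* Hypothetical usage sketch (the authoritative caller lives in main.c):
 * mac80211's .flush callback could drain the DXE TX rings roughly as
 *
 *	static void wcn36xx_flush(struct ieee80211_hw *hw,
 *				  struct ieee80211_vif *vif,
 *				  u32 queues, bool drop)
 *	{
 *		struct wcn36xx *wcn = hw->priv;
 *
 *		if (wcn36xx_dxe_tx_flush(wcn))
 *			wcn36xx_err("DXE TX flush timed out\n");
 *	}
 */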
 899
 900int wcn36xx_dxe_init(struct wcn36xx *wcn)
 901{
 902	int reg_data = 0, ret;
 903
 904	reg_data = WCN36XX_DXE_REG_RESET;
 905	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
 906
 907	/* Select channels for rx avail and xfer done interrupts... */
 908	reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
 909		    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
 910	if (wcn->is_pronto)
 911		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
 912	else
 913		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);
 914
 915	/***************************************/
 916	/* Init descriptors for TX LOW channel */
 917	/***************************************/
 918	ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_tx_l_ch);
 919	if (ret) {
 920		dev_err(wcn->dev, "Error allocating descriptor\n");
 921		return ret;
 922	}
 923	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
 924
 925	/* Write channel head to a NEXT register */
 926	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
 927		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
 928
 929	/* Program DMA destination addr for TX LOW */
 930	wcn36xx_dxe_write_register(wcn,
 931		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
 932		WCN36XX_DXE_WQ_TX_L(wcn));
 933
 934	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
 935
 936	/***************************************/
 937	/* Init descriptors for TX HIGH channel */
 938	/***************************************/
 939	ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_tx_h_ch);
 940	if (ret) {
 941		dev_err(wcn->dev, "Error allocating descriptor\n");
 942		goto out_err_txh_ch;
 943	}
 944
 945	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
 946
 947	/* Write channel head to a NEXT register */
 948	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
 949		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
 950
 951	/* Program DMA destination addr for TX HIGH */
 952	wcn36xx_dxe_write_register(wcn,
 953		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
 954		WCN36XX_DXE_WQ_TX_H(wcn));
 955
 956	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
 957
 958	/***************************************/
 959	/* Init descriptors for RX LOW channel */
 960	/***************************************/
 961	ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_rx_l_ch);
 962	if (ret) {
 963		dev_err(wcn->dev, "Error allocating descriptor\n");
 964		goto out_err_rxl_ch;
 965	}
 966
 967	/* For RX we need to preallocate buffers */
 968	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
 969
 970	/* Write channel head to a NEXT register */
 971	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
 972		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
 973
 974	/* Write DMA source address */
 975	wcn36xx_dxe_write_register(wcn,
 976		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
 977		WCN36XX_DXE_WQ_RX_L);
 978
 979	/* Program preallocated destination address */
 980	wcn36xx_dxe_write_register(wcn,
 981		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
 982		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
 983
 984	/* Enable default control registers */
 985	wcn36xx_dxe_write_register(wcn,
 986		WCN36XX_DXE_REG_CTL_RX_L,
 987		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
 988
 989	/***************************************/
 990	/* Init descriptors for RX HIGH channel */
 991	/***************************************/
 992	ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_rx_h_ch);
 993	if (ret) {
 994		dev_err(wcn->dev, "Error allocating descriptor\n");
 995		goto out_err_rxh_ch;
 996	}
 997
 998	/* For RX we need to preallocate buffers */
 999	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
1000
1001	/* Write channel head to a NEXT register */
1002	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
1003		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
1004
1005	/* Write DMA source address */
1006	wcn36xx_dxe_write_register(wcn,
1007		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
1008		WCN36XX_DXE_WQ_RX_H);
1009
1010	/* Program preallocated destination address */
1011	wcn36xx_dxe_write_register(wcn,
1012		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
1013		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
1014
1015	/* Enable default control registers */
1016	wcn36xx_dxe_write_register(wcn,
1017		WCN36XX_DXE_REG_CTL_RX_H,
1018		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
1019
1020	ret = wcn36xx_dxe_request_irqs(wcn);
1021	if (ret < 0)
1022		goto out_err_irq;
1023
1024	timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);
1025
1026	/* Enable channel interrupts */
1027	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
1028	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
1029	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
1030	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
1031
1032	return 0;
1033
1034out_err_irq:
1035	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
1036out_err_rxh_ch:
1037	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
1038out_err_rxl_ch:
1039	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
1040out_err_txh_ch:
1041	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
1042
1043	return ret;
1044}
1045
1046void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
1047{
1048	int reg_data = 0;
1049
1050	/* Disable channel interrupts */
1051	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
1052	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
1053	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
1054	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
1055
1056	free_irq(wcn->tx_irq, wcn);
1057	free_irq(wcn->rx_irq, wcn);
1058	del_timer(&wcn->tx_ack_timer);
1059
1060	if (wcn->tx_ack_skb) {
1061		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
1062		wcn->tx_ack_skb = NULL;
1063	}
1064
1065	/* Put the DXE block into reset before freeing memory */
1066	reg_data = WCN36XX_DXE_REG_RESET;
1067	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
1068
1069	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
1070	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
1071
1072	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
1073	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
1074	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
1075	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
1076}
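/* Plausible bring-up order (editor's sketch; see main.c for the
 * authoritative sequence): the DXE entry points above are meant to be
 * used roughly as
 *
 *	wcn36xx_dxe_allocate_mem_pools(wcn);
 *	wcn36xx_dxe_alloc_ctl_blks(wcn);
 *	... firmware/SMD bring-up ...
 *	wcn36xx_dxe_init(wcn);
 *
 * with wcn36xx_dxe_deinit(), wcn36xx_dxe_free_ctl_blks() and
 * wcn36xx_dxe_free_mem_pools() undoing the setup in reverse on stop.
 */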
v4.6
  1/*
  2 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
  3 *
  4 * Permission to use, copy, modify, and/or distribute this software for any
  5 * purpose with or without fee is hereby granted, provided that the above
  6 * copyright notice and this permission notice appear in all copies.
  7 *
  8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 15 */
 16
 17/* DXE - DMA transfer engine
 18 * we have 2 channels (high prio and low prio) for TX and 2 channels for RX.
 19 * data packets are transferred through the low-priority channels,
 20 * management packets are transferred through the high-priority channels.
 21 */
 22
 23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 24
 25#include <linux/interrupt.h>
 26#include "wcn36xx.h"
 27#include "txrx.h"
 28
 29void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
 30{
 31	struct wcn36xx_dxe_ch *ch = is_low ?
 32		&wcn->dxe_tx_l_ch :
 33		&wcn->dxe_tx_h_ch;
 34
 35	return ch->head_blk_ctl->bd_cpu_addr;
 36}
 37
 38static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
 39{
 40	wcn36xx_dbg(WCN36XX_DBG_DXE,
 41		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
 42		    addr, data);
 43
 44	writel(data, wcn->mmio + addr);
 45}
 46
 47#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data)		 \
 48do {									 \
 49	if (wcn->chip_version == WCN36XX_CHIP_3680)			 \
 50		wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
 51	else								 \
 52		wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
 53} while (0)								 \
 54
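/* Example of the macro above: on a WCN3680,
 *
 *	wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
 *
 * writes the WCN36XX_DXE_REG_CCU_INT_3680 register, and the _3660
 * variant on all other chips.
 */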
 55static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
 56{
 57	*data = readl(wcn->mmio + addr);
 58
 59	wcn36xx_dbg(WCN36XX_DBG_DXE,
 60		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
 61		    addr, *data);
 62}
 63
 64static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
 65{
 66	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
 67	int i;
 68
 69	for (i = 0; i < ch->desc_num && ctl; i++) {
 70		next = ctl->next;
 71		kfree(ctl);
 72		ctl = next;
 73	}
 74}
 75
 76static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
 77{
 78	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
 79	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
 80	int i;
 81
 82	spin_lock_init(&ch->lock);
 83	for (i = 0; i < ch->desc_num; i++) {
 84		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
 85		if (!cur_ctl)
 86			goto out_fail;
 87
 88		spin_lock_init(&cur_ctl->skb_lock);
 89		cur_ctl->ctl_blk_order = i;
 90		if (i == 0) {
 91			ch->head_blk_ctl = cur_ctl;
 92			ch->tail_blk_ctl = cur_ctl;
 93		} else if (ch->desc_num - 1 == i) {
 94			prev_ctl->next = cur_ctl;
 95			cur_ctl->next = ch->head_blk_ctl;
 96		} else {
 97			prev_ctl->next = cur_ctl;
 98		}
 99		prev_ctl = cur_ctl;
100	}
101
102	return 0;
103
104out_fail:
105	wcn36xx_dxe_free_ctl_block(ch);
106	return -ENOMEM;
107}
108
109int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
110{
111	int ret;
112
113	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
114	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
115	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
116	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
117
118	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
119	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
120	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
121	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
122
123	wcn->dxe_tx_l_ch.dxe_wq =  WCN36XX_DXE_WQ_TX_L;
124	wcn->dxe_tx_h_ch.dxe_wq =  WCN36XX_DXE_WQ_TX_H;
125
126	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
127	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
128
129	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
130	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
131
132	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
133	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
134
135	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
136	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
137
138	/* DXE control block allocation */
139	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
140	if (ret)
141		goto out_err;
142	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
143	if (ret)
144		goto out_err;
145	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
146	if (ret)
147		goto out_err;
148	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
149	if (ret)
150		goto out_err;
151
152	/* Initialize SMSM state: clear WLAN_TX_ENABLE, set WLAN_TX_RINGS_EMPTY */
153	ret = wcn->ctrl_ops->smsm_change_state(
154		WCN36XX_SMSM_WLAN_TX_ENABLE,
155		WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
156
157	return 0;
158
159out_err:
160	wcn36xx_err("Failed to allocate DXE control blocks\n");
161	wcn36xx_dxe_free_ctl_blks(wcn);
162	return -ENOMEM;
163}
164
165void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
166{
167	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
168	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
169	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
170	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
171}
172
173static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
174{
175	struct wcn36xx_dxe_desc *cur_dxe = NULL;
176	struct wcn36xx_dxe_desc *prev_dxe = NULL;
177	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
178	size_t size;
179	int i;
180
181	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
182	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
183					      GFP_KERNEL);
184	if (!wcn_ch->cpu_addr)
185		return -ENOMEM;
186
187	memset(wcn_ch->cpu_addr, 0, size);
188
189	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
190	cur_ctl = wcn_ch->head_blk_ctl;
191
192	for (i = 0; i < wcn_ch->desc_num; i++) {
193		cur_ctl->desc = cur_dxe;
194		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
195			i * sizeof(struct wcn36xx_dxe_desc);
196
197		switch (wcn_ch->ch_type) {
198		case WCN36XX_DXE_CH_TX_L:
199			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
200			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
201			break;
202		case WCN36XX_DXE_CH_TX_H:
203			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
204			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
205			break;
206		case WCN36XX_DXE_CH_RX_L:
207			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
208			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
209			break;
210		case WCN36XX_DXE_CH_RX_H:
211			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
212			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
213			break;
214		}
215		if (0 == i) {
216			cur_dxe->phy_next_l = 0;
217		} else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
218			prev_dxe->phy_next_l =
219				cur_ctl->desc_phy_addr;
220		} else if (i == (wcn_ch->desc_num - 1)) {
221			prev_dxe->phy_next_l =
222				cur_ctl->desc_phy_addr;
223			cur_dxe->phy_next_l =
224				wcn_ch->head_blk_ctl->desc_phy_addr;
225		}
226		cur_ctl = cur_ctl->next;
227		prev_dxe = cur_dxe;
228		cur_dxe++;
229	}
230
231	return 0;
232}
233
234static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
235				   struct wcn36xx_dxe_mem_pool *pool)
236{
237	int i, chunk_size = pool->chunk_size;
238	dma_addr_t bd_phy_addr = pool->phy_addr;
239	void *bd_cpu_addr = pool->virt_addr;
240	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
241
242	for (i = 0; i < ch->desc_num; i++) {
243		/* Only every second dxe needs a bd pointer,
244		 * the others point to the skb data. */
245		if (!(i & 1)) {
246			cur->bd_phy_addr = bd_phy_addr;
247			cur->bd_cpu_addr = bd_cpu_addr;
248			bd_phy_addr += chunk_size;
249			bd_cpu_addr += chunk_size;
250		} else {
251			cur->bd_phy_addr = 0;
252			cur->bd_cpu_addr = NULL;
253		}
254		cur = cur->next;
255	}
256}
257
258static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
259{
260	int reg_data = 0;
261
262	wcn36xx_dxe_read_register(wcn,
263				  WCN36XX_DXE_INT_MASK_REG,
264				  &reg_data);
265
266	reg_data |= wcn_ch;
267
268	wcn36xx_dxe_write_register(wcn,
269				   WCN36XX_DXE_INT_MASK_REG,
270				   (int)reg_data);
271	return 0;
272}
273
274static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
275{
276	struct wcn36xx_dxe_desc *dxe = ctl->desc;
277	struct sk_buff *skb;
278
279	skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
280	if (skb == NULL)
281		return -ENOMEM;
282
283	dxe->dst_addr_l = dma_map_single(dev,
284					 skb_tail_pointer(skb),
285					 WCN36XX_PKT_SIZE,
286					 DMA_FROM_DEVICE);
287	ctl->skb = skb;
288
289	return 0;
290}
291
292static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
293				    struct wcn36xx_dxe_ch *wcn_ch)
294{
295	int i;
296	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
297
298	cur_ctl = wcn_ch->head_blk_ctl;
299
300	for (i = 0; i < wcn_ch->desc_num; i++) {
301		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
302		cur_ctl = cur_ctl->next;
303	}
304
305	return 0;
306}
307
308static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
309				     struct wcn36xx_dxe_ch *wcn_ch)
310{
311	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
312	int i;
313
314	for (i = 0; i < wcn_ch->desc_num; i++) {
315		kfree_skb(cur->skb);
316		cur = cur->next;
317	}
318}
319
320void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
321{
322	struct ieee80211_tx_info *info;
323	struct sk_buff *skb;
324	unsigned long flags;
325
326	spin_lock_irqsave(&wcn->dxe_lock, flags);
327	skb = wcn->tx_ack_skb;
328	wcn->tx_ack_skb = NULL;
329	spin_unlock_irqrestore(&wcn->dxe_lock, flags);
330
331	if (!skb) {
332		wcn36xx_warn("Spurious TX complete indication\n");
333		return;
334	}
335
336	info = IEEE80211_SKB_CB(skb);
337
338	if (status == 1)
339		info->flags |= IEEE80211_TX_STAT_ACK;
340
341	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
342
343	ieee80211_tx_status_irqsafe(wcn->hw, skb);
344	ieee80211_wake_queues(wcn->hw);
345}
346
347static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
348{
349	struct wcn36xx_dxe_ctl *ctl;
350	struct ieee80211_tx_info *info;
351	unsigned long flags;
352
353	/*
354	 * Make at least one loop of do-while because in case ring is
355	 * completely full head and tail are pointing to the same element
356	 * and while-do will not make any cycles.
357	 */
358	spin_lock_irqsave(&ch->lock, flags);
359	ctl = ch->tail_blk_ctl;
360	do {
361		if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
362			break;
363		if (ctl->skb) {
364			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
365					 ctl->skb->len, DMA_TO_DEVICE);
366			info = IEEE80211_SKB_CB(ctl->skb);
367			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
368				/* Keep frame until TX status comes */
369				ieee80211_free_txskb(wcn->hw, ctl->skb);
370			}
371			spin_lock(&ctl->skb_lock);
372			if (wcn->queues_stopped) {
373				wcn->queues_stopped = false;
374				ieee80211_wake_queues(wcn->hw);
375			}
376			spin_unlock(&ctl->skb_lock);
377
378			ctl->skb = NULL;
379		}
380		ctl = ctl->next;
381	} while (ctl != ch->head_blk_ctl &&
382	       !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));
383
384	ch->tail_blk_ctl = ctl;
385	spin_unlock_irqrestore(&ch->lock, flags);
386}
387
388static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
389{
390	struct wcn36xx *wcn = (struct wcn36xx *)dev;
391	int int_src, int_reason;
392
393	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
394
395	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
396		wcn36xx_dxe_read_register(wcn,
397					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
398					  &int_reason);
399
400		/* TODO: Check int_reason */
401
402		wcn36xx_dxe_write_register(wcn,
403					   WCN36XX_DXE_0_INT_CLR,
404					   WCN36XX_INT_MASK_CHAN_TX_H);
405
406		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
407					   WCN36XX_INT_MASK_CHAN_TX_H);
408		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
409		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
410	}
411
412	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
413		wcn36xx_dxe_read_register(wcn,
414					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
415					  &int_reason);
416		/* TODO: Check int_reason */
417
418		wcn36xx_dxe_write_register(wcn,
419					   WCN36XX_DXE_0_INT_CLR,
420					   WCN36XX_INT_MASK_CHAN_TX_L);
421
422		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
423					   WCN36XX_INT_MASK_CHAN_TX_L);
424		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
425		reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
426	}
427
428	return IRQ_HANDLED;
429}
430
431static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
432{
433	struct wcn36xx *wcn = (struct wcn36xx *)dev;
434
435	disable_irq_nosync(wcn->rx_irq);
436	wcn36xx_dxe_rx_frame(wcn);
437	enable_irq(wcn->rx_irq);
438	return IRQ_HANDLED;
439}
440
441static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
442{
443	int ret;
444
445	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
446			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
447	if (ret) {
448		wcn36xx_err("failed to alloc tx irq\n");
449		goto out_err;
450	}
451
452	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
453			  "wcn36xx_rx", wcn);
454	if (ret) {
455		wcn36xx_err("failed to alloc rx irq\n");
456		goto out_txirq;
457	}
458
459	enable_irq_wake(wcn->rx_irq);
460
461	return 0;
462
463out_txirq:
464	free_irq(wcn->tx_irq, wcn);
465out_err:
466	return ret;
467
468}
469
470static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
471				     struct wcn36xx_dxe_ch *ch)
472{
473	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
474	struct wcn36xx_dxe_desc *dxe = ctl->desc;
475	dma_addr_t  dma_addr;
476	struct sk_buff *skb;
477	int ret = 0, int_mask;
478	u32 value;
479
480	if (ch->ch_type == WCN36XX_DXE_CH_RX_L) {
481		value = WCN36XX_DXE_CTRL_RX_L;
482		int_mask = WCN36XX_DXE_INT_CH1_MASK;
483	} else {
484		value = WCN36XX_DXE_CTRL_RX_H;
485		int_mask = WCN36XX_DXE_INT_CH3_MASK;
486	}
487
488	while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
489		skb = ctl->skb;
490		dma_addr = dxe->dst_addr_l;
491		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl);
492		if (0 == ret) {
493			/* new skb allocation ok. Use the new one and queue
494			 * the old one to network system.
495			 */
496			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
497					DMA_FROM_DEVICE);
498			wcn36xx_rx_skb(wcn, skb);
499		} /* else keep old skb not submitted and use it for rx DMA */
500
501		dxe->ctrl = value;
502		ctl = ctl->next;
503		dxe = ctl->desc;
504	}
505	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, int_mask);
506
507	ch->head_blk_ctl = ctl;
508	return 0;
509}
510
511void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
512{
513	int int_src;
514
515	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
516
517	/* RX_LOW_PRI */
518	if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
519		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
520					   WCN36XX_DXE_INT_CH1_MASK);
521		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
522	}
523
524	/* RX_HIGH_PRI */
525	if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
526		/* Clean up all the INT within this channel */
527		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
528					   WCN36XX_DXE_INT_CH3_MASK);
529		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
530	}
531
532	if (!int_src)
533		wcn36xx_warn("No DXE interrupt pending\n");
534}
535
536int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
537{
538	size_t s;
539	void *cpu_addr;
540
541	/* Allocate BD headers for MGMT frames */
542
543	/* Where this value comes from, ask QC */
544	wcn->mgmt_mem_pool.chunk_size =	WCN36XX_BD_CHUNK_SIZE +
545		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
546
547	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
548	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
549				      GFP_KERNEL);
550	if (!cpu_addr)
551		goto out_err;
552
553	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
554	memset(cpu_addr, 0, s);
555
556	/* Allocate BD headers for DATA frames */
557
558	/* Where this come from ask QC */
559	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
560		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
561
562	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
563	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
564				      GFP_KERNEL);
565	if (!cpu_addr)
566		goto out_err;
567
568	wcn->data_mem_pool.virt_addr = cpu_addr;
569	memset(cpu_addr, 0, s);
570
571	return 0;
572
573out_err:
574	wcn36xx_dxe_free_mem_pools(wcn);
575	wcn36xx_err("Failed to allocate BD mempool\n");
576	return -ENOMEM;
577}
578
579void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
580{
581	if (wcn->mgmt_mem_pool.virt_addr)
582		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
583				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
584				  wcn->mgmt_mem_pool.virt_addr,
585				  wcn->mgmt_mem_pool.phy_addr);
586
587	if (wcn->data_mem_pool.virt_addr) {
588		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
589				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
590				  wcn->data_mem_pool.virt_addr,
591				  wcn->data_mem_pool.phy_addr);
592	}
593}
594
595int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
596			 struct wcn36xx_vif *vif_priv,
597			 struct sk_buff *skb,
598			 bool is_low)
599{
600	struct wcn36xx_dxe_ctl *ctl = NULL;
601	struct wcn36xx_dxe_desc *desc = NULL;
602	struct wcn36xx_dxe_ch *ch = NULL;
603	unsigned long flags;
604	int ret;
605
606	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
607
608	spin_lock_irqsave(&ch->lock, flags);
609	ctl = ch->head_blk_ctl;
610
611	spin_lock(&ctl->next->skb_lock);
612
613	/*
614	 * If skb is not null that means that we reached the tail of the ring
615	 * hence ring is full. Stop queues to let mac80211 back off until ring
616	 * has an empty slot again.
617	 */
618	if (NULL != ctl->next->skb) {
619		ieee80211_stop_queues(wcn->hw);
620		wcn->queues_stopped = true;
621		spin_unlock(&ctl->next->skb_lock);
622		spin_unlock_irqrestore(&ch->lock, flags);
623		return -EBUSY;
624	}
625	spin_unlock(&ctl->next->skb_lock);
626
627	ctl->skb = NULL;
628	desc = ctl->desc;
629
630	/* Set source address of the BD we send */
631	desc->src_addr_l = ctl->bd_phy_addr;
632
633	desc->dst_addr_l = ch->dxe_wq;
634	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
635	desc->ctrl = ch->ctrl_bd;
636
637	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
638
639	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
640			 (char *)desc, sizeof(*desc));
641	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
642			 "BD   >>> ", (char *)ctl->bd_cpu_addr,
643			 sizeof(struct wcn36xx_tx_bd));
644
645	/* Set source address of the SKB we send */
646	ctl = ctl->next;
647	ctl->skb = skb;
648	desc = ctl->desc;
649	if (ctl->bd_cpu_addr) {
650		wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
651		ret = -EINVAL;
652		goto unlock;
653	}
654
655	desc->src_addr_l = dma_map_single(wcn->dev,
656					  ctl->skb->data,
657					  ctl->skb->len,
658					  DMA_TO_DEVICE);
659
660	desc->dst_addr_l = ch->dxe_wq;
661	desc->fr_len = ctl->skb->len;
662
663	/* set dxe descriptor to VALID */
664	desc->ctrl = ch->ctrl_skb;
665
666	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
667			 (char *)desc, sizeof(*desc));
668	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
669			 (char *)ctl->skb->data, ctl->skb->len);
670
671	/* Move the head of the ring to the next empty descriptor */
672	ch->head_blk_ctl = ctl->next;
673
674	/*
675	 * When connected and trying to send a data frame, the chip can be in
676	 * sleep mode and writing to the register will not wake it up. Instead,
677	 * notify the chip about the new frame through the SMSM bus.
678	 */
679	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
680		wcn->ctrl_ops->smsm_change_state(
681				  0,
682				  WCN36XX_SMSM_WLAN_TX_ENABLE);
683	} else {
684		/* indicate End Of Packet and generate interrupt on descriptor
685		 * done.
686		 */
687		wcn36xx_dxe_write_register(wcn,
688			ch->reg_ctrl, ch->def_ctrl);
689	}
690
691	ret = 0;
692unlock:
693	spin_unlock_irqrestore(&ch->lock, flags);
694	return ret;
695}
696
697int wcn36xx_dxe_init(struct wcn36xx *wcn)
698{
699	int reg_data = 0, ret;
700
701	reg_data = WCN36XX_DXE_REG_RESET;
702	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
703
704	/* Setting interrupt path */
705	reg_data = WCN36XX_DXE_CCU_INT;
706	wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
707
708	/***************************************/
709	/* Init descriptors for TX LOW channel */
710	/***************************************/
711	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
712	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
713
714	/* Write channel head to a NEXT register */
715	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
716		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
717
718	/* Program DMA destination addr for TX LOW */
719	wcn36xx_dxe_write_register(wcn,
720		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
721		WCN36XX_DXE_WQ_TX_L);
722
723	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
724	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
725
726	/***************************************/
727	/* Init descriptors for TX HIGH channel */
728	/***************************************/
729	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
730	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
731
732	/* Write channel head to a NEXT register */
733	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
734		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
735
736	/* Program DMA destination addr for TX HIGH */
737	wcn36xx_dxe_write_register(wcn,
738		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
739		WCN36XX_DXE_WQ_TX_H);
740
741	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
742
743	/* Enable channel interrupts */
744	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
745
746	/***************************************/
747	/* Init descriptors for RX LOW channel */
748	/***************************************/
749	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
750
751	/* For RX we need to preallocate buffers */
752	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
753
754	/* Write channel head to a NEXT register */
755	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
756		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
757
758	/* Write DMA source address */
759	wcn36xx_dxe_write_register(wcn,
760		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
761		WCN36XX_DXE_WQ_RX_L);
762
763	/* Program preallocated destination address */
764	wcn36xx_dxe_write_register(wcn,
765		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
766		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
767
768	/* Enable default control registers */
769	wcn36xx_dxe_write_register(wcn,
770		WCN36XX_DXE_REG_CTL_RX_L,
771		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
772
773	/* Enable channel interrupts */
774	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
775
776	/***************************************/
777	/* Init descriptors for RX HIGH channel */
778	/***************************************/
779	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
780
781	/* For RX we need to preallocate buffers */
782	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
783
784	/* Write channel head to a NEXT register */
785	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
786		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
787
788	/* Write DMA source address */
789	wcn36xx_dxe_write_register(wcn,
790		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
791		WCN36XX_DXE_WQ_RX_H);
792
793	/* Program preallocated destination address */
794	wcn36xx_dxe_write_register(wcn,
795		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
796		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
797
798	/* Enable default control registers */
799	wcn36xx_dxe_write_register(wcn,
800		WCN36XX_DXE_REG_CTL_RX_H,
801		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
802
803	/* Enable channel interrupts */
804	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
805
806	ret = wcn36xx_dxe_request_irqs(wcn);
807	if (ret < 0)
808		goto out_err;
809
810	return 0;
811
812out_err:
813	return ret;
814}
815
816void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
817{
818	free_irq(wcn->tx_irq, wcn);
819	free_irq(wcn->rx_irq, wcn);
820
821	if (wcn->tx_ack_skb) {
822		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
823		wcn->tx_ack_skb = NULL;
824	}
825
826	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
827	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
828}