/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-trans-pcie-int.h"
#include "iwl-op-mode.h"

#ifdef CONFIG_IWLWIFI_IDI
#include "iwl-amfh.h"
#endif

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to Receive Buffers to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot in which the firmware can place
 * a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
 *   were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                            iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into the Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.  If insufficient rx_free buffers
 *                            are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detaches iwl_rx_mem_buffers from the pool up to the
 *                            READ INDEX, moving the packet buffer from the
 *                            queue to rx_used.
 *                            Calls iwl_rx_queue_restock to refill any empty
 *                            slots.
 * ...
 *
 */

/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
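
/*
 * Illustrative worked example (not part of the driver), assuming
 * RX_QUEUE_SIZE = 256: with read = 200 and write = 250 the raw difference is
 * 200 - 250 = -50, which wraps to -50 + 256 = 206; subtracting the 2-slot
 * guard leaves 204 slots that may be restocked.  With read == write the
 * result is RX_QUEUE_SIZE - 2 = 254, i.e. a fully drained queue; the guard
 * band is what keeps the "full" and "empty" states distinguishable.
 */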

/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
			struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
				q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
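
/*
 * Illustrative note (not part of the driver): because the device is only
 * told about the write pointer rounded down to a multiple of 8
 * (q->write & ~0x7), restocking e.g. 13 buffers advances write_actual only
 * to 8; the remaining 5 buffers become visible to the device once write
 * reaches 16.
 */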

/**
 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
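
/*
 * Hypothetical sketch (not in the original file): the RBD keeps only bits
 * 8..35 of the DMA address, which is why iwlagn_rx_allocate() below insists
 * on a 36-bit, 256-byte-aligned mapping.  Under those constraints the
 * conversion is lossless, e.g. page_dma = 0x0012345600 becomes the RBD value
 * 0x00123456 and can be recovered by shifting left by 8 again.  The helper
 * name below is an assumption made purely for illustration:
 */
static inline dma_addr_t iwlagn_rbd_ptr2dma_addr(__le32 rbd)
{
	/* inverse of iwlagn_dma_addr2rbd_ptr() for aligned, 36-bit addresses */
	return (dma_addr_t)le32_to_cpu(rbd) << 8;
}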

/**
 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(trans, rxq);
	}
}

/**
 * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except during initialization)
 */
static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask,
				  trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					   "order: %d\n",
					   trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 " Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = dma_map_page(trans->dev, page, 0,
				PAGE_SIZE << trans_pcie->rx_page_order,
				DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

void iwlagn_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwlagn_rx_allocate(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwlagn_rx_queue_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
{
	iwlagn_rx_allocate(trans, GFP_ATOMIC);

	iwlagn_rx_queue_restock(trans);
}

void iwl_bg_rx_replenish(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwlagn_rx_replenish(trans_pcie->trans);
}

static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			rxcb._offset,
			trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
			pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_tx_cmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				PAGE_SIZE << trans_pcie->rx_page_order,
				DMA_FROM_DEVICE);
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}

/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void iwl_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);

	/* calculate the total number of frames that need to be restocked
	 * after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: r = %d, i = %d (%p)\n", r, i, rxb);

		iwl_rx_handle_rxbuf(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwlagn_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Update the driver's Rx queue read pointer */
	rxq->read = i;
	if (fill_rx)
		iwlagn_rx_replenish_now(trans);
	else
		iwlagn_rx_queue_restock(trans);
}
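
/*
 * Illustrative note (not part of the driver): fill_rx is set when more than
 * half of the ring (RX_QUEUE_SIZE / 2 entries) is waiting to be restocked;
 * in that case iwl_rx_handle() replenishes with GFP_ATOMIC as it goes (at
 * least once every 8 processed buffers) instead of only once at the end, so
 * the firmware does not run out of RBDs under heavy load.
 */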

/**
 * iwl_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_irq_handle_error(struct iwl_trans *trans)
{
	/* W/A for WiFi/WiMAX coex, when WiMAX owns the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			APMG_PS_CTRL_VAL_RESET_REQ))) {
		struct iwl_trans_pcie *trans_pcie;

		trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans->wait_command_queue);
		return;
	}

	iwl_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	iwl_op_mode_nic_error(trans->op_mode);
}

/* tasklet for iwlagn interrupt */
void iwl_irq_tasklet(struct iwl_trans *trans)
{
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;


	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* just for debug */
		inta_mask = iwl_read32(trans, CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
				inta, inta_mask);
	}
#endif

	/* The interrupt was saved in the inta variable above, so we can now
	 * reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans, "Scheduler finished transmitting "
				      "the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
				hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_txq_update_write_ptr(trans,
						 &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
			CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive an
		 * RX interrupt even though the shared data changes do not yet
		 * reflect it; the periodic interrupt will detect any dangling
		 * Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);
#ifdef CONFIG_IWLWIFI_IDI
		iwl_amfh_rx_handler();
#else
		iwl_rx_handle(trans);
#endif
		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				    CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
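
/*
 * Illustrative note (not part of the driver): with ICT_SHIFT = 12 the table
 * is ICT_SIZE = 4096 bytes, i.e. ICT_COUNT = 4096 / sizeof(u32) = 1024
 * 32-bit entries -- one device page, each entry holding one packed interrupt
 * value written by the device.
 */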

/* Free dram table */
void iwl_free_isr_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}


/*
 * Allocate the DRAM-shared table; it is an aligned memory
 * block of ICT_SIZE.
 * Also reset all data related to ICT table interrupts.
 */
int iwl_alloc_isr_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_free_isr_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}

/* Device is going up: inform it that it will use the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_reset_ict(struct iwl_trans *trans)
{
	u32 val;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* Device is going down: disable ICT interrupt usage */
void iwl_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static irqreturn_t iwl_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	unsigned long flags;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif
	if (!trans)
		return IRQ_NONE;

	trace_iwlwifi_dev_irq(trans->dev);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 *    back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
	}
#endif

	trans_pcie->inta |= inta;
	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
			!trans_pcie->inta)
		iwl_enable_interrupts(trans);

 unplugged:
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* Re-enable interrupts here since we don't have anything to service;
	 * only re-enable if they were disabled by this irq handler and no
	 * tasklet was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		!trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}

/*
 * Interrupt handler using the ICT table.  With this handler the driver stops
 * reading the INTA register to discover the device's interrupts, since
 * reading that register is expensive.  Instead, the device writes interrupt
 * causes into the ICT DRAM table, increments its index and fires the
 * interrupt.  The driver then ORs all ICT table entries from the current
 * index up to the first entry with value 0; the result is the interrupt set
 * that needs to be serviced.  The driver sets the consumed entries back to 0
 * and updates the index.
 */
irqreturn_t iwl_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (!trans_pcie->use_ict)
		return iwl_isr(irq, data);

	trace_iwlwifi_dev_irq(trans->dev);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);


	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
			inta, inta_mask, val);

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta) {
		/* Re-enable interrupts here only if they were disabled by
		 * this handler and no tasklet was scheduled; if a tasklet
		 * was scheduled, it will re-enable them itself.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* Re-enable interrupts here since we don't have anything to service.
	 * Only re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}
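
/*
 * Hypothetical sketch (not in the original file): the ICT-value-to-CSR_INT
 * mapping used by iwl_isr_ict() above, factored out with a worked example.
 * The helper name is an assumption made purely for illustration.
 */
static inline u32 iwl_ict_val_to_inta(u32 val)
{
	/* h/w bug w/a: bits 18/19 imply the Rx bit (bit 15, later bit 31) */
	if (val & 0xC0000)
		val |= 0x8000;
	/* the low byte keeps its position, the second byte moves to bits
	 * 24..31; e.g. val = 0x00008002 yields inta = 0x80000002 */
	return (0xff & val) | ((0xff00 & val) << 16);
}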