/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"

/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif
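
/*
 * Worked example (added): with the common 4 KB pages, PAGE_SHIFT is 12,
 * so FL_PG_ORDER is 4 and a "large" FL buffer is PAGE_SIZE << 4 = 64 KB.
 * On systems with 64 KB or bigger pages a single page already reaches
 * that size, so FL_PG_ORDER is 0.
 */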

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512

/* Ethernet header padding prepended to RX_PKTs */
#define RX_PKT_PAD 2

/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive; we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
 */
#define MAX_TX_RECLAIM 16

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
 * attempt to refill it.
 */
#define FL_STARVE_THRES 4

/*
 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
 * This is the same as calc_tx_descs() for a TSO packet with
 * nr_frags == MAX_SKB_FRAGS.
 */
#define ETHTXQ_STOP_THRES \
	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
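
/*
 * Worked example (added, assuming MAX_SKB_FRAGS is 18 as it is with
 * 4 KB pages): the SGL for a maximally fragmented packet needs
 * (3 * 18) / 2 + (18 & 1) = 27 flits, i.e. 4 descriptors, plus 1 more
 * for the WR and CPL headers, so the queue is stopped once fewer than
 * 5 descriptors remain.
 */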

/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 128

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

enum {
	/* packet alignment in FL buffers */
	FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
	/* egress status entry size */
	STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
};

struct tx_sw_desc {                /* SW state per Tx descriptor */
	struct sk_buff *skb;
	struct ulptx_sgl *sgl;
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * The low bits of rx_sw_desc.dma_addr have special meaning.
 */
enum {
	RX_LARGE_BUF    = 1 << 0, /* buffer is larger than PAGE_SIZE */
	RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
};

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}
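
/*
 * Note (added): packing these flags into the low bits of dma_addr is
 * safe because FL buffers are whole pages, so their bus addresses are
 * page aligned and the two low bits are always zero.  get_buf_addr()
 * masks the flags back out before the address is used for DMA.
 */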

/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write new
 *	packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 *	fl_cap - return the capacity of a free-buffer list
 *	@fl: the FL
 *
 *	Returns the capacity of a free-buffer list.  The capacity is less than
 *	the size because one descriptor needs to be left unpopulated, otherwise
 *	HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}

static inline bool fl_starving(const struct sge_fl *fl)
{
	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
}

static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
}

/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
			 unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			kfree_skb(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);
	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
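
/*
 * Worked example (added): with a 1024-entry queue, hw_cidx = 5 and
 * q->cidx = 1020 give 5 - 1020 = -1015, which wraps to -1015 + 1024 = 9
 * descriptors ready to be reclaimed.
 */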

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue locked.
 */
static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
					bool unmap)
{
	int avail = reclaimable(q);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adap, q, avail, unmap);
		q->in_use -= avail;
	}
}

static inline int get_buf_size(const struct rx_sw_desc *d)
{
#if FL_PG_ORDER > 0
	return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
					      PAGE_SIZE;
#else
	return PAGE_SIZE;
#endif
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
 *	buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(d), PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}

/**
 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list
 *
 *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(d), PCI_DMA_FROMDEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
			     QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
		q->pend_cred &= 7;
	}
}

static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}

/**
 *	refill_fl - refill an SGE Rx buffer ring
 *	@adap: the adapter
 *	@q: the ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low mark it as starving in the bitmap of starving FLs.
 *
 *	Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];

	gfp |= __GFP_NOWARN;         /* failures are expected */

#if FL_PG_ORDER > 0
	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << FL_PG_ORDER,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, FL_PG_ORDER);
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}
#endif

	while (n--) {
		pg = __netdev_alloc_page(adap->port[0], gfp);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			netdev_free_page(adap->port[0], pg);
			goto out;
		}
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(q))) {
		smp_wmb();
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *	@node: preferred node for memory allocations
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the bus address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}
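
/*
 * Usage sketch (added, illustrative only): a free list, for example,
 * would allocate its HW descriptors and SW state in a single call:
 *
 *	fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
 *			      sizeof(struct rx_sw_desc), &fl->addr,
 *			      &fl->sdesc, STAT_LEN, numa_node);
 *
 * The exact element sizes vary per ring type; the real callers are in
 * the queue-allocation code elsewhere in this driver.
 */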

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
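
/*
 * Worked example (added): the ULPTX SGL packs the command word, the
 * first length and the first address into 2 flits; every later pair of
 * entries takes a 3-flit ulptx_sge_pair and a leftover odd entry takes
 * 2 flits.  Hence sgl_len(1) = 2, sgl_len(2) = 4 and sgl_len(3) = 5.
 */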

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit as
 *	immediate data.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
}

/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	if (is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
	if (skb_shinfo(skb)->gso_size)
		flits += 2;
	return flits;
}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	return flits_to_desc(calc_tx_flits(skb));
}

/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@q: the Tx queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of bus addresses for the SGL elements
 *
 *	Generates a gather list for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a Tx descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @sgl.
 */
static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(si->frags[0].size);
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(si->frags[i].size);
		to->len[1] = cpu_to_be32(si->frags[++i].size);
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(si->frags[i].size);
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*(u64 *)end = 0;
}

/**
 *	ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a Tx queue.
 */
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	wmb();            /* write descriptors before telling HW */
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
		     QID(q->cntxt_id) | PIDX(n));
}

/**
 *	inline_tx_skb - inline a packet's data into Tx descriptors
 *	@skb: the packet
 *	@q: the Tx queue where the packet will be inlined
 *	@pos: starting position in the Tx queue where to inline the packet
 *
 *	Inline a packet's contents directly into Tx descriptors, starting at
 *	the given position within the Tx DMA ring.
 *	Most of the complexity of this operation is dealing with wrap arounds
 *	in the middle of the packet we want to inline.
 */
static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
			  void *pos)
{
	u64 *p;
	int left = (void *)q->stat - pos;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}

	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(const struct sk_buff *skb)
{
	int csum_type;
	const struct iphdr *iph = ip_hdr(skb);

	if (iph->version == 4) {
		if (iph->protocol == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (iph->protocol == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;

		if (ip6h->nexthdr == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (ip6h->nexthdr == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP))
		return TXPKT_CSUM_TYPE(csum_type) |
			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
	else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
			TXPKT_CSUM_LOC(start + skb->csum_offset);
	}
}
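
/*
 * Example (added): for a plain TCP/IPv4 packet with a 20-byte IP header
 * and no VLAN tag this returns TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
 * TXPKT_IPHDR_LEN(20) | TXPKT_ETHHDR_LEN(0), which describes the header
 * layout the HW needs in order to compute the TCP checksum.
 */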

static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

/**
 *	t4_eth_xmit - add a packet to an Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 wr_mid;
	u64 cntrl, *end;
	int qidx, credits;
	unsigned int flits, ndesc;
	struct adapter *adap;
	struct sge_eth_txq *q;
	const struct port_info *pi;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct skb_shared_info *ssi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];

	/*
	 * The chip min packet length is 10 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
out_free:	dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (!is_eth_imm(skb) &&
	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
		q->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		eth_txq_stop(q);
		wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
	}

	wr = (void *)&q->q.desc[q->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = cpu_to_be64(0);
	end = (u64 *)wr + flits;

	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso *lso = (void *)wr;
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN(sizeof(*lso)));
		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
					LSO_FIRST_SLICE | LSO_LAST_SLICE |
					LSO_IPV6(v6) |
					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
					LSO_IPHDR_LEN(l3hdr_len / 4) |
					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
		lso->c.ipid_ofst = htons(0);
		lso->c.mss = htons(ssi->gso_size);
		lso->c.seqno_offset = htonl(0);
		lso->c.len = htonl(skb->len);
		cpl = (void *)(lso + 1);
		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			TXPKT_IPHDR_LEN(l3hdr_len) |
			TXPKT_ETHHDR_LEN(eth_xtra_len);
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN(len));
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
			q->tx_cso++;
		} else
			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
	}

	if (vlan_tx_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
	}

	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	if (is_eth_imm(skb)) {
		inline_tx_skb(skb, &q->q, cpl + 1);
		dev_kfree_skb(skb);
	} else {
		int last_desc;

		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
			  addr);
		skb_orphan(skb);

		last_desc = q->q.pidx + ndesc - 1;
		if (last_desc >= q->q.size)
			last_desc -= q->q.size;
		q->q.sdesc[last_desc].skb = skb;
		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
	}

	txq_advance(&q->q, ndesc);

	ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}

/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
 *	that send only immediate data (presently just the control queues) and
 *	thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);
	int reclaim = hw_cidx - q->cidx;

	if (reclaim < 0)
		reclaim += q->size;

	q->in_use -= reclaim;
	q->cidx = hw_cidx;
}

/**
 *	is_imm - check whether a packet can be sent as immediate data
 *	@skb: the packet
 *
 *	Returns true if a packet can be sent as a WR with immediate data.
 */
static inline int is_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_CTRL_WR_LEN;
}

/**
 *	ctrlq_check_stop - check if a control queue is full and should stop
 *	@q: the queue
 *	@wr: most recent WR written to the queue
 *
 *	Check if a control queue has become full and should be stopped.
 *	We clean up control queue descriptors very lazily, only when we are out.
 *	If the queue is still full after reclaiming any completed descriptors
 *	we suspend it and have the last WR wake it up.
 */
static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
{
	reclaim_completed_tx_imm(&q->q);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
		wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
		q->q.stops++;
		q->full = 1;
	}
}

/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data.
 */
static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
{
	unsigned int ndesc;
	struct fw_wr_hdr *wr;

	if (unlikely(!is_imm(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
	spin_lock(&q->sendq.lock);

	if (unlikely(q->full)) {
		skb->priority = ndesc;                  /* save for restart */
		__skb_queue_tail(&q->sendq, skb);
		spin_unlock(&q->sendq.lock);
		return NET_XMIT_CN;
	}

	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
	inline_tx_skb(skb, &q->q, wr);

	txq_advance(&q->q, ndesc);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
		ctrlq_check_stop(q, wr);

	ring_tx_db(q->adap, &q->q, ndesc);
	spin_unlock(&q->sendq.lock);

	kfree_skb(skb);
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_ctrlq - restart a suspended control queue
 *	@data: the control queue to restart
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	unsigned int written = 0;
	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;

	spin_lock(&q->sendq.lock);
	reclaim_completed_tx_imm(&q->q);
	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */

	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
		struct fw_wr_hdr *wr;
		unsigned int ndesc = skb->priority;     /* previously saved */

		/*
		 * Write descriptors and free skbs outside the lock to limit
		 * wait times.  q->full is still set so new skbs will be queued.
		 */
		spin_unlock(&q->sendq.lock);

		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
		inline_tx_skb(skb, &q->q, wr);
		kfree_skb(skb);

		written += ndesc;
		txq_advance(&q->q, ndesc);
		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
			unsigned long old = q->q.stops;

			ctrlq_check_stop(q, wr);
			if (q->q.stops != old) {          /* suspended anew */
				spin_lock(&q->sendq.lock);
				goto ringdb;
			}
		}
		if (written > 16) {
			ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}
		spin_lock(&q->sendq.lock);
	}
	q->full = 0;
ringdb: if (written)
		ring_tx_db(q->adap, &q->q, written);
	spin_unlock(&q->sendq.lock);
}

/**
 *	t4_mgmt_tx - send a management message
 *	@adap: the adapter
 *	@skb: the packet containing the management message
 *
 *	Send a management message through control queue 0.
 */
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
	local_bh_enable();
	return ret;
}

/**
 *	is_ofld_imm - check whether a packet can be sent as immediate data
 *	@skb: the packet
 *
 *	Returns true if a packet can be sent as an offload WR with immediate
 *	data.  We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_IMM_TX_PKT_LEN;
}

/**
 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
 *	@skb: the packet
 *
 *	Returns the number of flits needed for the given offload packet.
 *	These packets are already fully constructed and no additional headers
 *	will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8U;   /* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb->tail != skb->transport_header)
		cnt++;
	return flits + sgl_len(cnt);
}

/**
 *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 *	@q: the queue to stop
 *
 *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
 *	inability to map packets.  A periodic timer attempts to restart
 *	queues so marked.
 */
static void txq_stop_maperr(struct sge_ofld_txq *q)
{
	q->mapping_err++;
	q->q.stops++;
	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
		q->adap->sge.txq_maperr);
}

/**
 *	ofldtxq_stop - stop an offload Tx queue that has become full
 *	@q: the queue to stop
 *	@skb: the packet causing the queue to become full
 *
 *	Stops an offload Tx queue that has become full and modifies the packet
 *	being written to request a wakeup.
 */
static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
{
	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;

	wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
	q->q.stops++;
	q->full = 1;
}

/**
 *	service_ofldq - service an offload Tx queue
 *	@q: the offload queue
 *
 *	Services an offload Tx queue by moving packets from its packet queue
 *	to the HW Tx ring.  The function starts and ends with the queue locked.
 */
static void service_ofldq(struct sge_ofld_txq *q)
{
	u64 *pos;
	int credits;
	struct sk_buff *skb;
	unsigned int written = 0;
	unsigned int flits, ndesc;

	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
		/*
		 * We drop the lock but leave skb on sendq, thus retaining
		 * exclusive access to the state of the queue.
		 */
		spin_unlock(&q->sendq.lock);

		reclaim_completed_tx(q->adap, &q->q, false);

		flits = skb->priority;                /* previously saved */
		ndesc = flits_to_desc(flits);
		credits = txq_avail(&q->q) - ndesc;
		BUG_ON(credits < 0);
		if (unlikely(credits < TXQ_STOP_THRES))
			ofldtxq_stop(q, skb);

		pos = (u64 *)&q->q.desc[q->q.pidx];
		if (is_ofld_imm(skb))
			inline_tx_skb(skb, &q->q, pos);
		else if (map_skb(q->adap->pdev_dev, skb,
				 (dma_addr_t *)skb->head)) {
			txq_stop_maperr(q);
			spin_lock(&q->sendq.lock);
			break;
		} else {
			int last_desc, hdr_len = skb_transport_offset(skb);

			memcpy(pos, skb->data, hdr_len);
			write_sgl(skb, &q->q, (void *)pos + hdr_len,
				  pos + flits, hdr_len,
				  (dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
			skb->dev = q->adap->port[0];
			skb->destructor = deferred_unmap_destructor;
#endif
			last_desc = q->q.pidx + ndesc - 1;
			if (last_desc >= q->q.size)
				last_desc -= q->q.size;
			q->q.sdesc[last_desc].skb = skb;
		}

		txq_advance(&q->q, ndesc);
		written += ndesc;
		if (unlikely(written > 32)) {
			ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}

		spin_lock(&q->sendq.lock);
		__skb_unlink(skb, &q->sendq);
		if (is_ofld_imm(skb))
			kfree_skb(skb);
	}
	if (likely(written))
		ring_tx_db(q->adap, &q->q, written);
}

/**
 *	ofld_xmit - send a packet through an offload queue
 *	@q: the Tx offload queue
 *	@skb: the packet
 *
 *	Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
{
	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
	spin_lock(&q->sendq.lock);
	__skb_queue_tail(&q->sendq, skb);
	if (q->sendq.qlen == 1)
		service_ofldq(q);
	spin_unlock(&q->sendq.lock);
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_ofldq - restart a suspended offload queue
 *	@data: the offload queue to restart
 *
 *	Resumes transmission on a suspended Tx offload queue.
 */
static void restart_ofldq(unsigned long data)
{
	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;

	spin_lock(&q->sendq.lock);
	q->full = 0;            /* the queue actually is completely empty now */
	service_ofldq(q);
	spin_unlock(&q->sendq.lock);
}

/**
 *	skb_txq - return the Tx queue an offload packet should use
 *	@skb: the packet
 *
 *	Returns the Tx queue an offload packet should use as indicated by bits
 *	1-15 in the packet's queue_mapping.
 */
static inline unsigned int skb_txq(const struct sk_buff *skb)
{
	return skb->queue_mapping >> 1;
}

/**
 *	is_ctrl_pkt - return whether an offload packet is a control packet
 *	@skb: the packet
 *
 *	Returns whether an offload packet should use an OFLD or a CTRL
 *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
 */
static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->queue_mapping & 1;
}
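
/*
 * Example (added): a ULD that wants to send a regular offload packet on
 * offload queue 3 therefore sets skb->queue_mapping to (3 << 1) | 0,
 * while (qidx << 1) | 1 would route the packet to control queue qidx.
 */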

static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
{
	unsigned int idx = skb_txq(skb);

	if (unlikely(is_ctrl_pkt(skb)))
		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
}

/**
 *	t4_ofld_send - send an offload packet
 *	@adap: the adapter
 *	@skb: the packet
 *
 *	Sends an offload packet.  We use the packet queue_mapping to select the
 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
 *	should be sent as regular or control, bits 1-15 select the queue.
 */
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ofld_send(adap, skb);
	local_bh_enable();
	return ret;
}

/**
 *	cxgb4_ofld_send - send an offload packet
 *	@dev: the net device
 *	@skb: the packet
 *
 *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
 *	intended for ULDs.
 */
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
{
	return t4_ofld_send(netdev2adap(dev), skb);
}
EXPORT_SYMBOL(cxgb4_ofld_send);

static inline void copy_frags(struct skb_shared_info *ssi,
			      const struct pkt_gl *gl, unsigned int offset)
{
	unsigned int n;

	/* usually there's just one frag */
	ssi->frags[0].page = gl->frags[0].page;
	ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
	ssi->frags[0].size = gl->frags[0].size - offset;
	ssi->nr_frags = gl->nfrags;
	n = gl->nfrags - 1;
	if (n)
		memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[n].page);
}

/**
 *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
 *	@gl: the gather list
 *	@skb_len: size of sk_buff main body if it carries fragments
 *	@pull_len: amount of data to move to the sk_buff's main body
 *
 *	Builds an sk_buff from the given packet gather list.  Returns the
 *	sk_buff or %NULL if sk_buff allocation failed.
 */
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
				   unsigned int skb_len, unsigned int pull_len)
{
	struct sk_buff *skb;

	/*
	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
	 * size, which is expected since buffers are at least PAGE_SIZEd.
	 * In this case packets up to RX_COPY_THRES have only one fragment.
	 */
	if (gl->tot_len <= RX_COPY_THRES) {
		skb = dev_alloc_skb(gl->tot_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {
		skb = dev_alloc_skb(skb_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, pull_len);
		skb_copy_to_linear_data(skb, gl->va, pull_len);

		copy_frags(skb_shinfo(skb), gl, pull_len);
		skb->len = gl->tot_len;
		skb->data_len = skb->len - pull_len;
		skb->truesize += skb->data_len;
	}
out:	return skb;
}
EXPORT_SYMBOL(cxgb4_pktgl_to_skb);

/**
 *	t4_pktgl_free - free a packet gather list
 *	@gl: the gather list
 *
 *	Releases the pages of a packet gather list.  We do not own the last
 *	page on the list and do not free it.
 */
static void t4_pktgl_free(const struct pkt_gl *gl)
{
	int n;
	const skb_frag_t *p;

	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
		put_page(p->page);
}

/*
 * Process an MPS trace packet.  Give it an unused protocol number so it won't
 * be delivered to anyone and send it to the stack for capture.
 */
static noinline int handle_trace_pkt(struct adapter *adap,
				     const struct pkt_gl *gl)
{
	struct sk_buff *skb;
	struct cpl_trace_pkt *p;

	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		return 0;
	}

	p = (struct cpl_trace_pkt *)skb->data;
	__skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	skb->protocol = htons(0xffff);
	skb->dev = adap->port[0];
	netif_receive_skb(skb);
	return 0;
}

static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
		   const struct cpl_rx_pkt *pkt)
{
	int ret;
	struct sk_buff *skb;

	skb = napi_get_frags(&rxq->rspq.napi);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		rxq->stats.rx_drops++;
		return;
	}

	copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD);
	skb->len = gl->tot_len - RX_PKT_PAD;
	skb->data_len = skb->len;
	skb->truesize += skb->data_len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxq->rspq.idx);
	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;

	if (unlikely(pkt->vlan_ex)) {
		__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	ret = napi_gro_frags(&rxq->rspq.napi);
	if (ret == GRO_HELD)
		rxq->stats.lro_pkts++;
	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
		rxq->stats.lro_merged++;
	rxq->stats.pkts++;
	rxq->stats.rx_cso++;
}

/**
 *	t4_ethrx_handler - process an ingress ethernet packet
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@si: the gather list of packet fragments
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *si)
{
	bool csum_ok;
	struct sk_buff *skb;
	const struct cpl_rx_pkt *pkt;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
		return handle_trace_pkt(q->adap, si);

	pkt = (const struct cpl_rx_pkt *)rsp;
	csum_ok = pkt->csum_calc && !pkt->err_vec;
	if ((pkt->l2info & htonl(RXF_TCP)) &&
	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
		do_gro(rxq, si, pkt);
		return 0;
	}

	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4_pktgl_free(si);
		rxq->stats.rx_drops++;
		return 0;
	}

	__skb_pull(skb, RX_PKT_PAD);      /* remove ethernet header padding */
	skb->protocol = eth_type_trans(skb, q->netdev);
	skb_record_rx_queue(skb, q->idx);
	if (skb->dev->features & NETIF_F_RXHASH)
		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;

	rxq->stats.pkts++;

	if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
	    (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
		if (!pkt->ip_frag) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			rxq->stats.rx_cso++;
		} else if (pkt->l2info & htonl(RXF_IP)) {
			__sum16 c = (__force __sum16)pkt->csum;
			skb->csum = csum_unfold(c);
			skb->ip_summed = CHECKSUM_COMPLETE;
			rxq->stats.rx_cso++;
		}
	} else
		skb_checksum_none_assert(skb);

	if (unlikely(pkt->vlan_ex)) {
		__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	netif_receive_skb(skb);
	return 0;
}

/**
 *	restore_rx_bufs - put back a packet's Rx buffers
 *	@si: the packet gather list
 *	@q: the SGE free list
 *	@frags: number of FL buffers to restore
 *
 *	Puts back on an FL the Rx buffers associated with @si.  The buffers
 *	have already been unmapped and are left unmapped, we mark them so to
 *	prevent further unmapping attempts.
 *
 *	This function undoes a series of @unmap_rx_buf calls when we find out
 *	that the current packet can't be processed right away after all and we
 *	need to come back to it later.  This is a very rare event and there's
 *	no effort to make this particularly efficient.
 */
static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
			    int frags)
{
	struct rx_sw_desc *d;

	while (frags--) {
		if (q->cidx == 0)
			q->cidx = q->size - 1;
		else
			q->cidx--;
		d = &q->sdesc[q->cidx];
		d->page = si->frags[frags].page;
		d->dma_addr |= RX_UNMAPPED_BUF;
		q->avail++;
	}
}

/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static inline bool is_new_response(const struct rsp_ctrl *r,
				   const struct sge_rspq *q)
{
	return RSPD_GEN(r->type_gen) == q->gen;
}
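
/*
 * Note (added): this works because q->gen is flipped each time the
 * queue index wraps (see rspq_next() below), so a stale descriptor left
 * over from the previous pass around the ring still carries the old
 * generation bit and is not mistaken for a new response.
 */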

/**
 *	rspq_next - advance to the next entry in a response queue
 *	@q: the queue
 *
 *	Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *q)
{
	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;
		q->cur_desc = q->desc;
	}
}

/**
 *	process_responses - process responses from an SGE response queue
 *	@q: the ingress queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as control messages from FW
 *	or HW.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
static int process_responses(struct sge_rspq *q, int budget)
{
	int ret, rsp_type;
	int budget_left = budget;
	const struct rsp_ctrl *rc;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	while (likely(budget_left)) {
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		rmb();
		rsp_type = RSPD_TYPE(rc->type_gen);
		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
			skb_frag_t *fp;
			struct pkt_gl si;
			const struct rx_sw_desc *rsd;
			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;

			if (len & RSPD_NEWBUF) {
				if (likely(q->offset > 0)) {
					free_rx_bufs(q->adap, &rxq->fl, 1);
					q->offset = 0;
				}
				len = RSPD_LEN(len);
			}
			si.tot_len = len;

			/* gather packet fragments */
			for (frags = 0, fp = si.frags; ; frags++, fp++) {
				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
				bufsz = get_buf_size(rsd);
				fp->page = rsd->page;
				fp->page_offset = q->offset;
				fp->size = min(bufsz, len);
				len -= fp->size;
				if (!len)
					break;
				unmap_rx_buf(q->adap, &rxq->fl);
			}

			/*
			 * Last buffer remains mapped so explicitly make it
			 * coherent for CPU access.
			 */
			dma_sync_single_for_cpu(q->adap->pdev_dev,
						get_buf_addr(rsd),
						fp->size, DMA_FROM_DEVICE);

			si.va = page_address(si.frags[0].page) +
				si.frags[0].page_offset;
			prefetch(si.va);

			si.nfrags = frags + 1;
			ret = q->handler(q, q->cur_desc, &si);
			if (likely(ret == 0))
				q->offset += ALIGN(fp->size, FL_ALIGN);
			else
				restore_rx_bufs(&si, &rxq->fl, frags);
		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
			ret = q->handler(q, q->cur_desc, NULL);
		} else {
			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
		}

		if (unlikely(ret)) {
			/* couldn't process descriptor, back off for recovery */
			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
			break;
		}

		rspq_next(q);
		budget_left--;
	}

	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
		__refill_fl(q->adap, &rxq->fl);
	return budget - budget_left;
}

/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere (the latter
 *	is not a concern at all with MSI-X as non-data interrupts then have
 *	a separate handler).
 */
1778static int napi_rx_handler(struct napi_struct *napi, int budget)
1779{
1780	unsigned int params;
1781	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1782	int work_done = process_responses(q, budget);
1783
1784	if (likely(work_done < budget)) {
1785		napi_complete(napi);
1786		params = q->next_intr_params;
1787		q->next_intr_params = q->intr_params;
1788	} else
1789		params = QINTR_TIMER_IDX(7);
1790
1791	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
1792		     INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
1793	return work_done;
1794}
1795
1796/*
1797 * The MSI-X interrupt handler for an SGE response queue.
1798 */
1799irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
1800{
1801	struct sge_rspq *q = cookie;
1802
1803	napi_schedule(&q->napi);
1804	return IRQ_HANDLED;
1805}
1806
1807/*
1808 * Process the indirect interrupt entries in the interrupt queue and kick off
1809 * NAPI for each queue that has generated an entry.
1810 */
1811static unsigned int process_intrq(struct adapter *adap)
1812{
1813	unsigned int credits;
1814	const struct rsp_ctrl *rc;
1815	struct sge_rspq *q = &adap->sge.intrq;
1816
1817	spin_lock(&adap->sge.intrq_lock);
1818	for (credits = 0; ; credits++) {
1819		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1820		if (!is_new_response(rc, q))
1821			break;
1822
1823		rmb();
1824		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
1825			unsigned int qid = ntohl(rc->pldbuflen_qid);
1826
1827			qid -= adap->sge.ingr_start;
1828			napi_schedule(&adap->sge.ingr_map[qid]->napi);
1829		}
1830
1831		rspq_next(q);
1832	}
1833
1834	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
1835		     INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
1836	spin_unlock(&adap->sge.intrq_lock);
1837	return credits;
1838}
1839
1840/*
1841 * The MSI interrupt handler, which handles data events from SGE response queues
1842 * as well as error and other async events, since they all use the same MSI vector.
1843 */
1844static irqreturn_t t4_intr_msi(int irq, void *cookie)
1845{
1846	struct adapter *adap = cookie;
1847
1848	t4_slow_intr_handler(adap);
1849	process_intrq(adap);
1850	return IRQ_HANDLED;
1851}
1852
1853/*
1854 * Interrupt handler for legacy INTx interrupts.
1855 * Handles data events from SGE response queues as well as error and other
1856 * async events as they all use the same interrupt line.
1857 */
1858static irqreturn_t t4_intr_intx(int irq, void *cookie)
1859{
1860	struct adapter *adap = cookie;
1861
1862	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
1863	if (t4_slow_intr_handler(adap) | process_intrq(adap))
1864		return IRQ_HANDLED;
1865	return IRQ_NONE;             /* probably shared interrupt */
1866}
1867
1868/**
1869 *	t4_intr_handler - select the top-level interrupt handler
1870 *	@adap: the adapter
1871 *
1872 *	Selects the top-level interrupt handler based on the type of interrupts
1873 *	(MSI-X, MSI, or INTx).
1874 */
1875irq_handler_t t4_intr_handler(struct adapter *adap)
1876{
1877	if (adap->flags & USING_MSIX)
1878		return t4_sge_intr_msix;
1879	if (adap->flags & USING_MSI)
1880		return t4_intr_msi;
1881	return t4_intr_intx;
1882}
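
/*
 * Illustrative sketch only, not code from this driver: a probe-time caller
 * could install the handler selected above roughly as
 *
 *	err = request_irq(adap->pdev->irq, t4_intr_handler(adap), 0,
 *			  "cxgb4", adap);
 *
 * where the flags and name argument are assumptions for the example.
 */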
1883
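/*
 * sge_rx_timer_cb - periodic Rx queue housekeeping
 *
 * Restarts NAPI on queues whose free lists have gone starving and uses the
 * SGE debug registers to detect and report stalled ingress DMA channels.
 */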
1884static void sge_rx_timer_cb(unsigned long data)
1885{
1886	unsigned long m;
1887	unsigned int i, cnt[2];
1888	struct adapter *adap = (struct adapter *)data;
1889	struct sge *s = &adap->sge;
1890
1891	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
1892		for (m = s->starving_fl[i]; m; m &= m - 1) {
1893			struct sge_eth_rxq *rxq;
1894			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
1895			struct sge_fl *fl = s->egr_map[id];
1896
1897			clear_bit(id, s->starving_fl);
1898			smp_mb__after_clear_bit();
1899
1900			if (fl_starving(fl)) {
1901				rxq = container_of(fl, struct sge_eth_rxq, fl);
1902				if (napi_reschedule(&rxq->rspq.napi))
1903					fl->starving++;
1904				else
1905					set_bit(id, s->starving_fl);
1906			}
1907		}
1908
1909	t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
1910	cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
1911	cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
1912
1913	for (i = 0; i < 2; i++)
1914		if (cnt[i] >= s->starve_thres) {
1915			if (s->idma_state[i] || cnt[i] == 0xffffffff)
1916				continue;
1917			s->idma_state[i] = 1;
1918			t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
1919			m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
1920			dev_warn(adap->pdev_dev,
1921				 "SGE idma%u starvation detected for queue %lu\n",
1922				 i, m & 0xffff);
1923		} else if (s->idma_state[i])
1924			s->idma_state[i] = 0;
1925
1926	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
1927}
1928
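/*
 * sge_tx_timer_cb - periodic Tx queue housekeeping
 *
 * Reschedules the restart tasklet of any offload queue that saw a mapping
 * error and reclaims up to MAX_TIMER_TX_RECLAIM completed descriptors from
 * the Ethernet Tx queues, round-robin from where the previous run stopped.
 */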
1929static void sge_tx_timer_cb(unsigned long data)
1930{
1931	unsigned long m;
1932	unsigned int i, budget;
1933	struct adapter *adap = (struct adapter *)data;
1934	struct sge *s = &adap->sge;
1935
1936	for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
1937		for (m = s->txq_maperr[i]; m; m &= m - 1) {
1938			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
1939			struct sge_ofld_txq *txq = s->egr_map[id];
1940
1941			clear_bit(id, s->txq_maperr);
1942			tasklet_schedule(&txq->qresume_tsk);
1943		}
1944
1945	budget = MAX_TIMER_TX_RECLAIM;
1946	i = s->ethtxq_rover;
1947	do {
1948		struct sge_eth_txq *q = &s->ethtxq[i];
1949
1950		if (q->q.in_use &&
1951		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
1952		    __netif_tx_trylock(q->txq)) {
1953			int avail = reclaimable(&q->q);
1954
1955			if (avail) {
1956				if (avail > budget)
1957					avail = budget;
1958
1959				free_tx_desc(adap, &q->q, avail, true);
1960				q->q.in_use -= avail;
1961				budget -= avail;
1962			}
1963			__netif_tx_unlock(q->txq);
1964		}
1965
1966		if (++i >= s->ethqsets)
1967			i = 0;
1968	} while (budget && i != s->ethtxq_rover);
1969	s->ethtxq_rover = i;
1970	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
1971}
1972
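/**
 *	t4_sge_alloc_rxq - allocate and initialize an SGE ingress queue
 *	@adap: the adapter
 *	@iq: the ingress queue to initialize
 *	@fwevtq: true if this is the firmware event queue
 *	@dev: the net device associated with the queue
 *	@intr_idx: MSI-X vector, or, if negative, the forwarding interrupt
 *		queue index encoded as -intr_idx - 1
 *	@fl: the free list backing the queue, if any
 *	@hnd: the handler invoked for queue responses
 *
 *	Allocates the response ring and optional free list, creates the queue
 *	in the firmware with an FW_IQ_CMD, and hooks the queue into NAPI and
 *	the driver's ingress queue map.
 */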
1973int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1974		     struct net_device *dev, int intr_idx,
1975		     struct sge_fl *fl, rspq_handler_t hnd)
1976{
1977	int ret, flsz = 0;
1978	struct fw_iq_cmd c;
1979	struct port_info *pi = netdev_priv(dev);
1980
1981	/* Size must be a multiple of 16, which includes the status entry. */
1982	iq->size = roundup(iq->size, 16);
1983
1984	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
1985			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
1986	if (!iq->desc)
1987		return -ENOMEM;
1988
1989	memset(&c, 0, sizeof(c));
1990	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
1991			    FW_CMD_WRITE | FW_CMD_EXEC |
1992			    FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
1993	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
1994				 FW_LEN16(c));
1995	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
1996		FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
1997		FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
1998		FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
1999							-intr_idx - 1));
2000	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
2001		FW_IQ_CMD_IQGTSMODE |
2002		FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
2003		FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
2004	c.iqsize = htons(iq->size);
2005	c.iqaddr = cpu_to_be64(iq->phys_addr);
2006
2007	if (fl) {
2008		fl->size = roundup(fl->size, 8);
2009		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2010				      sizeof(struct rx_sw_desc), &fl->addr,
2011				      &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
2012		if (!fl->desc)
2013			goto fl_nomem;
2014
2015		flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
2016		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
2017					    FW_IQ_CMD_FL0FETCHRO(1) |
2018					    FW_IQ_CMD_FL0DATARO(1) |
2019					    FW_IQ_CMD_FL0PADEN);
2020		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
2021				FW_IQ_CMD_FL0FBMAX(3));
2022		c.fl0size = htons(flsz);
2023		c.fl0addr = cpu_to_be64(fl->addr);
2024	}
2025
2026	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2027	if (ret)
2028		goto err;
2029
2030	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2031	iq->cur_desc = iq->desc;
2032	iq->cidx = 0;
2033	iq->gen = 1;
2034	iq->next_intr_params = iq->intr_params;
2035	iq->cntxt_id = ntohs(c.iqid);
2036	iq->abs_id = ntohs(c.physiqid);
2037	iq->size--;                           /* subtract status entry */
2038	iq->adap = adap;
2039	iq->netdev = dev;
2040	iq->handler = hnd;
2041
2042	/* set offset to -1 to distinguish ingress queues without FL */
2043	iq->offset = fl ? 0 : -1;
2044
2045	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2046
2047	if (fl) {
2048		fl->cntxt_id = ntohs(c.fl0id);
2049		fl->avail = fl->pend_cred = 0;
2050		fl->pidx = fl->cidx = 0;
2051		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2052		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2053		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2054	}
2055	return 0;
2056
2057fl_nomem:
2058	ret = -ENOMEM;
2059err:
2060	if (iq->desc) {
2061		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2062				  iq->desc, iq->phys_addr);
2063		iq->desc = NULL;
2064	}
2065	if (fl && fl->desc) {
2066		kfree(fl->sdesc);
2067		fl->sdesc = NULL;
2068		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2069				  fl->desc, fl->addr);
2070		fl->desc = NULL;
2071	}
2072	return ret;
2073}
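
/*
 * Illustrative sketch only: a caller bringing up an Ethernet Rx queue set
 * might use this roughly as
 *
 *	err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msix_vec,
 *			       &q->fl, t4_ethrx_handler);
 *
 * assuming q->rspq.size, q->rspq.iqe_len and q->fl.size were filled in
 * beforehand; the variable names here are assumptions for the example.
 */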
2074
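/* Common initialization shared by all SGE egress (Tx) queue types. */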
2075static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2076{
2077	q->in_use = 0;
2078	q->cidx = q->pidx = 0;
2079	q->stops = q->restarts = 0;
2080	q->stat = (void *)&q->desc[q->size];
2081	q->cntxt_id = id;
2082	adap->sge.egr_map[id - adap->sge.egr_start] = q;
2083}
2084
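/**
 *	t4_sge_alloc_eth_txq - allocate an Ethernet Tx queue
 *	@adap: the adapter
 *	@txq: the queue to initialize
 *	@dev: the net device the queue services
 *	@netdevq: the corresponding netdev Tx queue
 *	@iqid: the ingress queue to which completions are directed
 *
 *	Allocates the descriptor ring on the netdev queue's NUMA node and
 *	creates the egress queue in the firmware with an FW_EQ_ETH_CMD.
 */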
2085int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2086			 struct net_device *dev, struct netdev_queue *netdevq,
2087			 unsigned int iqid)
2088{
2089	int ret, nentries;
2090	struct fw_eq_eth_cmd c;
2091	struct port_info *pi = netdev_priv(dev);
2092
2093	/* Add status entries */
2094	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2095
2096	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2097			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2098			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
2099			netdev_queue_numa_node_read(netdevq));
2100	if (!txq->q.desc)
2101		return -ENOMEM;
2102
2103	memset(&c, 0, sizeof(c));
2104	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2105			    FW_CMD_WRITE | FW_CMD_EXEC |
2106			    FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
2107	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
2108				 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
2109	c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
2110	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
2111				   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
2112				   FW_EQ_ETH_CMD_FETCHRO(1) |
2113				   FW_EQ_ETH_CMD_IQID(iqid));
2114	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
2115				  FW_EQ_ETH_CMD_FBMAX(3) |
2116				  FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
2117				  FW_EQ_ETH_CMD_EQSIZE(nentries));
2118	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2119
2120	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2121	if (ret) {
2122		kfree(txq->q.sdesc);
2123		txq->q.sdesc = NULL;
2124		dma_free_coherent(adap->pdev_dev,
2125				  nentries * sizeof(struct tx_desc),
2126				  txq->q.desc, txq->q.phys_addr);
2127		txq->q.desc = NULL;
2128		return ret;
2129	}
2130
2131	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2132	txq->txq = netdevq;
2133	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2134	txq->mapping_err = 0;
2135	return 0;
2136}
2137
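/**
 *	t4_sge_alloc_ctrl_txq - allocate a control Tx queue
 *	@adap: the adapter
 *	@txq: the queue to initialize
 *	@dev: the net device providing the port info
 *	@iqid: the ingress queue associated with the control queue
 *	@cmplqid: the queue to which CPL completions are directed
 *
 *	Control queues keep no per-packet s/w descriptor state (the ring is
 *	allocated without sdesc entries); pending skbs wait on sendq and the
 *	qresume tasklet restarts transmission after the queue fills up.
 */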
2138int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2139			  struct net_device *dev, unsigned int iqid,
2140			  unsigned int cmplqid)
2141{
2142	int ret, nentries;
2143	struct fw_eq_ctrl_cmd c;
2144	struct port_info *pi = netdev_priv(dev);
2145
2146	/* Add status entries */
2147	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2148
2149	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2150				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2151				 NULL, 0, NUMA_NO_NODE);
2152	if (!txq->q.desc)
2153		return -ENOMEM;
2154
2155	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2156			    FW_CMD_WRITE | FW_CMD_EXEC |
2157			    FW_EQ_CTRL_CMD_PFN(adap->fn) |
2158			    FW_EQ_CTRL_CMD_VFN(0));
2159	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
2160				 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
2161	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
2162	c.physeqid_pkd = htonl(0);
2163	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
2164				   FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
2165				   FW_EQ_CTRL_CMD_FETCHRO |
2166				   FW_EQ_CTRL_CMD_IQID(iqid));
2167	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
2168				  FW_EQ_CTRL_CMD_FBMAX(3) |
2169				  FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
2170				  FW_EQ_CTRL_CMD_EQSIZE(nentries));
2171	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2172
2173	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2174	if (ret) {
2175		dma_free_coherent(adap->pdev_dev,
2176				  nentries * sizeof(struct tx_desc),
2177				  txq->q.desc, txq->q.phys_addr);
2178		txq->q.desc = NULL;
2179		return ret;
2180	}
2181
2182	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
2183	txq->adap = adap;
2184	skb_queue_head_init(&txq->sendq);
2185	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2186	txq->full = 0;
2187	return 0;
2188}
2189
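/**
 *	t4_sge_alloc_ofld_txq - allocate an offload Tx queue
 *	@adap: the adapter
 *	@txq: the queue to initialize
 *	@dev: the net device providing the port info
 *	@iqid: the ingress queue to which completions are directed
 *
 *	Like the Ethernet variant, but the queue is created with an
 *	FW_EQ_OFLD_CMD and is backed by a sendq of pending skbs and a
 *	qresume tasklet for restarting a stalled queue.
 */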
2190int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2191			  struct net_device *dev, unsigned int iqid)
2192{
2193	int ret, nentries;
2194	struct fw_eq_ofld_cmd c;
2195	struct port_info *pi = netdev_priv(dev);
2196
2197	/* Add status entries */
2198	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2199
2200	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2201			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2202			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
2203			NUMA_NO_NODE);
2204	if (!txq->q.desc)
2205		return -ENOMEM;
2206
2207	memset(&c, 0, sizeof(c));
2208	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2209			    FW_CMD_WRITE | FW_CMD_EXEC |
2210			    FW_EQ_OFLD_CMD_PFN(adap->fn) |
2211			    FW_EQ_OFLD_CMD_VFN(0));
2212	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
2213				 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
2214	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
2215				   FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
2216				   FW_EQ_OFLD_CMD_FETCHRO(1) |
2217				   FW_EQ_OFLD_CMD_IQID(iqid));
2218	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
2219				  FW_EQ_OFLD_CMD_FBMAX(3) |
2220				  FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
2221				  FW_EQ_OFLD_CMD_EQSIZE(nentries));
2222	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2223
2224	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2225	if (ret) {
2226		kfree(txq->q.sdesc);
2227		txq->q.sdesc = NULL;
2228		dma_free_coherent(adap->pdev_dev,
2229				  nentries * sizeof(struct tx_desc),
2230				  txq->q.desc, txq->q.phys_addr);
2231		txq->q.desc = NULL;
2232		return ret;
2233	}
2234
2235	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2236	txq->adap = adap;
2237	skb_queue_head_init(&txq->sendq);
2238	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2239	txq->full = 0;
2240	txq->mapping_err = 0;
2241	return 0;
2242}
2243
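/* Free the descriptor ring of a Tx queue and reset its s/w state. */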
2244static void free_txq(struct adapter *adap, struct sge_txq *q)
2245{
2246	dma_free_coherent(adap->pdev_dev,
2247			  q->size * sizeof(struct tx_desc) + STAT_LEN,
2248			  q->desc, q->phys_addr);
2249	q->cntxt_id = 0;
2250	q->sdesc = NULL;
2251	q->desc = NULL;
2252}
2253
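/*
 * Free an ingress queue and, if one is attached, its free list: the
 * contexts are released in the firmware and the rings freed on the host.
 */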
2254static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2255			 struct sge_fl *fl)
2256{
2257	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2258
2259	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2260	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
2261		   rq->cntxt_id, fl_id, 0xffff);
2262	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2263			  rq->desc, rq->phys_addr);
2264	netif_napi_del(&rq->napi);
2265	rq->netdev = NULL;
2266	rq->cntxt_id = rq->abs_id = 0;
2267	rq->desc = NULL;
2268
2269	if (fl) {
2270		free_rx_bufs(adap, fl, fl->avail);
2271		dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
2272				  fl->desc, fl->addr);
2273		kfree(fl->sdesc);
2274		fl->sdesc = NULL;
2275		fl->cntxt_id = 0;
2276		fl->desc = NULL;
2277	}
2278}
2279
2280/**
2281 *	t4_free_sge_resources - free SGE resources
2282 *	@adap: the adapter
2283 *
2284 *	Frees resources used by the SGE queue sets.
2285 */
2286void t4_free_sge_resources(struct adapter *adap)
2287{
2288	int i;
2289	struct sge_eth_rxq *eq = adap->sge.ethrxq;
2290	struct sge_eth_txq *etq = adap->sge.ethtxq;
2291	struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
2292
2293	/* clean up Ethernet Tx/Rx queues */
2294	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
2295		if (eq->rspq.desc)
2296			free_rspq_fl(adap, &eq->rspq, &eq->fl);
2297		if (etq->q.desc) {
2298			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
2299				       etq->q.cntxt_id);
2300			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2301			kfree(etq->q.sdesc);
2302			free_txq(adap, &etq->q);
2303		}
2304	}
2305
2306	/* clean up RDMA and iSCSI Rx queues */
2307	for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
2308		if (oq->rspq.desc)
2309			free_rspq_fl(adap, &oq->rspq, &oq->fl);
2310	}
2311	for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
2312		if (oq->rspq.desc)
2313			free_rspq_fl(adap, &oq->rspq, &oq->fl);
2314	}
2315
2316	/* clean up offload Tx queues */
2317	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
2318		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
2319
2320		if (q->q.desc) {
2321			tasklet_kill(&q->qresume_tsk);
2322			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
2323					q->q.cntxt_id);
2324			free_tx_desc(adap, &q->q, q->q.in_use, false);
2325			kfree(q->q.sdesc);
2326			__skb_queue_purge(&q->sendq);
2327			free_txq(adap, &q->q);
2328		}
2329	}
2330
2331	/* clean up control Tx queues */
2332	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
2333		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
2334
2335		if (cq->q.desc) {
2336			tasklet_kill(&cq->qresume_tsk);
2337			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
2338					cq->q.cntxt_id);
2339			__skb_queue_purge(&cq->sendq);
2340			free_txq(adap, &cq->q);
2341		}
2342	}
2343
2344	if (adap->sge.fw_evtq.desc)
2345		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2346
2347	if (adap->sge.intrq.desc)
2348		free_rspq_fl(adap, &adap->sge.intrq, NULL);
2349
2350	/* clear the reverse egress queue map */
2351	memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
2352}
2353
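/**
 *	t4_sge_start - enable SGE operation
 *	@adap: the adapter
 *
 *	Starts the timers that drive the SGE's periodic Rx and Tx
 *	housekeeping work.
 */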
2354void t4_sge_start(struct adapter *adap)
2355{
2356	adap->sge.ethtxq_rover = 0;
2357	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2358	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2359}
2360
2361/**
2362 *	t4_sge_stop - disable SGE operation
2363 *	@adap: the adapter
2364 *
2365 *	Stop tasklets and timers associated with the DMA engine.  Note that
2366 *	this is effective only if measures have been taken to disable any HW
2367 *	events that may restart them.
2368 */
2369void t4_sge_stop(struct adapter *adap)
2370{
2371	int i;
2372	struct sge *s = &adap->sge;
2373
2374	if (in_interrupt())  /* actions below require waiting */
2375		return;
2376
2377	if (s->rx_timer.function)
2378		del_timer_sync(&s->rx_timer);
2379	if (s->tx_timer.function)
2380		del_timer_sync(&s->tx_timer);
2381
2382	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
2383		struct sge_ofld_txq *q = &s->ofldtxq[i];
2384
2385		if (q->q.desc)
2386			tasklet_kill(&q->qresume_tsk);
2387	}
2388	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
2389		struct sge_ctrl_txq *cq = &s->ctrlq[i];
2390
2391		if (cq->q.desc)
2392			tasklet_kill(&cq->qresume_tsk);
2393	}
2394}
2395
2396/**
2397 *	t4_sge_init - initialize SGE
2398 *	@adap: the adapter
2399 *
2400 *	Performs SGE initialization needed every time after a chip reset.
2401 *	We do not initialize any of the queues here; instead, the driver's
2402 *	top level must request them individually.
2403 */
2404void t4_sge_init(struct adapter *adap)
2405{
2406	unsigned int i, v;
2407	struct sge *s = &adap->sge;
2408	unsigned int fl_align_log = ilog2(FL_ALIGN);
2409
2410	t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
2411			 INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
2412			 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
2413			 RXPKTCPLMODE |
2414			 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
2415
2416	for (i = v = 0; i < 32; i += 4)
2417		v |= (PAGE_SHIFT - 10) << i;
2418	t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
2419	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
2420#if FL_PG_ORDER > 0
2421	t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
2422#endif
2423	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
2424		     THRESHOLD_0(s->counter_val[0]) |
2425		     THRESHOLD_1(s->counter_val[1]) |
2426		     THRESHOLD_2(s->counter_val[2]) |
2427		     THRESHOLD_3(s->counter_val[3]));
2428	t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
2429		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
2430		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
2431	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
2432		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
2433		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
2434	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
2435		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
2436		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
2437	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2438	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2439	s->starve_thres = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
2440	s->idma_state[0] = s->idma_state[1] = 0;
2441	spin_lock_init(&s->intrq_lock);
2442}