   1/*
   2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
   3 *
   4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include <linux/skbuff.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/if_vlan.h>
  39#include <linux/ip.h>
  40#include <linux/dma-mapping.h>
  41#include <linux/jiffies.h>
  42#include <linux/prefetch.h>
  43#include <linux/export.h>
  44#include <net/ipv6.h>
  45#include <net/tcp.h>
  46#include "cxgb4.h"
  47#include "t4_regs.h"
  48#include "t4_msg.h"
  49#include "t4fw_api.h"
  50
  51/*
  52 * Rx buffer size.  We use largish buffers if possible but settle for single
  53 * pages under memory shortage.
  54 */
  55#if PAGE_SHIFT >= 16
  56# define FL_PG_ORDER 0
  57#else
  58# define FL_PG_ORDER (16 - PAGE_SHIFT)
  59#endif
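
/*
 * Worked example (a sketch assuming 4KB pages, i.e. PAGE_SHIFT == 12):
 * FL_PG_ORDER = 16 - 12 = 4, so a "large" FL buffer spans
 * PAGE_SIZE << FL_PG_ORDER = 4KB << 4 = 64KB.  On systems whose pages are
 * already 64KB or bigger the order is 0 and a single page is used.
 */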
  60
  61/* RX_PULL_LEN should be <= RX_COPY_THRES */
  62#define RX_COPY_THRES    256
  63#define RX_PULL_LEN      128
  64
  65/*
  66 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
  67 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
  68 */
  69#define RX_PKT_SKB_LEN   512
  70
  71/* Ethernet header padding prepended to RX_PKTs */
  72#define RX_PKT_PAD 2
  73
  74/*
  75 * Max number of Tx descriptors we clean up at a time.  Should be modest as
  76 * freeing skbs isn't cheap and it happens while holding locks.  We just need
   77 * to free packets faster than they arrive; that way we eventually catch up
   78 * and keep the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
  79 */
  80#define MAX_TX_RECLAIM 16
  81
  82/*
  83 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
  84 * allocating buffers isn't cheap either.
  85 */
  86#define MAX_RX_REFILL 16U
  87
  88/*
   89 * Period of the Rx queue check timer.  This timer runs infrequently as it
   90 * only has work to do when the system experiences severe memory shortage.
  91 */
  92#define RX_QCHECK_PERIOD (HZ / 2)
  93
  94/*
  95 * Period of the Tx queue check timer.
  96 */
  97#define TX_QCHECK_PERIOD (HZ / 2)
  98
  99/*
 100 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 101 */
 102#define MAX_TIMER_TX_RECLAIM 100
 103
 104/*
 105 * Timer index used when backing off due to memory shortage.
 106 */
 107#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
 108
 109/*
 110 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
 111 * attempt to refill it.
 112 */
 113#define FL_STARVE_THRES 4
 114
 115/*
 116 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
 117 * This is the same as calc_tx_descs() for a TSO packet with
 118 * nr_frags == MAX_SKB_FRAGS.
 119 */
 120#define ETHTXQ_STOP_THRES \
 121	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
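
/*
 * Worked example (a sketch assuming MAX_SKB_FRAGS == 18, the value for 4KB
 * pages and a 64KB maximum GSO payload): the SGL part needs
 * (3 * 18) / 2 + (18 & 1) = 27 flits, or DIV_ROUND_UP(27, 8) = 4
 * descriptors, plus 1 descriptor for the WR and CPL headers, so the queue
 * is suspended once fewer than 5 descriptors remain.
 */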
 122
 123/*
 124 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
  125 * for a full-sized WR.
 126 */
 127#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
 128
 129/*
 130 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 131 * into a WR.
 132 */
 133#define MAX_IMM_TX_PKT_LEN 128
 134
 135/*
 136 * Max size of a WR sent through a control Tx queue.
 137 */
 138#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
 139
 140enum {
 141	/* packet alignment in FL buffers */
 142	FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
 143	/* egress status entry size */
 144	STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
 145};
 146
 147struct tx_sw_desc {                /* SW state per Tx descriptor */
 148	struct sk_buff *skb;
 149	struct ulptx_sgl *sgl;
 150};
 151
 152struct rx_sw_desc {                /* SW state per Rx descriptor */
 153	struct page *page;
 154	dma_addr_t dma_addr;
 155};
 156
 157/*
 158 * The low bits of rx_sw_desc.dma_addr have special meaning.
 159 */
 160enum {
 161	RX_LARGE_BUF    = 1 << 0, /* buffer is larger than PAGE_SIZE */
 162	RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
 163};
 164
 165static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
 166{
 167	return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
 168}
 169
 170static inline bool is_buf_mapped(const struct rx_sw_desc *d)
 171{
 172	return !(d->dma_addr & RX_UNMAPPED_BUF);
 173}
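
/*
 * This encoding works because FL pages are mapped at page-aligned DMA
 * addresses, leaving the two low bits of dma_addr always zero.  A minimal
 * sketch of how a flag travels with the address (illustrative only, with
 * the mapping-error check omitted):
 */
#if 0	/* example fragment, not part of the driver */
static void example_flag_usage(struct device *dev, struct page *pg)
{
	dma_addr_t mapping = dma_map_page(dev, pg, 0,
					  PAGE_SIZE << FL_PG_ORDER,
					  DMA_FROM_DEVICE);

	mapping |= RX_LARGE_BUF;     /* tag: buffer larger than PAGE_SIZE */
	/* the tag must be masked off again before the address is used */
	dma_unmap_page(dev, mapping & ~(dma_addr_t)(RX_LARGE_BUF |
						    RX_UNMAPPED_BUF),
		       PAGE_SIZE << FL_PG_ORDER, DMA_FROM_DEVICE);
}
#endif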
 174
 175/**
 176 *	txq_avail - return the number of available slots in a Tx queue
 177 *	@q: the Tx queue
 178 *
 179 *	Returns the number of descriptors in a Tx queue available to write new
 180 *	packets.
 181 */
 182static inline unsigned int txq_avail(const struct sge_txq *q)
 183{
 184	return q->size - 1 - q->in_use;
 185}
 186
 187/**
 188 *	fl_cap - return the capacity of a free-buffer list
 189 *	@fl: the FL
 190 *
  191 *	Returns the capacity of a free-buffer list.  The capacity is less than
  192 *	the size because one HW descriptor (8 buffers) needs to be left
  193 *	unpopulated; otherwise HW will think the FL is empty.
 194 */
 195static inline unsigned int fl_cap(const struct sge_fl *fl)
 196{
 197	return fl->size - 8;   /* 1 descriptor = 8 buffers */
 198}
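
/*
 * Example: a 1024-entry FL can hold at most fl_cap() == 1016 buffers; the
 * last HW descriptor's worth of 8 buffers stays unpopulated so the producer
 * index can never catch up with the consumer index.
 */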
 199
 200static inline bool fl_starving(const struct sge_fl *fl)
 201{
 202	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
 203}
 204
 205static int map_skb(struct device *dev, const struct sk_buff *skb,
 206		   dma_addr_t *addr)
 207{
 208	const skb_frag_t *fp, *end;
 209	const struct skb_shared_info *si;
 210
 211	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
 212	if (dma_mapping_error(dev, *addr))
 213		goto out_err;
 214
 215	si = skb_shinfo(skb);
 216	end = &si->frags[si->nr_frags];
 217
 218	for (fp = si->frags; fp < end; fp++) {
 219		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
 220					   DMA_TO_DEVICE);
 221		if (dma_mapping_error(dev, *addr))
 222			goto unwind;
 223	}
 224	return 0;
 225
 226unwind:
 227	while (fp-- > si->frags)
 228		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
 229
 230	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
 231out_err:
 232	return -ENOMEM;
 233}
 234
 235#ifdef CONFIG_NEED_DMA_MAP_STATE
 236static void unmap_skb(struct device *dev, const struct sk_buff *skb,
 237		      const dma_addr_t *addr)
 238{
 239	const skb_frag_t *fp, *end;
 240	const struct skb_shared_info *si;
 241
 242	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
 243
 244	si = skb_shinfo(skb);
 245	end = &si->frags[si->nr_frags];
 246	for (fp = si->frags; fp < end; fp++)
 247		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
 248}
 249
 250/**
 251 *	deferred_unmap_destructor - unmap a packet when it is freed
 252 *	@skb: the packet
 253 *
 254 *	This is the packet destructor used for Tx packets that need to remain
 255 *	mapped until they are freed rather than until their Tx descriptors are
 256 *	freed.
 257 */
 258static void deferred_unmap_destructor(struct sk_buff *skb)
 259{
 260	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
 261}
 262#endif
 263
 264static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
 265		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
 266{
 267	const struct ulptx_sge_pair *p;
 268	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
 269
 270	if (likely(skb_headlen(skb)))
 271		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
 272				 DMA_TO_DEVICE);
 273	else {
 274		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
 275			       DMA_TO_DEVICE);
 276		nfrags--;
 277	}
 278
 279	/*
 280	 * the complexity below is because of the possibility of a wrap-around
 281	 * in the middle of an SGL
 282	 */
 283	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
 284		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
 285unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
 286				       ntohl(p->len[0]), DMA_TO_DEVICE);
 287			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
 288				       ntohl(p->len[1]), DMA_TO_DEVICE);
 289			p++;
 290		} else if ((u8 *)p == (u8 *)q->stat) {
 291			p = (const struct ulptx_sge_pair *)q->desc;
 292			goto unmap;
 293		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
 294			const __be64 *addr = (const __be64 *)q->desc;
 295
 296			dma_unmap_page(dev, be64_to_cpu(addr[0]),
 297				       ntohl(p->len[0]), DMA_TO_DEVICE);
 298			dma_unmap_page(dev, be64_to_cpu(addr[1]),
 299				       ntohl(p->len[1]), DMA_TO_DEVICE);
 300			p = (const struct ulptx_sge_pair *)&addr[2];
 301		} else {
 302			const __be64 *addr = (const __be64 *)q->desc;
 303
 304			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
 305				       ntohl(p->len[0]), DMA_TO_DEVICE);
 306			dma_unmap_page(dev, be64_to_cpu(addr[0]),
 307				       ntohl(p->len[1]), DMA_TO_DEVICE);
 308			p = (const struct ulptx_sge_pair *)&addr[1];
 309		}
 310	}
 311	if (nfrags) {
 312		__be64 addr;
 313
 314		if ((u8 *)p == (u8 *)q->stat)
 315			p = (const struct ulptx_sge_pair *)q->desc;
 316		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
 317						       *(const __be64 *)q->desc;
 318		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
 319			       DMA_TO_DEVICE);
 320	}
 321}
 322
 323/**
 324 *	free_tx_desc - reclaims Tx descriptors and their buffers
  325 *	@adap: the adapter
 326 *	@q: the Tx queue to reclaim descriptors from
 327 *	@n: the number of descriptors to reclaim
 328 *	@unmap: whether the buffers should be unmapped for DMA
 329 *
 330 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 331 *	Tx buffers.  Called with the Tx queue lock held.
 332 */
 333static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
 334			 unsigned int n, bool unmap)
 335{
 336	struct tx_sw_desc *d;
 337	unsigned int cidx = q->cidx;
 338	struct device *dev = adap->pdev_dev;
 339
 340	d = &q->sdesc[cidx];
 341	while (n--) {
 342		if (d->skb) {                       /* an SGL is present */
 343			if (unmap)
 344				unmap_sgl(dev, d->skb, d->sgl, q);
 345			kfree_skb(d->skb);
 346			d->skb = NULL;
 347		}
 348		++d;
 349		if (++cidx == q->size) {
 350			cidx = 0;
 351			d = q->sdesc;
 352		}
 353	}
 354	q->cidx = cidx;
 355}
 356
 357/*
 358 * Return the number of reclaimable descriptors in a Tx queue.
 359 */
 360static inline int reclaimable(const struct sge_txq *q)
 361{
 362	int hw_cidx = ntohs(q->stat->cidx);
 363	hw_cidx -= q->cidx;
 364	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
 365}
 366
 367/**
 368 *	reclaim_completed_tx - reclaims completed Tx descriptors
 369 *	@adap: the adapter
 370 *	@q: the Tx queue to reclaim completed descriptors from
 371 *	@unmap: whether the buffers should be unmapped for DMA
 372 *
 373 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 374 *	and frees the associated buffers if possible.  Called with the Tx
 375 *	queue locked.
 376 */
 377static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
 378					bool unmap)
 379{
 380	int avail = reclaimable(q);
 381
 382	if (avail) {
 383		/*
 384		 * Limit the amount of clean up work we do at a time to keep
 385		 * the Tx lock hold time O(1).
 386		 */
 387		if (avail > MAX_TX_RECLAIM)
 388			avail = MAX_TX_RECLAIM;
 389
 390		free_tx_desc(adap, q, avail, unmap);
 391		q->in_use -= avail;
 392	}
 393}
 394
 395static inline int get_buf_size(const struct rx_sw_desc *d)
 396{
 397#if FL_PG_ORDER > 0
 398	return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
 399					      PAGE_SIZE;
 400#else
 401	return PAGE_SIZE;
 402#endif
 403}
 404
 405/**
 406 *	free_rx_bufs - free the Rx buffers on an SGE free list
 407 *	@adap: the adapter
 408 *	@q: the SGE free list to free buffers from
 409 *	@n: how many buffers to free
 410 *
 411 *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
 412 *	buffers must be made inaccessible to HW before calling this function.
 413 */
 414static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
 415{
 416	while (n--) {
 417		struct rx_sw_desc *d = &q->sdesc[q->cidx];
 418
 419		if (is_buf_mapped(d))
 420			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
 421				       get_buf_size(d), PCI_DMA_FROMDEVICE);
 422		put_page(d->page);
 423		d->page = NULL;
 424		if (++q->cidx == q->size)
 425			q->cidx = 0;
 426		q->avail--;
 427	}
 428}
 429
 430/**
 431 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 432 *	@adap: the adapter
 433 *	@q: the SGE free list
 434 *
 435 *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
 436 *	buffer must be made inaccessible to HW before calling this function.
 437 *
 438 *	This is similar to @free_rx_bufs above but does not free the buffer.
 439 *	Do note that the FL still loses any further access to the buffer.
 440 */
 441static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
 442{
 443	struct rx_sw_desc *d = &q->sdesc[q->cidx];
 444
 445	if (is_buf_mapped(d))
 446		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
 447			       get_buf_size(d), PCI_DMA_FROMDEVICE);
 448	d->page = NULL;
 449	if (++q->cidx == q->size)
 450		q->cidx = 0;
 451	q->avail--;
 452}
 453
 454static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
 455{
 456	if (q->pend_cred >= 8) {
 457		wmb();
 458		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
 459			     QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
 460		q->pend_cred &= 7;
 461	}
 462}
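
/*
 * Example of the credit arithmetic above: after 21 buffers have been posted,
 * pend_cred == 21 and the doorbell advertises PIDX(21 / 8) == 2 new
 * descriptors; the remaining 21 & 7 == 5 credits stay pending until at
 * least 3 more buffers accumulate.
 */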
 463
 464static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
 465				  dma_addr_t mapping)
 466{
 467	sd->page = pg;
 468	sd->dma_addr = mapping;      /* includes size low bits */
 469}
 470
 471/**
 472 *	refill_fl - refill an SGE Rx buffer ring
 473 *	@adap: the adapter
 474 *	@q: the ring to refill
 475 *	@n: the number of new buffers to allocate
 476 *	@gfp: the gfp flags for the allocations
 477 *
 478 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
  479 *	allocated with the supplied gfp flags.  The caller must ensure that
  480 *	@n does not exceed the queue's capacity.  If the queue is found
  481 *	critically low afterwards, it is marked in the bitmap of starving FLs.
 482 *
 483 *	Returns the number of buffers allocated.
 484 */
 485static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 486			      gfp_t gfp)
 487{
 488	struct page *pg;
 489	dma_addr_t mapping;
 490	unsigned int cred = q->avail;
 491	__be64 *d = &q->desc[q->pidx];
 492	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 493
 494	gfp |= __GFP_NOWARN | __GFP_COLD;
 495
 496#if FL_PG_ORDER > 0
 497	/*
 498	 * Prefer large buffers
 499	 */
 500	while (n) {
 501		pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
 502		if (unlikely(!pg)) {
 503			q->large_alloc_failed++;
 504			break;       /* fall back to single pages */
 505		}
 506
 507		mapping = dma_map_page(adap->pdev_dev, pg, 0,
 508				       PAGE_SIZE << FL_PG_ORDER,
 509				       PCI_DMA_FROMDEVICE);
 510		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
 511			__free_pages(pg, FL_PG_ORDER);
 512			goto out;   /* do not try small pages for this error */
 513		}
 514		mapping |= RX_LARGE_BUF;
 515		*d++ = cpu_to_be64(mapping);
 516
 517		set_rx_sw_desc(sd, pg, mapping);
 518		sd++;
 519
 520		q->avail++;
 521		if (++q->pidx == q->size) {
 522			q->pidx = 0;
 523			sd = q->sdesc;
 524			d = q->desc;
 525		}
 526		n--;
 527	}
 528#endif
 529
 530	while (n--) {
 531		pg = alloc_page(gfp);
 532		if (unlikely(!pg)) {
 533			q->alloc_failed++;
 534			break;
 535		}
 536
 537		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
 538				       PCI_DMA_FROMDEVICE);
 539		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
 540			put_page(pg);
 541			goto out;
 542		}
 543		*d++ = cpu_to_be64(mapping);
 544
 545		set_rx_sw_desc(sd, pg, mapping);
 546		sd++;
 547
 548		q->avail++;
 549		if (++q->pidx == q->size) {
 550			q->pidx = 0;
 551			sd = q->sdesc;
 552			d = q->desc;
 553		}
 554	}
 555
 556out:	cred = q->avail - cred;
 557	q->pend_cred += cred;
 558	ring_fl_db(adap, q);
 559
 560	if (unlikely(fl_starving(q))) {
 561		smp_wmb();
 562		set_bit(q->cntxt_id - adap->sge.egr_start,
 563			adap->sge.starving_fl);
 564	}
 565
 566	return cred;
 567}
 568
 569static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
 570{
 571	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
 572		  GFP_ATOMIC);
 573}
 574
 575/**
 576 *	alloc_ring - allocate resources for an SGE descriptor ring
 577 *	@dev: the PCI device's core device
 578 *	@nelem: the number of descriptors
 579 *	@elem_size: the size of each descriptor
 580 *	@sw_size: the size of the SW state associated with each ring element
 581 *	@phys: the physical address of the allocated ring
 582 *	@metadata: address of the array holding the SW state for the ring
 583 *	@stat_size: extra space in HW ring for status information
 584 *	@node: preferred node for memory allocations
 585 *
 586 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 587 *	free buffer lists, or response queues.  Each SGE ring requires
 588 *	space for its HW descriptors plus, optionally, space for the SW state
 589 *	associated with each HW entry (the metadata).  The function returns
 590 *	three values: the virtual address for the HW ring (the return value
  591 *	of the function), the bus address of the HW ring (returned through
  592 *	@phys), and the address of the SW ring (returned through @metadata).
 593 */
 594static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
 595			size_t sw_size, dma_addr_t *phys, void *metadata,
 596			size_t stat_size, int node)
 597{
 598	size_t len = nelem * elem_size + stat_size;
 599	void *s = NULL;
 600	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
 601
 602	if (!p)
 603		return NULL;
 604	if (sw_size) {
 605		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
 606
 607		if (!s) {
 608			dma_free_coherent(dev, len, p, *phys);
 609			return NULL;
 610		}
 611	}
 612	if (metadata)
 613		*(void **)metadata = s;
 614	memset(p, 0, len);
 615	return p;
 616}
 617
 618/**
 619 *	sgl_len - calculates the size of an SGL of the given capacity
 620 *	@n: the number of SGL entries
 621 *
 622 *	Calculates the number of flits needed for a scatter/gather list that
 623 *	can hold the given number of entries.
 624 */
 625static inline unsigned int sgl_len(unsigned int n)
 626{
 627	n--;
 628	return (3 * n) / 2 + (n & 1) + 2;
 629}
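
/*
 * Worked example: for n == 3 buffers the first entry occupies the two
 * leading flits (the cmd_nsge/len0 flit and addr0), and the remaining 2
 * entries pack into one ulptx_sge_pair of 3 flits, so
 * sgl_len(3) = (3 * 2) / 2 + (2 & 1) + 2 = 5 flits.
 */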
 630
 631/**
  632 *	flits_to_desc - returns the number of Tx descriptors for the given flits
 633 *	@n: the number of flits
 634 *
 635 *	Returns the number of Tx descriptors needed for the supplied number
 636 *	of flits.
 637 */
 638static inline unsigned int flits_to_desc(unsigned int n)
 639{
 640	BUG_ON(n > SGE_MAX_WR_LEN / 8);
 641	return DIV_ROUND_UP(n, 8);
 642}
 643
 644/**
 645 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 646 *	@skb: the packet
 647 *
 648 *	Returns whether an Ethernet packet is small enough to fit as
 649 *	immediate data.
 650 */
 651static inline int is_eth_imm(const struct sk_buff *skb)
 652{
 653	return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
 654}
 655
 656/**
 657 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 658 *	@skb: the packet
 659 *
 660 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 661 *	packet, including the needed WR and CPL headers.
 662 */
 663static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
 664{
 665	unsigned int flits;
 666
 667	if (is_eth_imm(skb))
 668		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
 669
 670	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
 671	if (skb_shinfo(skb)->gso_size)
 672		flits += 2;
 673	return flits;
 674}
 675
 676/**
 677 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 678 *	@skb: the packet
 679 *
 680 *	Returns the number of Tx descriptors needed for the given Ethernet
 681 *	packet, including the needed WR and CPL headers.
 682 */
 683static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
 684{
 685	return flits_to_desc(calc_tx_flits(skb));
 686}
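
/*
 * Worked example (a sketch for a non-TSO packet with linear data plus 2
 * page fragments): calc_tx_flits() gives sgl_len(2 + 1) + 4 = 5 + 4 = 9
 * flits for the SGL plus WR/CPL headers, and flits_to_desc() turns that
 * into DIV_ROUND_UP(9, 8) = 2 Tx descriptors.
 */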
 687
 688/**
 689 *	write_sgl - populate a scatter/gather list for a packet
 690 *	@skb: the packet
 691 *	@q: the Tx queue we are writing into
 692 *	@sgl: starting location for writing the SGL
 693 *	@end: points right after the end of the SGL
 694 *	@start: start offset into skb main-body data to include in the SGL
 695 *	@addr: the list of bus addresses for the SGL elements
 696 *
 697 *	Generates a gather list for the buffers that make up a packet.
 698 *	The caller must provide adequate space for the SGL that will be written.
 699 *	The SGL includes all of the packet's page fragments and the data in its
 700 *	main body except for the first @start bytes.  @sgl must be 16-byte
 701 *	aligned and within a Tx descriptor with available space.  @end points
 702 *	right after the end of the SGL but does not account for any potential
 703 *	wrap around, i.e., @end > @sgl.
 704 */
 705static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
 706		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
 707		      const dma_addr_t *addr)
 708{
 709	unsigned int i, len;
 710	struct ulptx_sge_pair *to;
 711	const struct skb_shared_info *si = skb_shinfo(skb);
 712	unsigned int nfrags = si->nr_frags;
 713	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
 714
 715	len = skb_headlen(skb) - start;
 716	if (likely(len)) {
 717		sgl->len0 = htonl(len);
 718		sgl->addr0 = cpu_to_be64(addr[0] + start);
 719		nfrags++;
 720	} else {
 721		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
 722		sgl->addr0 = cpu_to_be64(addr[1]);
 723	}
 724
 725	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
 726	if (likely(--nfrags == 0))
 727		return;
 728	/*
 729	 * Most of the complexity below deals with the possibility we hit the
 730	 * end of the queue in the middle of writing the SGL.  For this case
 731	 * only we create the SGL in a temporary buffer and then copy it.
 732	 */
 733	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
 734
 735	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
 736		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
 737		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
 738		to->addr[0] = cpu_to_be64(addr[i]);
 739		to->addr[1] = cpu_to_be64(addr[++i]);
 740	}
 741	if (nfrags) {
 742		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
 743		to->len[1] = cpu_to_be32(0);
 744		to->addr[0] = cpu_to_be64(addr[i + 1]);
 745	}
 746	if (unlikely((u8 *)end > (u8 *)q->stat)) {
 747		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
 748
 749		if (likely(part0))
 750			memcpy(sgl->sge, buf, part0);
 751		part1 = (u8 *)end - (u8 *)q->stat;
 752		memcpy(q->desc, (u8 *)buf + part0, part1);
 753		end = (void *)q->desc + part1;
 754	}
 755	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
 756		*(u64 *)end = 0;
 757}
 758
 759/**
 760 *	ring_tx_db - check and potentially ring a Tx queue's doorbell
 761 *	@adap: the adapter
 762 *	@q: the Tx queue
 763 *	@n: number of new descriptors to give to HW
 764 *
  765 *	Ring the doorbell for a Tx queue.
 766 */
 767static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
 768{
 769	wmb();            /* write descriptors before telling HW */
 770	spin_lock(&q->db_lock);
 771	if (!q->db_disabled) {
 772		t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
 773			     V_QID(q->cntxt_id) | V_PIDX(n));
 774	}
 775	q->db_pidx = q->pidx;
 776	spin_unlock(&q->db_lock);
 777}
 778
 779/**
 780 *	inline_tx_skb - inline a packet's data into Tx descriptors
 781 *	@skb: the packet
 782 *	@q: the Tx queue where the packet will be inlined
 783 *	@pos: starting position in the Tx queue where to inline the packet
 784 *
 785 *	Inline a packet's contents directly into Tx descriptors, starting at
 786 *	the given position within the Tx DMA ring.
 787 *	Most of the complexity of this operation is dealing with wrap arounds
 788 *	in the middle of the packet we want to inline.
 789 */
 790static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
 791			  void *pos)
 792{
 793	u64 *p;
 794	int left = (void *)q->stat - pos;
 795
 796	if (likely(skb->len <= left)) {
 797		if (likely(!skb->data_len))
 798			skb_copy_from_linear_data(skb, pos, skb->len);
 799		else
 800			skb_copy_bits(skb, 0, pos, skb->len);
 801		pos += skb->len;
 802	} else {
 803		skb_copy_bits(skb, 0, pos, left);
 804		skb_copy_bits(skb, left, q->desc, skb->len - left);
 805		pos = (void *)q->desc + (skb->len - left);
 806	}
 807
 808	/* 0-pad to multiple of 16 */
 809	p = PTR_ALIGN(pos, 8);
 810	if ((uintptr_t)p & 8)
 811		*p = 0;
 812}
 813
 814/*
 815 * Figure out what HW csum a packet wants and return the appropriate control
 816 * bits.
 817 */
 818static u64 hwcsum(const struct sk_buff *skb)
 819{
 820	int csum_type;
 821	const struct iphdr *iph = ip_hdr(skb);
 822
 823	if (iph->version == 4) {
 824		if (iph->protocol == IPPROTO_TCP)
 825			csum_type = TX_CSUM_TCPIP;
 826		else if (iph->protocol == IPPROTO_UDP)
 827			csum_type = TX_CSUM_UDPIP;
 828		else {
 829nocsum:			/*
 830			 * unknown protocol, disable HW csum
 831			 * and hope a bad packet is detected
 832			 */
 833			return TXPKT_L4CSUM_DIS;
 834		}
 835	} else {
 836		/*
 837		 * this doesn't work with extension headers
 838		 */
 839		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
 840
 841		if (ip6h->nexthdr == IPPROTO_TCP)
 842			csum_type = TX_CSUM_TCPIP6;
 843		else if (ip6h->nexthdr == IPPROTO_UDP)
 844			csum_type = TX_CSUM_UDPIP6;
 845		else
 846			goto nocsum;
 847	}
 848
 849	if (likely(csum_type >= TX_CSUM_TCPIP))
 850		return TXPKT_CSUM_TYPE(csum_type) |
 851			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
 852			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
 853	else {
 854		int start = skb_transport_offset(skb);
 855
 856		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
 857			TXPKT_CSUM_LOC(start + skb->csum_offset);
 858	}
 859}
 860
 861static void eth_txq_stop(struct sge_eth_txq *q)
 862{
 863	netif_tx_stop_queue(q->txq);
 864	q->q.stops++;
 865}
 866
 867static inline void txq_advance(struct sge_txq *q, unsigned int n)
 868{
 869	q->in_use += n;
 870	q->pidx += n;
 871	if (q->pidx >= q->size)
 872		q->pidx -= q->size;
 873}
 874
 875/**
 876 *	t4_eth_xmit - add a packet to an Ethernet Tx queue
 877 *	@skb: the packet
 878 *	@dev: the egress net device
 879 *
 880 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 881 */
 882netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 883{
 884	u32 wr_mid;
 885	u64 cntrl, *end;
 886	int qidx, credits;
 887	unsigned int flits, ndesc;
 888	struct adapter *adap;
 889	struct sge_eth_txq *q;
 890	const struct port_info *pi;
 891	struct fw_eth_tx_pkt_wr *wr;
 892	struct cpl_tx_pkt_core *cpl;
 893	const struct skb_shared_info *ssi;
 894	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 895
 896	/*
  897	 * The chip min packet length is 10 octets but we play it safe and reject
 898	 * anything shorter than an Ethernet header.
 899	 */
 900	if (unlikely(skb->len < ETH_HLEN)) {
 901out_free:	dev_kfree_skb(skb);
 902		return NETDEV_TX_OK;
 903	}
 904
 905	pi = netdev_priv(dev);
 906	adap = pi->adapter;
 907	qidx = skb_get_queue_mapping(skb);
 908	q = &adap->sge.ethtxq[qidx + pi->first_qset];
 909
 910	reclaim_completed_tx(adap, &q->q, true);
 911
 912	flits = calc_tx_flits(skb);
 913	ndesc = flits_to_desc(flits);
 914	credits = txq_avail(&q->q) - ndesc;
 915
 916	if (unlikely(credits < 0)) {
 917		eth_txq_stop(q);
 918		dev_err(adap->pdev_dev,
 919			"%s: Tx ring %u full while queue awake!\n",
 920			dev->name, qidx);
 921		return NETDEV_TX_BUSY;
 922	}
 923
 924	if (!is_eth_imm(skb) &&
 925	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
 926		q->mapping_err++;
 927		goto out_free;
 928	}
 929
 930	wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
 931	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
 932		eth_txq_stop(q);
 933		wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
 934	}
 935
 936	wr = (void *)&q->q.desc[q->q.pidx];
 937	wr->equiq_to_len16 = htonl(wr_mid);
 938	wr->r3 = cpu_to_be64(0);
 939	end = (u64 *)wr + flits;
 940
 941	ssi = skb_shinfo(skb);
 942	if (ssi->gso_size) {
 943		struct cpl_tx_pkt_lso *lso = (void *)wr;
 944		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
 945		int l3hdr_len = skb_network_header_len(skb);
 946		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
 947
 948		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
 949				       FW_WR_IMMDLEN(sizeof(*lso)));
 950		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
 951					LSO_FIRST_SLICE | LSO_LAST_SLICE |
 952					LSO_IPV6(v6) |
 953					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
 954					LSO_IPHDR_LEN(l3hdr_len / 4) |
 955					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
 956		lso->c.ipid_ofst = htons(0);
 957		lso->c.mss = htons(ssi->gso_size);
 958		lso->c.seqno_offset = htonl(0);
 959		lso->c.len = htonl(skb->len);
 960		cpl = (void *)(lso + 1);
 961		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
 962			TXPKT_IPHDR_LEN(l3hdr_len) |
 963			TXPKT_ETHHDR_LEN(eth_xtra_len);
 964		q->tso++;
 965		q->tx_cso += ssi->gso_segs;
 966	} else {
 967		int len;
 968
 969		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
 970		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
 971				       FW_WR_IMMDLEN(len));
 972		cpl = (void *)(wr + 1);
 973		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 974			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
 975			q->tx_cso++;
 976		} else
 977			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
 978	}
 979
 980	if (vlan_tx_tag_present(skb)) {
 981		q->vlan_ins++;
 982		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
 983	}
 984
 985	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
 986			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
 987	cpl->pack = htons(0);
 988	cpl->len = htons(skb->len);
 989	cpl->ctrl1 = cpu_to_be64(cntrl);
 990
 991	if (is_eth_imm(skb)) {
 992		inline_tx_skb(skb, &q->q, cpl + 1);
 993		dev_kfree_skb(skb);
 994	} else {
 995		int last_desc;
 996
 997		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
 998			  addr);
 999		skb_orphan(skb);
1000
1001		last_desc = q->q.pidx + ndesc - 1;
1002		if (last_desc >= q->q.size)
1003			last_desc -= q->q.size;
1004		q->q.sdesc[last_desc].skb = skb;
1005		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1006	}
1007
1008	txq_advance(&q->q, ndesc);
1009
1010	ring_tx_db(adap, &q->q, ndesc);
1011	return NETDEV_TX_OK;
1012}
1013
1014/**
1015 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1016 *	@q: the SGE control Tx queue
1017 *
1018 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1019 *	that send only immediate data (presently just the control queues) and
1020 *	thus do not have any sk_buffs to release.
1021 */
1022static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1023{
1024	int hw_cidx = ntohs(q->stat->cidx);
1025	int reclaim = hw_cidx - q->cidx;
1026
1027	if (reclaim < 0)
1028		reclaim += q->size;
1029
1030	q->in_use -= reclaim;
1031	q->cidx = hw_cidx;
1032}
1033
1034/**
1035 *	is_imm - check whether a packet can be sent as immediate data
1036 *	@skb: the packet
1037 *
1038 *	Returns true if a packet can be sent as a WR with immediate data.
1039 */
1040static inline int is_imm(const struct sk_buff *skb)
1041{
1042	return skb->len <= MAX_CTRL_WR_LEN;
1043}
1044
1045/**
1046 *	ctrlq_check_stop - check if a control queue is full and should stop
1047 *	@q: the queue
1048 *	@wr: most recent WR written to the queue
1049 *
1050 *	Check if a control queue has become full and should be stopped.
 1051 *	We clean up control queue descriptors very lazily, only when we run out.
1052 *	If the queue is still full after reclaiming any completed descriptors
1053 *	we suspend it and have the last WR wake it up.
1054 */
1055static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1056{
1057	reclaim_completed_tx_imm(&q->q);
1058	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1059		wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1060		q->q.stops++;
1061		q->full = 1;
1062	}
1063}
1064
1065/**
1066 *	ctrl_xmit - send a packet through an SGE control Tx queue
1067 *	@q: the control queue
1068 *	@skb: the packet
1069 *
1070 *	Send a packet through an SGE control Tx queue.  Packets sent through
1071 *	a control queue must fit entirely as immediate data.
1072 */
1073static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1074{
1075	unsigned int ndesc;
1076	struct fw_wr_hdr *wr;
1077
1078	if (unlikely(!is_imm(skb))) {
1079		WARN_ON(1);
1080		dev_kfree_skb(skb);
1081		return NET_XMIT_DROP;
1082	}
1083
1084	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1085	spin_lock(&q->sendq.lock);
1086
1087	if (unlikely(q->full)) {
1088		skb->priority = ndesc;                  /* save for restart */
1089		__skb_queue_tail(&q->sendq, skb);
1090		spin_unlock(&q->sendq.lock);
1091		return NET_XMIT_CN;
1092	}
1093
1094	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1095	inline_tx_skb(skb, &q->q, wr);
1096
1097	txq_advance(&q->q, ndesc);
1098	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1099		ctrlq_check_stop(q, wr);
1100
1101	ring_tx_db(q->adap, &q->q, ndesc);
1102	spin_unlock(&q->sendq.lock);
1103
1104	kfree_skb(skb);
1105	return NET_XMIT_SUCCESS;
1106}
1107
1108/**
1109 *	restart_ctrlq - restart a suspended control queue
1110 *	@data: the control queue to restart
1111 *
1112 *	Resumes transmission on a suspended Tx control queue.
1113 */
1114static void restart_ctrlq(unsigned long data)
1115{
1116	struct sk_buff *skb;
1117	unsigned int written = 0;
1118	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1119
1120	spin_lock(&q->sendq.lock);
1121	reclaim_completed_tx_imm(&q->q);
1122	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */
1123
1124	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1125		struct fw_wr_hdr *wr;
1126		unsigned int ndesc = skb->priority;     /* previously saved */
1127
1128		/*
1129		 * Write descriptors and free skbs outside the lock to limit
1130		 * wait times.  q->full is still set so new skbs will be queued.
1131		 */
1132		spin_unlock(&q->sendq.lock);
1133
1134		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1135		inline_tx_skb(skb, &q->q, wr);
1136		kfree_skb(skb);
1137
1138		written += ndesc;
1139		txq_advance(&q->q, ndesc);
1140		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1141			unsigned long old = q->q.stops;
1142
1143			ctrlq_check_stop(q, wr);
1144			if (q->q.stops != old) {          /* suspended anew */
1145				spin_lock(&q->sendq.lock);
1146				goto ringdb;
1147			}
1148		}
1149		if (written > 16) {
1150			ring_tx_db(q->adap, &q->q, written);
1151			written = 0;
1152		}
1153		spin_lock(&q->sendq.lock);
1154	}
1155	q->full = 0;
1156ringdb: if (written)
1157		ring_tx_db(q->adap, &q->q, written);
1158	spin_unlock(&q->sendq.lock);
1159}
1160
1161/**
1162 *	t4_mgmt_tx - send a management message
1163 *	@adap: the adapter
1164 *	@skb: the packet containing the management message
1165 *
1166 *	Send a management message through control queue 0.
1167 */
1168int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1169{
1170	int ret;
1171
1172	local_bh_disable();
1173	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1174	local_bh_enable();
1175	return ret;
1176}
1177
1178/**
1179 *	is_ofld_imm - check whether a packet can be sent as immediate data
1180 *	@skb: the packet
1181 *
1182 *	Returns true if a packet can be sent as an offload WR with immediate
1183 *	data.  We currently use the same limit as for Ethernet packets.
1184 */
1185static inline int is_ofld_imm(const struct sk_buff *skb)
1186{
1187	return skb->len <= MAX_IMM_TX_PKT_LEN;
1188}
1189
1190/**
1191 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
1192 *	@skb: the packet
1193 *
1194 *	Returns the number of flits needed for the given offload packet.
1195 *	These packets are already fully constructed and no additional headers
1196 *	will be added.
1197 */
1198static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1199{
1200	unsigned int flits, cnt;
1201
1202	if (is_ofld_imm(skb))
1203		return DIV_ROUND_UP(skb->len, 8);
1204
1205	flits = skb_transport_offset(skb) / 8U;   /* headers */
1206	cnt = skb_shinfo(skb)->nr_frags;
1207	if (skb->tail != skb->transport_header)
1208		cnt++;
1209	return flits + sgl_len(cnt);
1210}
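
/*
 * Example: an offload WR with 40 bytes of pre-built headers, 2 page
 * fragments, and linear payload beyond the headers (tail !=
 * transport_header, so cnt becomes 3) needs 40 / 8 = 5 header flits plus
 * sgl_len(3) = 5 SGL flits, i.e. 10 flits in total.
 */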
1211
1212/**
1213 *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 1214 *	@q: the queue to stop
1216 *
1217 *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1218 *	inability to map packets.  A periodic timer attempts to restart
1219 *	queues so marked.
1220 */
1221static void txq_stop_maperr(struct sge_ofld_txq *q)
1222{
1223	q->mapping_err++;
1224	q->q.stops++;
1225	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1226		q->adap->sge.txq_maperr);
1227}
1228
1229/**
1230 *	ofldtxq_stop - stop an offload Tx queue that has become full
1231 *	@q: the queue to stop
1232 *	@skb: the packet causing the queue to become full
1233 *
1234 *	Stops an offload Tx queue that has become full and modifies the packet
1235 *	being written to request a wakeup.
1236 */
1237static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
1238{
1239	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1240
1241	wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1242	q->q.stops++;
1243	q->full = 1;
1244}
1245
1246/**
 1247 *	service_ofldq - service an offload Tx queue
1248 *	@q: the offload queue
1249 *
1250 *	Services an offload Tx queue by moving packets from its packet queue
1251 *	to the HW Tx ring.  The function starts and ends with the queue locked.
1252 */
1253static void service_ofldq(struct sge_ofld_txq *q)
1254{
1255	u64 *pos;
1256	int credits;
1257	struct sk_buff *skb;
1258	unsigned int written = 0;
1259	unsigned int flits, ndesc;
1260
1261	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1262		/*
1263		 * We drop the lock but leave skb on sendq, thus retaining
1264		 * exclusive access to the state of the queue.
1265		 */
1266		spin_unlock(&q->sendq.lock);
1267
1268		reclaim_completed_tx(q->adap, &q->q, false);
1269
1270		flits = skb->priority;                /* previously saved */
1271		ndesc = flits_to_desc(flits);
1272		credits = txq_avail(&q->q) - ndesc;
1273		BUG_ON(credits < 0);
1274		if (unlikely(credits < TXQ_STOP_THRES))
1275			ofldtxq_stop(q, skb);
1276
1277		pos = (u64 *)&q->q.desc[q->q.pidx];
1278		if (is_ofld_imm(skb))
1279			inline_tx_skb(skb, &q->q, pos);
1280		else if (map_skb(q->adap->pdev_dev, skb,
1281				 (dma_addr_t *)skb->head)) {
1282			txq_stop_maperr(q);
1283			spin_lock(&q->sendq.lock);
1284			break;
1285		} else {
1286			int last_desc, hdr_len = skb_transport_offset(skb);
1287
1288			memcpy(pos, skb->data, hdr_len);
1289			write_sgl(skb, &q->q, (void *)pos + hdr_len,
1290				  pos + flits, hdr_len,
1291				  (dma_addr_t *)skb->head);
1292#ifdef CONFIG_NEED_DMA_MAP_STATE
1293			skb->dev = q->adap->port[0];
1294			skb->destructor = deferred_unmap_destructor;
1295#endif
1296			last_desc = q->q.pidx + ndesc - 1;
1297			if (last_desc >= q->q.size)
1298				last_desc -= q->q.size;
1299			q->q.sdesc[last_desc].skb = skb;
1300		}
1301
1302		txq_advance(&q->q, ndesc);
1303		written += ndesc;
1304		if (unlikely(written > 32)) {
1305			ring_tx_db(q->adap, &q->q, written);
1306			written = 0;
1307		}
1308
1309		spin_lock(&q->sendq.lock);
1310		__skb_unlink(skb, &q->sendq);
1311		if (is_ofld_imm(skb))
1312			kfree_skb(skb);
1313	}
1314	if (likely(written))
1315		ring_tx_db(q->adap, &q->q, written);
1316}
1317
1318/**
1319 *	ofld_xmit - send a packet through an offload queue
1320 *	@q: the Tx offload queue
1321 *	@skb: the packet
1322 *
1323 *	Send an offload packet through an SGE offload queue.
1324 */
1325static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1326{
1327	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
1328	spin_lock(&q->sendq.lock);
1329	__skb_queue_tail(&q->sendq, skb);
1330	if (q->sendq.qlen == 1)
1331		service_ofldq(q);
1332	spin_unlock(&q->sendq.lock);
1333	return NET_XMIT_SUCCESS;
1334}
1335
1336/**
1337 *	restart_ofldq - restart a suspended offload queue
1338 *	@data: the offload queue to restart
1339 *
1340 *	Resumes transmission on a suspended Tx offload queue.
1341 */
1342static void restart_ofldq(unsigned long data)
1343{
1344	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1345
1346	spin_lock(&q->sendq.lock);
1347	q->full = 0;            /* the queue actually is completely empty now */
1348	service_ofldq(q);
1349	spin_unlock(&q->sendq.lock);
1350}
1351
1352/**
1353 *	skb_txq - return the Tx queue an offload packet should use
1354 *	@skb: the packet
1355 *
1356 *	Returns the Tx queue an offload packet should use as indicated by bits
1357 *	1-15 in the packet's queue_mapping.
1358 */
1359static inline unsigned int skb_txq(const struct sk_buff *skb)
1360{
1361	return skb->queue_mapping >> 1;
1362}
1363
1364/**
1365 *	is_ctrl_pkt - return whether an offload packet is a control packet
1366 *	@skb: the packet
1367 *
1368 *	Returns whether an offload packet should use an OFLD or a CTRL
1369 *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
1370 */
1371static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1372{
1373	return skb->queue_mapping & 1;
1374}
1375
1376static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1377{
1378	unsigned int idx = skb_txq(skb);
1379
1380	if (unlikely(is_ctrl_pkt(skb)))
1381		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1382	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1383}
1384
1385/**
1386 *	t4_ofld_send - send an offload packet
1387 *	@adap: the adapter
1388 *	@skb: the packet
1389 *
1390 *	Sends an offload packet.  We use the packet queue_mapping to select the
1391 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1392 *	should be sent as regular or control, bits 1-15 select the queue.
1393 */
1394int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1395{
1396	int ret;
1397
1398	local_bh_disable();
1399	ret = ofld_send(adap, skb);
1400	local_bh_enable();
1401	return ret;
1402}
1403
1404/**
1405 *	cxgb4_ofld_send - send an offload packet
1406 *	@dev: the net device
1407 *	@skb: the packet
1408 *
1409 *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
1410 *	intended for ULDs.
1411 */
1412int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1413{
1414	return t4_ofld_send(netdev2adap(dev), skb);
1415}
1416EXPORT_SYMBOL(cxgb4_ofld_send);
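
/*
 * Usage sketch for ULDs (illustrative only; the helper below is
 * hypothetical and the queue number arbitrary).  Bit 0 of queue_mapping
 * selects a CTRL (1) or OFLD (0) queue and bits 1-15 select the queue
 * index, as decoded by skb_txq() and is_ctrl_pkt() below.
 */
#if 0	/* example fragment, not part of the driver */
static int example_uld_send(struct net_device *dev, struct sk_buff *skb)
{
	skb_set_queue_mapping(skb, (2 << 1) | 0);    /* offload queue 2 */
	return cxgb4_ofld_send(dev, skb);
}
#endif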
1417
1418static inline void copy_frags(struct sk_buff *skb,
1419			      const struct pkt_gl *gl, unsigned int offset)
1420{
1421	int i;
1422
1423	/* usually there's just one frag */
1424	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
1425			     gl->frags[0].offset + offset,
1426			     gl->frags[0].size - offset);
1427	skb_shinfo(skb)->nr_frags = gl->nfrags;
1428	for (i = 1; i < gl->nfrags; i++)
1429		__skb_fill_page_desc(skb, i, gl->frags[i].page,
1430				     gl->frags[i].offset,
1431				     gl->frags[i].size);
1432
 1433	/* get a reference to the last page; we don't own it */
1434	get_page(gl->frags[gl->nfrags - 1].page);
1435}
1436
1437/**
1438 *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1439 *	@gl: the gather list
1440 *	@skb_len: size of sk_buff main body if it carries fragments
1441 *	@pull_len: amount of data to move to the sk_buff's main body
1442 *
1443 *	Builds an sk_buff from the given packet gather list.  Returns the
1444 *	sk_buff or %NULL if sk_buff allocation failed.
1445 */
1446struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1447				   unsigned int skb_len, unsigned int pull_len)
1448{
1449	struct sk_buff *skb;
1450
1451	/*
1452	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
 1453	 * size, which is expected since buffers are at least PAGE_SIZE each.
1454	 * In this case packets up to RX_COPY_THRES have only one fragment.
1455	 */
1456	if (gl->tot_len <= RX_COPY_THRES) {
1457		skb = dev_alloc_skb(gl->tot_len);
1458		if (unlikely(!skb))
1459			goto out;
1460		__skb_put(skb, gl->tot_len);
1461		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1462	} else {
1463		skb = dev_alloc_skb(skb_len);
1464		if (unlikely(!skb))
1465			goto out;
1466		__skb_put(skb, pull_len);
1467		skb_copy_to_linear_data(skb, gl->va, pull_len);
1468
1469		copy_frags(skb, gl, pull_len);
1470		skb->len = gl->tot_len;
1471		skb->data_len = skb->len - pull_len;
1472		skb->truesize += skb->data_len;
1473	}
1474out:	return skb;
1475}
1476EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1477
1478/**
1479 *	t4_pktgl_free - free a packet gather list
1480 *	@gl: the gather list
1481 *
1482 *	Releases the pages of a packet gather list.  We do not own the last
1483 *	page on the list and do not free it.
1484 */
1485static void t4_pktgl_free(const struct pkt_gl *gl)
1486{
1487	int n;
1488	const struct page_frag *p;
1489
1490	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1491		put_page(p->page);
1492}
1493
1494/*
1495 * Process an MPS trace packet.  Give it an unused protocol number so it won't
1496 * be delivered to anyone and send it to the stack for capture.
1497 */
1498static noinline int handle_trace_pkt(struct adapter *adap,
1499				     const struct pkt_gl *gl)
1500{
1501	struct sk_buff *skb;
1502	struct cpl_trace_pkt *p;
1503
1504	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1505	if (unlikely(!skb)) {
1506		t4_pktgl_free(gl);
1507		return 0;
1508	}
1509
1510	p = (struct cpl_trace_pkt *)skb->data;
1511	__skb_pull(skb, sizeof(*p));
1512	skb_reset_mac_header(skb);
1513	skb->protocol = htons(0xffff);
1514	skb->dev = adap->port[0];
1515	netif_receive_skb(skb);
1516	return 0;
1517}
1518
1519static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1520		   const struct cpl_rx_pkt *pkt)
1521{
1522	int ret;
1523	struct sk_buff *skb;
1524
1525	skb = napi_get_frags(&rxq->rspq.napi);
1526	if (unlikely(!skb)) {
1527		t4_pktgl_free(gl);
1528		rxq->stats.rx_drops++;
1529		return;
1530	}
1531
1532	copy_frags(skb, gl, RX_PKT_PAD);
1533	skb->len = gl->tot_len - RX_PKT_PAD;
1534	skb->data_len = skb->len;
1535	skb->truesize += skb->data_len;
1536	skb->ip_summed = CHECKSUM_UNNECESSARY;
1537	skb_record_rx_queue(skb, rxq->rspq.idx);
1538	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1539		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1540
1541	if (unlikely(pkt->vlan_ex)) {
1542		__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
1543		rxq->stats.vlan_ex++;
1544	}
1545	ret = napi_gro_frags(&rxq->rspq.napi);
1546	if (ret == GRO_HELD)
1547		rxq->stats.lro_pkts++;
1548	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1549		rxq->stats.lro_merged++;
1550	rxq->stats.pkts++;
1551	rxq->stats.rx_cso++;
1552}
1553
1554/**
 1555 *	t4_ethrx_handler - process an ingress Ethernet packet
1556 *	@q: the response queue that received the packet
1557 *	@rsp: the response queue descriptor holding the RX_PKT message
1558 *	@si: the gather list of packet fragments
1559 *
 1560 *	Process an ingress Ethernet packet and deliver it to the stack.
1561 */
1562int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1563		     const struct pkt_gl *si)
1564{
1565	bool csum_ok;
1566	struct sk_buff *skb;
1567	const struct cpl_rx_pkt *pkt;
1568	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1569
1570	if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
1571		return handle_trace_pkt(q->adap, si);
1572
1573	pkt = (const struct cpl_rx_pkt *)rsp;
1574	csum_ok = pkt->csum_calc && !pkt->err_vec;
1575	if ((pkt->l2info & htonl(RXF_TCP)) &&
1576	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1577		do_gro(rxq, si, pkt);
1578		return 0;
1579	}
1580
1581	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
1582	if (unlikely(!skb)) {
1583		t4_pktgl_free(si);
1584		rxq->stats.rx_drops++;
1585		return 0;
1586	}
1587
1588	__skb_pull(skb, RX_PKT_PAD);      /* remove ethernet header padding */
1589	skb->protocol = eth_type_trans(skb, q->netdev);
1590	skb_record_rx_queue(skb, q->idx);
1591	if (skb->dev->features & NETIF_F_RXHASH)
1592		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1593
1594	rxq->stats.pkts++;
1595
1596	if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
1597	    (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1598		if (!pkt->ip_frag) {
1599			skb->ip_summed = CHECKSUM_UNNECESSARY;
1600			rxq->stats.rx_cso++;
1601		} else if (pkt->l2info & htonl(RXF_IP)) {
1602			__sum16 c = (__force __sum16)pkt->csum;
1603			skb->csum = csum_unfold(c);
1604			skb->ip_summed = CHECKSUM_COMPLETE;
1605			rxq->stats.rx_cso++;
1606		}
1607	} else
1608		skb_checksum_none_assert(skb);
1609
1610	if (unlikely(pkt->vlan_ex)) {
1611		__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
1612		rxq->stats.vlan_ex++;
1613	}
1614	netif_receive_skb(skb);
1615	return 0;
1616}
1617
1618/**
1619 *	restore_rx_bufs - put back a packet's Rx buffers
1620 *	@si: the packet gather list
1621 *	@q: the SGE free list
1622 *	@frags: number of FL buffers to restore
1623 *
1624 *	Puts back on an FL the Rx buffers associated with @si.  The buffers
 1625 *	have already been unmapped and are left unmapped; we mark them so to
1626 *	prevent further unmapping attempts.
1627 *
1628 *	This function undoes a series of @unmap_rx_buf calls when we find out
 1629 *	that the current packet can't be processed right away after all and we
1630 *	need to come back to it later.  This is a very rare event and there's
1631 *	no effort to make this particularly efficient.
1632 */
1633static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1634			    int frags)
1635{
1636	struct rx_sw_desc *d;
1637
1638	while (frags--) {
1639		if (q->cidx == 0)
1640			q->cidx = q->size - 1;
1641		else
1642			q->cidx--;
1643		d = &q->sdesc[q->cidx];
1644		d->page = si->frags[frags].page;
1645		d->dma_addr |= RX_UNMAPPED_BUF;
1646		q->avail++;
1647	}
1648}
1649
1650/**
1651 *	is_new_response - check if a response is newly written
1652 *	@r: the response descriptor
1653 *	@q: the response queue
1654 *
1655 *	Returns true if a response descriptor contains a yet unprocessed
1656 *	response.
1657 */
1658static inline bool is_new_response(const struct rsp_ctrl *r,
1659				   const struct sge_rspq *q)
1660{
1661	return RSPD_GEN(r->type_gen) == q->gen;
1662}
1663
1664/**
1665 *	rspq_next - advance to the next entry in a response queue
1666 *	@q: the queue
1667 *
1668 *	Updates the state of a response queue to advance it to the next entry.
1669 */
1670static inline void rspq_next(struct sge_rspq *q)
1671{
1672	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1673	if (unlikely(++q->cidx == q->size)) {
1674		q->cidx = 0;
1675		q->gen ^= 1;
1676		q->cur_desc = q->desc;
1677	}
1678}
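
/*
 * Example of the generation bit at work: SW and HW start out agreeing on
 * the gen value.  Every wrap of cidx past the end of the ring flips
 * q->gen, and HW likewise flips the gen it writes on each pass, so a stale
 * descriptor left over from the previous pass fails the is_new_response()
 * comparison until HW overwrites it.
 */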
1679
1680/**
1681 *	process_responses - process responses from an SGE response queue
1682 *	@q: the ingress queue to process
1683 *	@budget: how many responses can be processed in this round
1684 *
1685 *	Process responses from an SGE response queue up to the supplied budget.
1686 *	Responses include received packets as well as control messages from FW
1687 *	or HW.
1688 *
1689 *	Additionally choose the interrupt holdoff time for the next interrupt
 1690 *	on this queue.  If the system is under memory shortage, use a fairly
1691 *	long delay to help recovery.
1692 */
1693static int process_responses(struct sge_rspq *q, int budget)
1694{
1695	int ret, rsp_type;
1696	int budget_left = budget;
1697	const struct rsp_ctrl *rc;
1698	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1699
1700	while (likely(budget_left)) {
1701		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1702		if (!is_new_response(rc, q))
1703			break;
1704
1705		rmb();
1706		rsp_type = RSPD_TYPE(rc->type_gen);
1707		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1708			struct page_frag *fp;
1709			struct pkt_gl si;
1710			const struct rx_sw_desc *rsd;
1711			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
1712
1713			if (len & RSPD_NEWBUF) {
1714				if (likely(q->offset > 0)) {
1715					free_rx_bufs(q->adap, &rxq->fl, 1);
1716					q->offset = 0;
1717				}
1718				len = RSPD_LEN(len);
1719			}
1720			si.tot_len = len;
1721
1722			/* gather packet fragments */
1723			for (frags = 0, fp = si.frags; ; frags++, fp++) {
1724				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1725				bufsz = get_buf_size(rsd);
1726				fp->page = rsd->page;
1727				fp->offset = q->offset;
1728				fp->size = min(bufsz, len);
1729				len -= fp->size;
1730				if (!len)
1731					break;
1732				unmap_rx_buf(q->adap, &rxq->fl);
1733			}
1734
1735			/*
1736			 * Last buffer remains mapped so explicitly make it
1737			 * coherent for CPU access.
1738			 */
1739			dma_sync_single_for_cpu(q->adap->pdev_dev,
1740						get_buf_addr(rsd),
1741						fp->size, DMA_FROM_DEVICE);
1742
1743			si.va = page_address(si.frags[0].page) +
1744				si.frags[0].offset;
1745			prefetch(si.va);
1746
1747			si.nfrags = frags + 1;
1748			ret = q->handler(q, q->cur_desc, &si);
1749			if (likely(ret == 0))
1750				q->offset += ALIGN(fp->size, FL_ALIGN);
1751			else
1752				restore_rx_bufs(&si, &rxq->fl, frags);
1753		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
1754			ret = q->handler(q, q->cur_desc, NULL);
1755		} else {
1756			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
1757		}
1758
1759		if (unlikely(ret)) {
1760			/* couldn't process descriptor, back off for recovery */
1761			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
1762			break;
1763		}
1764
1765		rspq_next(q);
1766		budget_left--;
1767	}
1768
1769	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
1770		__refill_fl(q->adap, &rxq->fl);
1771	return budget - budget_left;
1772}
1773
1774/**
1775 *	napi_rx_handler - the NAPI handler for Rx processing
1776 *	@napi: the napi instance
1777 *	@budget: how many packets we can process in this round
1778 *
1779 *	Handler for new data events when using NAPI.  This does not need any
1780 *	locking or protection from interrupts as data interrupts are off at
1781 *	this point and other adapter interrupts do not interfere (the latter
 1782 *	is not a concern at all with MSI-X as non-data interrupts then have
1783 *	a separate handler).
1784 */
1785static int napi_rx_handler(struct napi_struct *napi, int budget)
1786{
1787	unsigned int params;
1788	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1789	int work_done = process_responses(q, budget);
1790
1791	if (likely(work_done < budget)) {
1792		napi_complete(napi);
1793		params = q->next_intr_params;
1794		q->next_intr_params = q->intr_params;
1795	} else
1796		params = QINTR_TIMER_IDX(7);
1797
1798	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
1799		     INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
1800	return work_done;
1801}
1802
1803/*
1804 * The MSI-X interrupt handler for an SGE response queue.
1805 */
1806irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
1807{
1808	struct sge_rspq *q = cookie;
1809
1810	napi_schedule(&q->napi);
1811	return IRQ_HANDLED;
1812}

/*
 * Process the indirect interrupt entries in the interrupt queue and kick off
 * NAPI for each queue that has generated an entry.
 */
static unsigned int process_intrq(struct adapter *adap)
{
	unsigned int credits;
	const struct rsp_ctrl *rc;
	struct sge_rspq *q = &adap->sge.intrq;

	spin_lock(&adap->sge.intrq_lock);
	for (credits = 0; ; credits++) {
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		rmb();
		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
			unsigned int qid = ntohl(rc->pldbuflen_qid);

			qid -= adap->sge.ingr_start;
			napi_schedule(&adap->sge.ingr_map[qid]->napi);
		}

		rspq_next(q);
	}

	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
		     INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
	spin_unlock(&adap->sge.intrq_lock);
	return credits;
}

/*
 * The MSI interrupt handler, which handles data events from SGE response
 * queues as well as error and other async events, since they all use the
 * same MSI vector.
 */
static irqreturn_t t4_intr_msi(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	t4_slow_intr_handler(adap);
	process_intrq(adap);
	return IRQ_HANDLED;
}

/*
 * Interrupt handler for legacy INTx interrupts.
 * Handles data events from SGE response queues as well as error and other
 * async events, since they all use the same interrupt line.
 */
static irqreturn_t t4_intr_intx(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
	if (t4_slow_intr_handler(adap) | process_intrq(adap))
		return IRQ_HANDLED;
	return IRQ_NONE;             /* probably shared interrupt */
}

/**
 *	t4_intr_handler - select the top-level interrupt handler
 *	@adap: the adapter
 *
 *	Selects the top-level interrupt handler based on the type of interrupts
 *	(MSI-X, MSI, or INTx).
 */
irq_handler_t t4_intr_handler(struct adapter *adap)
{
	if (adap->flags & USING_MSIX)
		return t4_sge_intr_msix;
	if (adap->flags & USING_MSI)
		return t4_intr_msi;
	return t4_intr_intx;
}

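/**
 *	sge_rx_timer_cb - the periodic Rx queue check timer
 *	@data: the adapter, cast to an unsigned long
 *
 *	Runs every RX_QCHECK_PERIOD.  Reschedules NAPI for any queue whose
 *	free list has starved, and reads the SGE debug counters to warn when
 *	an ingress DMA channel appears stuck on the same queue for about a
 *	second.
 */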
static void sge_rx_timer_cb(unsigned long data)
{
	unsigned long m;
	unsigned int i, cnt[2];
	struct adapter *adap = (struct adapter *)data;
	struct sge *s = &adap->sge;

	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
		for (m = s->starving_fl[i]; m; m &= m - 1) {
			struct sge_eth_rxq *rxq;
			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_fl *fl = s->egr_map[id];

			clear_bit(id, s->starving_fl);
			smp_mb__after_clear_bit();

			if (fl_starving(fl)) {
				rxq = container_of(fl, struct sge_eth_rxq, fl);
				if (napi_reschedule(&rxq->rspq.napi))
					fl->starving++;
				else
					set_bit(id, s->starving_fl);
			}
		}

	t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
	cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
	cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++)
		if (cnt[i] >= s->starve_thres) {
			if (s->idma_state[i] || cnt[i] == 0xffffffff)
				continue;
			s->idma_state[i] = 1;
			t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
			m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
			dev_warn(adap->pdev_dev,
				 "SGE idma%u starvation detected for queue %lu\n",
				 i, m & 0xffff);
		} else if (s->idma_state[i])
			s->idma_state[i] = 0;

	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}

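/**
 *	sge_tx_timer_cb - the periodic Tx queue check timer
 *	@data: the adapter, cast to an unsigned long
 *
 *	Kicks the resume tasklet of any offload Tx queue that was stalled by
 *	a DMA mapping error, then reclaims up to MAX_TIMER_TX_RECLAIM
 *	completed descriptors from the Ethernet Tx queues, round-robin
 *	starting at ethtxq_rover.  Reschedules itself sooner if the reclaim
 *	budget was exhausted.
 */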
static void sge_tx_timer_cb(unsigned long data)
{
	unsigned long m;
	unsigned int i, budget;
	struct adapter *adap = (struct adapter *)data;
	struct sge *s = &adap->sge;

	for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
		for (m = s->txq_maperr[i]; m; m &= m - 1) {
			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_ofld_txq *txq = s->egr_map[id];

			clear_bit(id, s->txq_maperr);
			tasklet_schedule(&txq->qresume_tsk);
		}

	budget = MAX_TIMER_TX_RECLAIM;
	i = s->ethtxq_rover;
	do {
		struct sge_eth_txq *q = &s->ethtxq[i];

		if (q->q.in_use &&
		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
		    __netif_tx_trylock(q->txq)) {
			int avail = reclaimable(&q->q);

			if (avail) {
				if (avail > budget)
					avail = budget;

				free_tx_desc(adap, &q->q, avail, true);
				q->q.in_use -= avail;
				budget -= avail;
			}
			__netif_tx_unlock(q->txq);
		}

		if (++i >= s->ethqsets)
			i = 0;
	} while (budget && i != s->ethtxq_rover);
	s->ethtxq_rover = i;
	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
}

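/**
 *	t4_sge_alloc_rxq - allocate an SGE ingress queue with optional FL
 *	@adap: the adapter
 *	@iq: the ingress queue to fill in
 *	@fwevtq: true if this is the firmware event queue
 *	@dev: the net device the queue services
 *	@intr_idx: the interrupt index, or, if negative, the encoded index
 *		of the interrupt queue that interrupts are forwarded to
 *	@fl: the free list to pair with the queue, or NULL for none
 *	@hnd: the handler called for each response entry
 *
 *	Allocates host memory for the ingress queue and the optional free
 *	list, then asks the firmware to instantiate them.  Returns 0 on
 *	success or a negative errno, in which case all host memory is freed.
 */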
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd)
{
	int ret, flsz = 0;
	struct fw_iq_cmd c;
	struct port_info *pi = netdev_priv(dev);

	/* Size needs to be a multiple of 16, including the status entry. */
	iq->size = roundup(iq->size, 16);

	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
	if (!iq->desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
				 FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
		FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
		FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
		FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
							-intr_idx - 1));
	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
		FW_IQ_CMD_IQGTSMODE |
		FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
		FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
	c.iqsize = htons(iq->size);
	c.iqaddr = cpu_to_be64(iq->phys_addr);

	if (fl) {
		fl->size = roundup(fl->size, 8);
		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
				      sizeof(struct rx_sw_desc), &fl->addr,
				      &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
		if (!fl->desc)
			goto fl_nomem;

		flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
					    FW_IQ_CMD_FL0FETCHRO(1) |
					    FW_IQ_CMD_FL0DATARO(1) |
					    FW_IQ_CMD_FL0PADEN);
		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
				FW_IQ_CMD_FL0FBMAX(3));
		c.fl0size = htons(flsz);
		c.fl0addr = cpu_to_be64(fl->addr);
	}

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret)
		goto err;

	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
	iq->cur_desc = iq->desc;
	iq->cidx = 0;
	iq->gen = 1;
	iq->next_intr_params = iq->intr_params;
	iq->cntxt_id = ntohs(c.iqid);
	iq->abs_id = ntohs(c.physiqid);
	iq->size--;                           /* subtract status entry */
	iq->adap = adap;
	iq->netdev = dev;
	iq->handler = hnd;

	/* set offset to -1 to distinguish ingress queues without FL */
	iq->offset = fl ? 0 : -1;

	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;

	if (fl) {
		fl->cntxt_id = ntohs(c.fl0id);
		fl->avail = fl->pend_cred = 0;
		fl->pidx = fl->cidx = 0;
		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
	}
	return 0;

fl_nomem:
	ret = -ENOMEM;
err:
	if (iq->desc) {
		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
				  iq->desc, iq->phys_addr);
		iq->desc = NULL;
	}
	if (fl && fl->desc) {
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
				  fl->desc, fl->addr);
		fl->desc = NULL;
	}
	return ret;
}

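/*
 * Illustrative call sketch (an assumed caller, not code from this file):
 * an Ethernet queue set pairs its response queue with a free list, e.g.
 *
 *	err = t4_sge_alloc_rxq(adap, &rxq->rspq, false, dev,
 *			       msix_vec, &rxq->fl, t4_ethrx_handler);
 *
 * where msix_vec stands in for whatever vector index the setup code picks.
 */

/*
 * Initialize the software state of a Tx queue and enter it into the egress
 * queue map.
 */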
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
	q->in_use = 0;
	q->cidx = q->pidx = 0;
	q->stops = q->restarts = 0;
	q->stat = (void *)&q->desc[q->size];
	q->cntxt_id = id;
	spin_lock_init(&q->db_lock);
	adap->sge.egr_map[id - adap->sge.egr_start] = q;
}

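/**
 *	t4_sge_alloc_eth_txq - allocate an Ethernet Tx queue
 *	@adap: the adapter
 *	@txq: the Tx queue to fill in
 *	@dev: the net device the queue services
 *	@netdevq: the netdev Tx queue this SGE queue backs
 *	@iqid: the ingress queue that receives Tx completions
 *
 *	Allocates the descriptor ring, creates the egress queue through an
 *	FW_EQ_ETH_CMD mailbox command, and initializes the software state.
 *	On failure the ring is freed and a negative errno is returned.
 */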
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_eth_cmd c;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
			netdev_queue_numa_node_read(netdevq));
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
				 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
				   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_ETH_CMD_FETCHRO(1) |
				   FW_EQ_ETH_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
				  FW_EQ_ETH_CMD_FBMAX(3) |
				  FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
				  FW_EQ_ETH_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
	txq->txq = netdevq;
	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
	txq->mapping_err = 0;
	return 0;
}

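/**
 *	t4_sge_alloc_ctrl_txq - allocate a control Tx queue
 *	@adap: the adapter
 *	@txq: the control Tx queue to fill in
 *	@dev: the net device the queue services
 *	@iqid: the ingress queue that receives Tx completions
 *	@cmplqid: the ingress queue that receives CPL completions
 *
 *	Like t4_sge_alloc_eth_txq() but issues an FW_EQ_CTRL_CMD, and also
 *	initializes the pending-skb queue and the resume tasklet used when
 *	the ring backs up.
 */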
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid)
{
	int ret, nentries;
	struct fw_eq_ctrl_cmd c;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
				 NULL, 0, NUMA_NO_NODE);
	if (!txq->q.desc)
		return -ENOMEM;

	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_CTRL_CMD_PFN(adap->fn) |
			    FW_EQ_CTRL_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
				 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
	c.physeqid_pkd = htonl(0);
	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
				   FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_CTRL_CMD_FETCHRO |
				   FW_EQ_CTRL_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
				  FW_EQ_CTRL_CMD_FBMAX(3) |
				  FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
				  FW_EQ_CTRL_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
	txq->full = 0;
	return 0;
}

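/**
 *	t4_sge_alloc_ofld_txq - allocate an offload Tx queue
 *	@adap: the adapter
 *	@txq: the offload Tx queue to fill in
 *	@dev: the net device the queue services
 *	@iqid: the ingress queue that receives Tx completions
 *
 *	Like t4_sge_alloc_eth_txq() but issues an FW_EQ_OFLD_CMD; the queue
 *	carries offloaded traffic rather than backing a netdev Tx queue, so
 *	it too gets a pending-skb queue and a resume tasklet.
 */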
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
			  struct net_device *dev, unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_ofld_cmd c;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
			NUMA_NO_NODE);
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_OFLD_CMD_PFN(adap->fn) |
			    FW_EQ_OFLD_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
				 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
				   FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_OFLD_CMD_FETCHRO(1) |
				   FW_EQ_OFLD_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
				  FW_EQ_OFLD_CMD_FBMAX(3) |
				  FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
				  FW_EQ_OFLD_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
	txq->full = 0;
	txq->mapping_err = 0;
	return 0;
}

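/*
 * Free the hardware descriptor ring of a Tx queue (including its status
 * page) and reset the software state; callers free the sw descriptor
 * array separately.
 */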
static void free_txq(struct adapter *adap, struct sge_txq *q)
{
	dma_free_coherent(adap->pdev_dev,
			  q->size * sizeof(struct tx_desc) + STAT_LEN,
			  q->desc, q->phys_addr);
	q->cntxt_id = 0;
	q->sdesc = NULL;
	q->desc = NULL;
}

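/*
 * Free a response queue and, if it has one, its associated free list:
 * remove the queue from the ingress map, have the firmware free the
 * contexts, return any posted Rx buffers, and release the host rings.
 */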
static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
			 struct sge_fl *fl)
{
	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
		   rq->cntxt_id, fl_id, 0xffff);
	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
			  rq->desc, rq->phys_addr);
	netif_napi_del(&rq->napi);
	rq->netdev = NULL;
	rq->cntxt_id = rq->abs_id = 0;
	rq->desc = NULL;

	if (fl) {
		free_rx_bufs(adap, fl, fl->avail);
		dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
				  fl->desc, fl->addr);
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}

/**
 *	t4_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
void t4_free_sge_resources(struct adapter *adap)
{
	int i;
	struct sge_eth_rxq *eq = adap->sge.ethrxq;
	struct sge_eth_txq *etq = adap->sge.ethtxq;
	struct sge_ofld_rxq *oq = adap->sge.ofldrxq;

	/* clean up Ethernet Tx/Rx queues */
	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
		if (eq->rspq.desc)
			free_rspq_fl(adap, &eq->rspq, &eq->fl);
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
				       etq->q.cntxt_id);
			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
			kfree(etq->q.sdesc);
			free_txq(adap, &etq->q);
		}
	}

	/* clean up RDMA and iSCSI Rx queues */
	for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
		if (oq->rspq.desc)
			free_rspq_fl(adap, &oq->rspq, &oq->fl);
	}
	for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
		if (oq->rspq.desc)
			free_rspq_fl(adap, &oq->rspq, &oq->fl);
	}

	/* clean up offload Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];

		if (q->q.desc) {
			tasklet_kill(&q->qresume_tsk);
			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
					q->q.cntxt_id);
			free_tx_desc(adap, &q->q, q->q.in_use, false);
			kfree(q->q.sdesc);
			__skb_queue_purge(&q->sendq);
			free_txq(adap, &q->q);
		}
	}

	/* clean up control Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];

		if (cq->q.desc) {
			tasklet_kill(&cq->qresume_tsk);
			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
					cq->q.cntxt_id);
			__skb_queue_purge(&cq->sendq);
			free_txq(adap, &cq->q);
		}
	}

	if (adap->sge.fw_evtq.desc)
		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);

	if (adap->sge.intrq.desc)
		free_rspq_fl(adap, &adap->sge.intrq, NULL);

	/* clear the reverse egress queue map */
	memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
}

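/**
 *	t4_sge_start - enable SGE operation
 *	@adap: the adapter
 *
 *	Starts the periodic Rx and Tx queue check timers.
 */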
void t4_sge_start(struct adapter *adap)
{
	adap->sge.ethtxq_rover = 0;
	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
}

/**
 *	t4_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Stop tasklets and timers associated with the DMA engine.  Note that
 *	this is effective only if measures have been taken to disable any HW
 *	events that may restart them.
 */
void t4_sge_stop(struct adapter *adap)
{
	int i;
	struct sge *s = &adap->sge;

	if (in_interrupt())  /* actions below require waiting */
		return;

	if (s->rx_timer.function)
		del_timer_sync(&s->rx_timer);
	if (s->tx_timer.function)
		del_timer_sync(&s->tx_timer);

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
		struct sge_ofld_txq *q = &s->ofldtxq[i];

		if (q->q.desc)
			tasklet_kill(&q->qresume_tsk);
	}
	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
		struct sge_ctrl_txq *cq = &s->ctrlq[i];

		if (cq->q.desc)
			tasklet_kill(&cq->qresume_tsk);
	}
}

/**
 *	t4_sge_init - initialize SGE
 *	@adap: the adapter
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queues here; instead the driver
 *	top-level must request them individually.
 */
void t4_sge_init(struct adapter *adap)
{
	unsigned int i, v;
	struct sge *s = &adap->sge;
	unsigned int fl_align_log = ilog2(FL_ALIGN);

	t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
			 INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
			 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
			 RXPKTCPLMODE |
			 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));

	/*
	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
	 * and generate an interrupt when this occurs so we can recover.
	 */
	t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
			V_HP_INT_THRESH(M_HP_INT_THRESH) |
			V_LP_INT_THRESH(M_LP_INT_THRESH),
			V_HP_INT_THRESH(dbfifo_int_thresh) |
			V_LP_INT_THRESH(dbfifo_int_thresh));
	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
			F_ENABLE_DROP);

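	/*
	 * Program the host page size into each 4-bit PF slot of
	 * SGE_HOST_PAGE_SIZE.  The encoding is log2(page size) - 10, so
	 * with 4KB pages (PAGE_SHIFT == 12) every nibble gets the value 2
	 * and the register is written as 0x22222222.
	 */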
	for (i = v = 0; i < 32; i += 4)
		v |= (PAGE_SHIFT - 10) << i;
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
#if FL_PG_ORDER > 0
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
#endif
	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
		     THRESHOLD_0(s->counter_val[0]) |
		     THRESHOLD_1(s->counter_val[1]) |
		     THRESHOLD_2(s->counter_val[2]) |
		     THRESHOLD_3(s->counter_val[3]));
	t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
	s->starve_thres = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
	s->idma_state[0] = s->idma_state[1] = 0;
	spin_lock_init(&s->intrq_lock);
}