   1/*****************************************************************************
   2 *                                                                           *
   3 * File: sge.c                                                               *
   4 * $Revision: 1.26 $                                                         *
   5 * $Date: 2005/06/21 18:29:48 $                                              *
   6 * Description:                                                              *
   7 *  DMA engine.                                                              *
   8 *  part of the Chelsio 10Gb Ethernet Driver.                                *
   9 *                                                                           *
  10 * This program is free software; you can redistribute it and/or modify      *
  11 * it under the terms of the GNU General Public License, version 2, as       *
  12 * published by the Free Software Foundation.                                *
  13 *                                                                           *
  14 * You should have received a copy of the GNU General Public License along   *
  15 * with this program; if not, write to the Free Software Foundation, Inc.,   *
  16 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
  17 *                                                                           *
  18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
  20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
  21 *                                                                           *
  22 * http://www.chelsio.com                                                    *
  23 *                                                                           *
  24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
  25 * All rights reserved.                                                      *
  26 *                                                                           *
  27 * Maintainers: maintainers@chelsio.com                                      *
  28 *                                                                           *
  29 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
  30 *          Tina Yang               <tainay@chelsio.com>                     *
  31 *          Felix Marti             <felix@chelsio.com>                      *
  32 *          Scott Bardone           <sbardone@chelsio.com>                   *
  33 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
  34 *          Frank DiMambro          <frank@chelsio.com>                      *
  35 *                                                                           *
  36 * History:                                                                  *
  37 *                                                                           *
  38 ****************************************************************************/
  39
  40#include "common.h"
  41
  42#include <linux/types.h>
  43#include <linux/errno.h>
  44#include <linux/pci.h>
  45#include <linux/ktime.h>
  46#include <linux/netdevice.h>
  47#include <linux/etherdevice.h>
  48#include <linux/if_vlan.h>
  49#include <linux/skbuff.h>
  50#include <linux/init.h>
  51#include <linux/mm.h>
  52#include <linux/tcp.h>
  53#include <linux/ip.h>
  54#include <linux/in.h>
  55#include <linux/if_arp.h>
  56#include <linux/slab.h>
  57#include <linux/prefetch.h>
  58
  59#include "cpl5_cmd.h"
  60#include "sge.h"
  61#include "regs.h"
  62#include "espi.h"
  63
  64/* This belongs in if_ether.h */
  65#define ETH_P_CPL5 0xf
  66
  67#define SGE_CMDQ_N		2
  68#define SGE_FREELQ_N		2
  69#define SGE_CMDQ0_E_N		1024
  70#define SGE_CMDQ1_E_N		128
  71#define SGE_FREEL_SIZE		4096
  72#define SGE_JUMBO_FREEL_SIZE	512
  73#define SGE_FREEL_REFILL_THRESH	16
  74#define SGE_RESPQ_E_N		1024
  75#define SGE_INTRTIMER_NRES	1000
  76#define SGE_RX_SM_BUF_SIZE	1536
  77#define SGE_TX_DESC_MAX_PLEN	16384
  78
  79#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
  80
  81/*
  82 * Period of the TX buffer reclaim timer.  This timer does not need to run
  83 * frequently as TX buffers are usually reclaimed by new TX packets.
  84 */
  85#define TX_RECLAIM_PERIOD (HZ / 4)
  86
  87#define M_CMD_LEN       0x7fffffff
  88#define V_CMD_LEN(v)    (v)
  89#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
  90#define V_CMD_GEN1(v)   ((v) << 31)
  91#define V_CMD_GEN2(v)   (v)
  92#define F_CMD_DATAVALID (1 << 1)
  93#define F_CMD_SOP       (1 << 2)
  94#define V_CMD_EOP(v)    ((v) << 3)
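/*
 * For illustration, these macros are combined as follows when a Tx
 * descriptor is written (taken from write_tx_desc() further below):
 *
 *	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
 *	e->flags   = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
 */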
  95
  96/*
  97 * Command queue, receive buffer list, and response queue descriptors.
  98 */
  99#if defined(__BIG_ENDIAN_BITFIELD)
 100struct cmdQ_e {
 101	u32 addr_lo;
 102	u32 len_gen;
 103	u32 flags;
 104	u32 addr_hi;
 105};
 106
 107struct freelQ_e {
 108	u32 addr_lo;
 109	u32 len_gen;
 110	u32 gen2;
 111	u32 addr_hi;
 112};
 113
 114struct respQ_e {
 115	u32 Qsleeping		: 4;
 116	u32 Cmdq1CreditReturn	: 5;
 117	u32 Cmdq1DmaComplete	: 5;
 118	u32 Cmdq0CreditReturn	: 5;
 119	u32 Cmdq0DmaComplete	: 5;
 120	u32 FreelistQid		: 2;
 121	u32 CreditValid		: 1;
 122	u32 DataValid		: 1;
 123	u32 Offload		: 1;
 124	u32 Eop			: 1;
 125	u32 Sop			: 1;
 126	u32 GenerationBit	: 1;
 127	u32 BufferLength;
 128};
 129#elif defined(__LITTLE_ENDIAN_BITFIELD)
 130struct cmdQ_e {
 131	u32 len_gen;
 132	u32 addr_lo;
 133	u32 addr_hi;
 134	u32 flags;
 135};
 136
 137struct freelQ_e {
 138	u32 len_gen;
 139	u32 addr_lo;
 140	u32 addr_hi;
 141	u32 gen2;
 142};
 143
 144struct respQ_e {
 145	u32 BufferLength;
 146	u32 GenerationBit	: 1;
 147	u32 Sop			: 1;
 148	u32 Eop			: 1;
 149	u32 Offload		: 1;
 150	u32 DataValid		: 1;
 151	u32 CreditValid		: 1;
 152	u32 FreelistQid		: 2;
 153	u32 Cmdq0DmaComplete	: 5;
 154	u32 Cmdq0CreditReturn	: 5;
 155	u32 Cmdq1DmaComplete	: 5;
 156	u32 Cmdq1CreditReturn	: 5;
 157	u32 Qsleeping		: 4;
  158};
 159#endif
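/*
 * A note on the generation bits used below: a descriptor belongs to the
 * current pass over a ring only when its generation field(s) match the
 * queue's genbit, which software toggles each time the ring wraps.  Writers
 * fill in the body of an entry first and set the final generation word only
 * after a wmb() (see refill_free_list() and write_tx_descs()), so the other
 * side never observes a half-written descriptor.
 */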
 160
 161/*
 162 * SW Context Command and Freelist Queue Descriptors
 163 */
 164struct cmdQ_ce {
 165	struct sk_buff *skb;
 166	DEFINE_DMA_UNMAP_ADDR(dma_addr);
 167	DEFINE_DMA_UNMAP_LEN(dma_len);
 168};
 169
 170struct freelQ_ce {
 171	struct sk_buff *skb;
 172	DEFINE_DMA_UNMAP_ADDR(dma_addr);
 173	DEFINE_DMA_UNMAP_LEN(dma_len);
 174};
 175
 176/*
 177 * SW command, freelist and response rings
 178 */
 179struct cmdQ {
 180	unsigned long   status;         /* HW DMA fetch status */
 181	unsigned int    in_use;         /* # of in-use command descriptors */
 182	unsigned int	size;	        /* # of descriptors */
 183	unsigned int    processed;      /* total # of descs HW has processed */
 184	unsigned int    cleaned;        /* total # of descs SW has reclaimed */
 185	unsigned int    stop_thres;     /* SW TX queue suspend threshold */
 186	u16		pidx;           /* producer index (SW) */
 187	u16		cidx;           /* consumer index (HW) */
 188	u8		genbit;         /* current generation (=valid) bit */
 189	u8              sop;            /* is next entry start of packet? */
 190	struct cmdQ_e  *entries;        /* HW command descriptor Q */
 191	struct cmdQ_ce *centries;       /* SW command context descriptor Q */
 192	dma_addr_t	dma_addr;       /* DMA addr HW command descriptor Q */
 193	spinlock_t	lock;           /* Lock to protect cmdQ enqueuing */
 194};
 195
 196struct freelQ {
 197	unsigned int	credits;        /* # of available RX buffers */
 198	unsigned int	size;	        /* free list capacity */
 199	u16		pidx;           /* producer index (SW) */
 200	u16		cidx;           /* consumer index (HW) */
 201	u16		rx_buffer_size; /* Buffer size on this free list */
 202	u16             dma_offset;     /* DMA offset to align IP headers */
 203	u16             recycleq_idx;   /* skb recycle q to use */
 204	u8		genbit;	        /* current generation (=valid) bit */
 205	struct freelQ_e	*entries;       /* HW freelist descriptor Q */
 206	struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
 207	dma_addr_t	dma_addr;       /* DMA addr HW freelist descriptor Q */
 208};
 209
 210struct respQ {
 211	unsigned int	credits;        /* credits to be returned to SGE */
 212	unsigned int	size;	        /* # of response Q descriptors */
 213	u16		cidx;	        /* consumer index (SW) */
 214	u8		genbit;	        /* current generation(=valid) bit */
 215	struct respQ_e *entries;        /* HW response descriptor Q */
 216	dma_addr_t	dma_addr;       /* DMA addr HW response descriptor Q */
 217};
 218
 219/* Bit flags for cmdQ.status */
 220enum {
 221	CMDQ_STAT_RUNNING = 1,          /* fetch engine is running */
 222	CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
 223};
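/*
 * These bits implement the cmdQ0 doorbell handshake used throughout this
 * file: a sender clears CMDQ_STAT_LAST_PKT_DB and then test-and-sets
 * CMDQ_STAT_RUNNING; only the caller that finds RUNNING clear (fetch engine
 * idle) sets LAST_PKT_DB and writes F_CMDQ0_ENABLE to the doorbell.
 * update_tx_info() clears RUNNING when the hardware reports that the queue
 * went to sleep and rings the doorbell again if work is still outstanding.
 */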
 224
 225/* T204 TX SW scheduler */
 226
 227/* Per T204 TX port */
 228struct sched_port {
 229	unsigned int	avail;		/* available bits - quota */
 230	unsigned int	drain_bits_per_1024ns; /* drain rate */
 231	unsigned int	speed;		/* drain rate, mbps */
 232	unsigned int	mtu;		/* mtu size */
 233	struct sk_buff_head skbq;	/* pending skbs */
 234};
 235
 236/* Per T204 device */
 237struct sched {
 238	ktime_t         last_updated;   /* last time quotas were computed */
 239	unsigned int	max_avail;	/* max bits to be sent to any port */
 240	unsigned int	port;		/* port index (round robin ports) */
 241	unsigned int	num;		/* num skbs in per port queues */
 242	struct sched_port p[MAX_NPORTS];
 243	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
 244};
 245static void restart_sched(unsigned long);
 246
 247
 248/*
 249 * Main SGE data structure
 250 *
 251 * Interrupts are handled by a single CPU and it is likely that on a MP system
 252 * the application is migrated to another CPU. In that scenario, we try to
  253 * separate the RX (in irq context) and TX state in order to decrease memory
 254 * contention.
 255 */
 256struct sge {
 257	struct adapter *adapter;	/* adapter backpointer */
 258	struct net_device *netdev;      /* netdevice backpointer */
 259	struct freelQ	freelQ[SGE_FREELQ_N]; /* buffer free lists */
 260	struct respQ	respQ;		/* response Q */
 261	unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */
 262	unsigned int	rx_pkt_pad;     /* RX padding for L2 packets */
 263	unsigned int	jumbo_fl;       /* jumbo freelist Q index */
 264	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */
 265	unsigned int    fixed_intrtimer;/* non-adaptive interrupt timer */
 266	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
 267	struct timer_list espibug_timer;
 268	unsigned long	espibug_timeout;
 269	struct sk_buff	*espibug_skb[MAX_NPORTS];
 270	u32		sge_control;	/* shadow value of sge control reg */
 271	struct sge_intr_counts stats;
 272	struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
 273	struct sched	*tx_sched;
 274	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
 275};
 276
 277static const u8 ch_mac_addr[ETH_ALEN] = {
 278	0x0, 0x7, 0x43, 0x0, 0x0, 0x0
 279};
 280
 281/*
 282 * stop tasklet and free all pending skb's
 283 */
 284static void tx_sched_stop(struct sge *sge)
 285{
 286	struct sched *s = sge->tx_sched;
 287	int i;
 288
 289	tasklet_kill(&s->sched_tsk);
 290
 291	for (i = 0; i < MAX_NPORTS; i++)
  292		__skb_queue_purge(&s->p[i].skbq);
 293}
 294
 295/*
 296 * t1_sched_update_parms() is called when the MTU or link speed changes. It
  297 * re-computes scheduler parameters to cope with the change.
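 * As an illustrative example (numbers not from the original comment): with
 * mtu = 1500 and speed = 1000 the computation below yields
 * 1024 * 1000 * 1460 / (1550 * 1000) ~= 964 bits per 1024 ns, before any
 * board-specific adjustment.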
 298 */
 299unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
 300				   unsigned int mtu, unsigned int speed)
 301{
 302	struct sched *s = sge->tx_sched;
 303	struct sched_port *p = &s->p[port];
 304	unsigned int max_avail_segs;
 305
  306	pr_debug("t1_sched_update_parms mtu=%d speed=%d\n", mtu, speed);
 307	if (speed)
 308		p->speed = speed;
 309	if (mtu)
 310		p->mtu = mtu;
 311
 312	if (speed || mtu) {
 313		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
 314		do_div(drain, (p->mtu + 50) * 1000);
 315		p->drain_bits_per_1024ns = (unsigned int) drain;
 316
 317		if (p->speed < 1000)
 318			p->drain_bits_per_1024ns =
 319				90 * p->drain_bits_per_1024ns / 100;
 320	}
 321
 322	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
 323		p->drain_bits_per_1024ns -= 16;
 324		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
 325		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
 326	} else {
 327		s->max_avail = 16384;
 328		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
 329	}
 330
 331	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
 332		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
 333		 p->speed, s->max_avail, max_avail_segs,
 334		 p->drain_bits_per_1024ns);
 335
 336	return max_avail_segs * (p->mtu - 40);
 337}
 338
 339#if 0
 340
 341/*
  342 * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
 343 * data that can be pushed per port.
 344 */
 345void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
 346{
 347	struct sched *s = sge->tx_sched;
 348	unsigned int i;
 349
 350	s->max_avail = val;
 351	for (i = 0; i < MAX_NPORTS; i++)
 352		t1_sched_update_parms(sge, i, 0, 0);
 353}
 354
 355/*
 356 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
 357 * is draining.
 358 */
 359void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
 360					 unsigned int val)
 361{
 362	struct sched *s = sge->tx_sched;
 363	struct sched_port *p = &s->p[port];
 364	p->drain_bits_per_1024ns = val * 1024 / 1000;
 365	t1_sched_update_parms(sge, port, 0, 0);
 366}
 367
 368#endif  /*  0  */
 369
 370
 371/*
 372 * get_clock() implements a ns clock (see ktime_get)
 373 */
 374static inline ktime_t get_clock(void)
 375{
 376	struct timespec ts;
 377
 378	ktime_get_ts(&ts);
 379	return timespec_to_ktime(ts);
 380}
 381
 382/*
 383 * tx_sched_init() allocates resources and does basic initialization.
 384 */
 385static int tx_sched_init(struct sge *sge)
 386{
 387	struct sched *s;
 388	int i;
 389
 390	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
 391	if (!s)
 392		return -ENOMEM;
 393
 394	pr_debug("tx_sched_init\n");
 395	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
 396	sge->tx_sched = s;
 397
 398	for (i = 0; i < MAX_NPORTS; i++) {
 399		skb_queue_head_init(&s->p[i].skbq);
 400		t1_sched_update_parms(sge, i, 1500, 1000);
 401	}
 402
 403	return 0;
 404}
 405
 406/*
 407 * sched_update_avail() computes the delta since the last time it was called
  408 * and updates the per-port quota (number of bits that can be sent to any
 409 * port).
 410 */
 411static inline int sched_update_avail(struct sge *sge)
 412{
 413	struct sched *s = sge->tx_sched;
 414	ktime_t now = get_clock();
 415	unsigned int i;
 416	long long delta_time_ns;
 417
 418	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
 419
 420	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
 421	if (delta_time_ns < 15000)
 422		return 0;
 423
 424	for (i = 0; i < MAX_NPORTS; i++) {
 425		struct sched_port *p = &s->p[i];
 426		unsigned int delta_avail;
 427
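		/* The right shift by 13 folds in both the 1024 ns time base
		 * (>> 10) and a bits-to-bytes conversion (>> 3); the resulting
		 * quota is what sched_skb() compares against skb->len.
		 */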
 428		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
 429		p->avail = min(p->avail + delta_avail, s->max_avail);
 430	}
 431
 432	s->last_updated = now;
 433
 434	return 1;
 435}
 436
 437/*
 438 * sched_skb() is called from two different places. In the tx path, any
 439 * packet generating load on an output port will call sched_skb()
 440 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
 441 * context (skb == NULL).
 442 * The scheduler only returns a skb (which will then be sent) if the
 443 * length of the skb is <= the current quota of the output port.
 444 */
 445static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
 446				unsigned int credits)
 447{
 448	struct sched *s = sge->tx_sched;
 449	struct sk_buff_head *skbq;
 450	unsigned int i, len, update = 1;
 451
 452	pr_debug("sched_skb %p\n", skb);
 453	if (!skb) {
 454		if (!s->num)
 455			return NULL;
 456	} else {
 457		skbq = &s->p[skb->dev->if_port].skbq;
 458		__skb_queue_tail(skbq, skb);
 459		s->num++;
 460		skb = NULL;
 461	}
 462
 463	if (credits < MAX_SKB_FRAGS + 1)
 464		goto out;
 465
 466again:
 467	for (i = 0; i < MAX_NPORTS; i++) {
 468		s->port = (s->port + 1) & (MAX_NPORTS - 1);
 469		skbq = &s->p[s->port].skbq;
 470
 471		skb = skb_peek(skbq);
 472
 473		if (!skb)
 474			continue;
 475
 476		len = skb->len;
 477		if (len <= s->p[s->port].avail) {
 478			s->p[s->port].avail -= len;
 479			s->num--;
 480			__skb_unlink(skb, skbq);
 481			goto out;
 482		}
 483		skb = NULL;
 484	}
 485
 486	if (update-- && sched_update_avail(sge))
 487		goto again;
 488
 489out:
 490	/* If there are more pending skbs, we use the hardware to schedule us
 491	 * again.
 492	 */
 493	if (s->num && !skb) {
 494		struct cmdQ *q = &sge->cmdQ[0];
 495		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
 496		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
 497			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
 498			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
 499		}
 500	}
 501	pr_debug("sched_skb ret %p\n", skb);
 502
 503	return skb;
 504}
 505
 506/*
 507 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 508 */
 509static inline void doorbell_pio(struct adapter *adapter, u32 val)
 510{
 511	wmb();
 512	writel(val, adapter->regs + A_SG_DOORBELL);
 513}
 514
 515/*
 516 * Frees all RX buffers on the freelist Q. The caller must make sure that
 517 * the SGE is turned off before calling this function.
 518 */
 519static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
 520{
 521	unsigned int cidx = q->cidx;
 522
 523	while (q->credits--) {
 524		struct freelQ_ce *ce = &q->centries[cidx];
 525
 526		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
 527				 dma_unmap_len(ce, dma_len),
 528				 PCI_DMA_FROMDEVICE);
 529		dev_kfree_skb(ce->skb);
 530		ce->skb = NULL;
 531		if (++cidx == q->size)
 532			cidx = 0;
 533	}
 534}
 535
 536/*
 537 * Free RX free list and response queue resources.
 538 */
 539static void free_rx_resources(struct sge *sge)
 540{
 541	struct pci_dev *pdev = sge->adapter->pdev;
 542	unsigned int size, i;
 543
 544	if (sge->respQ.entries) {
 545		size = sizeof(struct respQ_e) * sge->respQ.size;
 546		pci_free_consistent(pdev, size, sge->respQ.entries,
 547				    sge->respQ.dma_addr);
 548	}
 549
 550	for (i = 0; i < SGE_FREELQ_N; i++) {
 551		struct freelQ *q = &sge->freelQ[i];
 552
 553		if (q->centries) {
 554			free_freelQ_buffers(pdev, q);
 555			kfree(q->centries);
 556		}
 557		if (q->entries) {
 558			size = sizeof(struct freelQ_e) * q->size;
 559			pci_free_consistent(pdev, size, q->entries,
 560					    q->dma_addr);
 561		}
 562	}
 563}
 564
 565/*
 566 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 567 * response queue.
 568 */
 569static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
 570{
 571	struct pci_dev *pdev = sge->adapter->pdev;
 572	unsigned int size, i;
 573
 574	for (i = 0; i < SGE_FREELQ_N; i++) {
 575		struct freelQ *q = &sge->freelQ[i];
 576
 577		q->genbit = 1;
 578		q->size = p->freelQ_size[i];
 579		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
 580		size = sizeof(struct freelQ_e) * q->size;
 581		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
 582		if (!q->entries)
 583			goto err_no_mem;
 584
 585		size = sizeof(struct freelQ_ce) * q->size;
 586		q->centries = kzalloc(size, GFP_KERNEL);
 587		if (!q->centries)
 588			goto err_no_mem;
 589	}
 590
 591	/*
 592	 * Calculate the buffer sizes for the two free lists.  FL0 accommodates
 593	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
 594	 * including all the sk_buff overhead.
 595	 *
 596	 * Note: For T2 FL0 and FL1 are reversed.
 597	 */
 598	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
 599		sizeof(struct cpl_rx_data) +
 600		sge->freelQ[!sge->jumbo_fl].dma_offset;
 601
  602	size = (16 * 1024) -
  603	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 604
 605	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
 606
 607	/*
 608	 * Setup which skb recycle Q should be used when recycling buffers from
 609	 * each free list.
 610	 */
 611	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
 612	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
 613
 614	sge->respQ.genbit = 1;
 615	sge->respQ.size = SGE_RESPQ_E_N;
 616	sge->respQ.credits = 0;
 617	size = sizeof(struct respQ_e) * sge->respQ.size;
 618	sge->respQ.entries =
 619		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
 620	if (!sge->respQ.entries)
 621		goto err_no_mem;
 622	return 0;
 623
 624err_no_mem:
 625	free_rx_resources(sge);
 626	return -ENOMEM;
 627}
 628
 629/*
 630 * Reclaims n TX descriptors and frees the buffers associated with them.
 631 */
 632static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
 633{
 634	struct cmdQ_ce *ce;
 635	struct pci_dev *pdev = sge->adapter->pdev;
 636	unsigned int cidx = q->cidx;
 637
 638	q->in_use -= n;
 639	ce = &q->centries[cidx];
 640	while (n--) {
 641		if (likely(dma_unmap_len(ce, dma_len))) {
 642			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
 643					 dma_unmap_len(ce, dma_len),
 644					 PCI_DMA_TODEVICE);
 645			if (q->sop)
 646				q->sop = 0;
 647		}
 648		if (ce->skb) {
 649			dev_kfree_skb_any(ce->skb);
 650			q->sop = 1;
 651		}
 652		ce++;
 653		if (++cidx == q->size) {
 654			cidx = 0;
 655			ce = q->centries;
 656		}
 657	}
 658	q->cidx = cidx;
 659}
 660
 661/*
 662 * Free TX resources.
 663 *
 664 * Assumes that SGE is stopped and all interrupts are disabled.
 665 */
 666static void free_tx_resources(struct sge *sge)
 667{
 668	struct pci_dev *pdev = sge->adapter->pdev;
 669	unsigned int size, i;
 670
 671	for (i = 0; i < SGE_CMDQ_N; i++) {
 672		struct cmdQ *q = &sge->cmdQ[i];
 673
 674		if (q->centries) {
 675			if (q->in_use)
 676				free_cmdQ_buffers(sge, q, q->in_use);
 677			kfree(q->centries);
 678		}
 679		if (q->entries) {
 680			size = sizeof(struct cmdQ_e) * q->size;
 681			pci_free_consistent(pdev, size, q->entries,
 682					    q->dma_addr);
 683		}
 684	}
 685}
 686
 687/*
 688 * Allocates basic TX resources, consisting of memory mapped command Qs.
 689 */
 690static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
 691{
 692	struct pci_dev *pdev = sge->adapter->pdev;
 693	unsigned int size, i;
 694
 695	for (i = 0; i < SGE_CMDQ_N; i++) {
 696		struct cmdQ *q = &sge->cmdQ[i];
 697
 698		q->genbit = 1;
 699		q->sop = 1;
 700		q->size = p->cmdQ_size[i];
 701		q->in_use = 0;
 702		q->status = 0;
 703		q->processed = q->cleaned = 0;
 704		q->stop_thres = 0;
 705		spin_lock_init(&q->lock);
 706		size = sizeof(struct cmdQ_e) * q->size;
 707		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
 708		if (!q->entries)
 709			goto err_no_mem;
 710
 711		size = sizeof(struct cmdQ_ce) * q->size;
 712		q->centries = kzalloc(size, GFP_KERNEL);
 713		if (!q->centries)
 714			goto err_no_mem;
 715	}
 716
 717	/*
 718	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
 719	 * only.  For queue 0 set the stop threshold so we can handle one more
 720	 * packet from each port, plus reserve an additional 24 entries for
 721	 * Ethernet packets only.  Queue 1 never suspends nor do we reserve
 722	 * space for Ethernet packets.
 723	 */
 724	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
 725		(MAX_SKB_FRAGS + 1);
 726	return 0;
 727
 728err_no_mem:
 729	free_tx_resources(sge);
 730	return -ENOMEM;
 731}
 732
 733static inline void setup_ring_params(struct adapter *adapter, u64 addr,
 734				     u32 size, int base_reg_lo,
 735				     int base_reg_hi, int size_reg)
 736{
 737	writel((u32)addr, adapter->regs + base_reg_lo);
 738	writel(addr >> 32, adapter->regs + base_reg_hi);
 739	writel(size, adapter->regs + size_reg);
 740}
 741
 742/*
 743 * Enable/disable VLAN acceleration.
 744 */
 745void t1_vlan_mode(struct adapter *adapter, u32 features)
 746{
 747	struct sge *sge = adapter->sge;
 748
 749	if (features & NETIF_F_HW_VLAN_RX)
 750		sge->sge_control |= F_VLAN_XTRACT;
 751	else
 752		sge->sge_control &= ~F_VLAN_XTRACT;
 753	if (adapter->open_device_map) {
 754		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
 755		readl(adapter->regs + A_SG_CONTROL);   /* flush */
 756	}
 757}
 758
 759/*
  760 * Programs the various SGE registers. The engine is not yet enabled, but
  761 * sge->sge_control is set up and ready to go.
 762 */
 763static void configure_sge(struct sge *sge, struct sge_params *p)
 764{
 765	struct adapter *ap = sge->adapter;
 766
 767	writel(0, ap->regs + A_SG_CONTROL);
 768	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
 769			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
 770	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
 771			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
 772	setup_ring_params(ap, sge->freelQ[0].dma_addr,
 773			  sge->freelQ[0].size, A_SG_FL0BASELWR,
 774			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
 775	setup_ring_params(ap, sge->freelQ[1].dma_addr,
 776			  sge->freelQ[1].size, A_SG_FL1BASELWR,
 777			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);
 778
 779	/* The threshold comparison uses <. */
 780	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
 781
 782	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
 783			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
 784	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
 785
 786	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
 787		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
 788		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
 789		V_RX_PKT_OFFSET(sge->rx_pkt_pad);
 790
 791#if defined(__BIG_ENDIAN_BITFIELD)
 792	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
 793#endif
 794
 795	/* Initialize no-resource timer */
 796	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
 797
 798	t1_sge_set_coalesce_params(sge, p);
 799}
 800
 801/*
 802 * Return the payload capacity of the jumbo free-list buffers.
 803 */
 804static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
 805{
 806	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
 807		sge->freelQ[sge->jumbo_fl].dma_offset -
 808		sizeof(struct cpl_rx_data);
 809}
 810
 811/*
 812 * Frees all SGE related resources and the sge structure itself
 813 */
 814void t1_sge_destroy(struct sge *sge)
 815{
 816	int i;
 817
 818	for_each_port(sge->adapter, i)
 819		free_percpu(sge->port_stats[i]);
 820
 821	kfree(sge->tx_sched);
 822	free_tx_resources(sge);
 823	free_rx_resources(sge);
 824	kfree(sge);
 825}
 826
 827/*
 828 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 829 * context Q) until the Q is full or alloc_skb fails.
 830 *
 831 * It is possible that the generation bits already match, indicating that the
 832 * buffer is already valid and nothing needs to be done. This happens when we
 833 * copied a received buffer into a new sk_buff during the interrupt processing.
 834 *
 835 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 836 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 837 * aligned.
 838 */
 839static void refill_free_list(struct sge *sge, struct freelQ *q)
 840{
 841	struct pci_dev *pdev = sge->adapter->pdev;
 842	struct freelQ_ce *ce = &q->centries[q->pidx];
 843	struct freelQ_e *e = &q->entries[q->pidx];
 844	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
 845
 846	while (q->credits < q->size) {
 847		struct sk_buff *skb;
 848		dma_addr_t mapping;
 849
 850		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
 851		if (!skb)
 852			break;
 853
 854		skb_reserve(skb, q->dma_offset);
 855		mapping = pci_map_single(pdev, skb->data, dma_len,
 856					 PCI_DMA_FROMDEVICE);
 857		skb_reserve(skb, sge->rx_pkt_pad);
 858
 859		ce->skb = skb;
 860		dma_unmap_addr_set(ce, dma_addr, mapping);
 861		dma_unmap_len_set(ce, dma_len, dma_len);
 862		e->addr_lo = (u32)mapping;
 863		e->addr_hi = (u64)mapping >> 32;
 864		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
 865		wmb();
 866		e->gen2 = V_CMD_GEN2(q->genbit);
 867
 868		e++;
 869		ce++;
 870		if (++q->pidx == q->size) {
 871			q->pidx = 0;
 872			q->genbit ^= 1;
 873			ce = q->centries;
 874			e = q->entries;
 875		}
 876		q->credits++;
 877	}
 878}
 879
 880/*
 881 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 882 * of both rings, we go into 'few interrupt mode' in order to give the system
 883 * time to free up resources.
 884 */
 885static void freelQs_empty(struct sge *sge)
 886{
 887	struct adapter *adapter = sge->adapter;
 888	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
 889	u32 irqholdoff_reg;
 890
 891	refill_free_list(sge, &sge->freelQ[0]);
 892	refill_free_list(sge, &sge->freelQ[1]);
 893
 894	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
 895	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
 896		irq_reg |= F_FL_EXHAUSTED;
 897		irqholdoff_reg = sge->fixed_intrtimer;
 898	} else {
 899		/* Clear the F_FL_EXHAUSTED interrupts for now */
 900		irq_reg &= ~F_FL_EXHAUSTED;
 901		irqholdoff_reg = sge->intrtimer_nres;
 902	}
 903	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
 904	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
 905
 906	/* We reenable the Qs to force a freelist GTS interrupt later */
 907	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
 908}
 909
 910#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
 911#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
 912#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
 913			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
 914
 915/*
 916 * Disable SGE Interrupts
 917 */
 918void t1_sge_intr_disable(struct sge *sge)
 919{
 920	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
 921
 922	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
 923	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
 924}
 925
 926/*
 927 * Enable SGE interrupts.
 928 */
 929void t1_sge_intr_enable(struct sge *sge)
 930{
 931	u32 en = SGE_INT_ENABLE;
 932	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
 933
 934	if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
 935		en &= ~F_PACKET_TOO_BIG;
 936	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
 937	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
 938}
 939
 940/*
 941 * Clear SGE interrupts.
 942 */
 943void t1_sge_intr_clear(struct sge *sge)
 944{
 945	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
 946	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
 947}
 948
 949/*
 950 * SGE 'Error' interrupt handler
 951 */
 952int t1_sge_intr_error_handler(struct sge *sge)
 953{
 954	struct adapter *adapter = sge->adapter;
 955	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
 956
 957	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
 958		cause &= ~F_PACKET_TOO_BIG;
 959	if (cause & F_RESPQ_EXHAUSTED)
 960		sge->stats.respQ_empty++;
 961	if (cause & F_RESPQ_OVERFLOW) {
 962		sge->stats.respQ_overflow++;
 963		pr_alert("%s: SGE response queue overflow\n",
 964			 adapter->name);
 965	}
 966	if (cause & F_FL_EXHAUSTED) {
 967		sge->stats.freelistQ_empty++;
 968		freelQs_empty(sge);
 969	}
 970	if (cause & F_PACKET_TOO_BIG) {
 971		sge->stats.pkt_too_big++;
 972		pr_alert("%s: SGE max packet size exceeded\n",
 973			 adapter->name);
 974	}
 975	if (cause & F_PACKET_MISMATCH) {
 976		sge->stats.pkt_mismatch++;
 977		pr_alert("%s: SGE packet mismatch\n", adapter->name);
 978	}
 979	if (cause & SGE_INT_FATAL)
 980		t1_fatal_err(adapter);
 981
 982	writel(cause, adapter->regs + A_SG_INT_CAUSE);
 983	return 0;
 984}
 985
 986const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
 987{
 988	return &sge->stats;
 989}
 990
 991void t1_sge_get_port_stats(const struct sge *sge, int port,
 992			   struct sge_port_stats *ss)
 993{
 994	int cpu;
 995
 996	memset(ss, 0, sizeof(*ss));
 997	for_each_possible_cpu(cpu) {
 998		struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
 999
1000		ss->rx_cso_good += st->rx_cso_good;
1001		ss->tx_cso += st->tx_cso;
1002		ss->tx_tso += st->tx_tso;
1003		ss->tx_need_hdrroom += st->tx_need_hdrroom;
1004		ss->vlan_xtract += st->vlan_xtract;
1005		ss->vlan_insert += st->vlan_insert;
1006	}
1007}
1008
1009/**
1010 *	recycle_fl_buf - recycle a free list buffer
1011 *	@fl: the free list
1012 *	@idx: index of buffer to recycle
1013 *
1014 *	Recycles the specified buffer on the given free list by adding it at
1015 *	the next available slot on the list.
1016 */
1017static void recycle_fl_buf(struct freelQ *fl, int idx)
1018{
1019	struct freelQ_e *from = &fl->entries[idx];
1020	struct freelQ_e *to = &fl->entries[fl->pidx];
1021
1022	fl->centries[fl->pidx] = fl->centries[idx];
1023	to->addr_lo = from->addr_lo;
1024	to->addr_hi = from->addr_hi;
1025	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
1026	wmb();
1027	to->gen2 = V_CMD_GEN2(fl->genbit);
1028	fl->credits++;
1029
1030	if (++fl->pidx == fl->size) {
1031		fl->pidx = 0;
1032		fl->genbit ^= 1;
1033	}
1034}
1035
1036static int copybreak __read_mostly = 256;
1037module_param(copybreak, int, 0);
1038MODULE_PARM_DESC(copybreak, "Receive copy threshold");
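/*
 * copybreak can only be set at module load time, e.g.
 * "modprobe cxgb copybreak=128" (module name and value shown for
 * illustration); received packets shorter than this threshold are copied
 * into a freshly allocated skb by get_packet() and the original DMA buffer
 * is recycled.
 */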
1039
1040/**
1041 *	get_packet - return the next ingress packet buffer
1042 *	@pdev: the PCI device that received the packet
1043 *	@fl: the SGE free list holding the packet
1044 *	@len: the actual packet length, excluding any SGE padding
1045 *
1046 *	Get the next packet from a free list and complete setup of the
1047 *	sk_buff.  If the packet is small we make a copy and recycle the
1048 *	original buffer, otherwise we use the original buffer itself.  If a
1049 *	positive drop threshold is supplied packets are dropped and their
1050 *	buffers recycled if (a) the number of remaining buffers is under the
1051 *	threshold and the packet is too big to copy, or (b) the packet should
1052 *	be copied but there is no memory for the copy.
1053 */
1054static inline struct sk_buff *get_packet(struct pci_dev *pdev,
1055					 struct freelQ *fl, unsigned int len)
1056{
1057	struct sk_buff *skb;
1058	const struct freelQ_ce *ce = &fl->centries[fl->cidx];
1059
1060	if (len < copybreak) {
1061		skb = alloc_skb(len + 2, GFP_ATOMIC);
1062		if (!skb)
1063			goto use_orig_buf;
1064
1065		skb_reserve(skb, 2);	/* align IP header */
1066		skb_put(skb, len);
1067		pci_dma_sync_single_for_cpu(pdev,
1068					    dma_unmap_addr(ce, dma_addr),
1069					    dma_unmap_len(ce, dma_len),
1070					    PCI_DMA_FROMDEVICE);
1071		skb_copy_from_linear_data(ce->skb, skb->data, len);
1072		pci_dma_sync_single_for_device(pdev,
1073					       dma_unmap_addr(ce, dma_addr),
1074					       dma_unmap_len(ce, dma_len),
1075					       PCI_DMA_FROMDEVICE);
1076		recycle_fl_buf(fl, fl->cidx);
1077		return skb;
1078	}
1079
1080use_orig_buf:
1081	if (fl->credits < 2) {
1082		recycle_fl_buf(fl, fl->cidx);
1083		return NULL;
1084	}
1085
1086	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
1087			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1088	skb = ce->skb;
1089	prefetch(skb->data);
1090
1091	skb_put(skb, len);
1092	return skb;
1093}
1094
1095/**
1096 *	unexpected_offload - handle an unexpected offload packet
1097 *	@adapter: the adapter
1098 *	@fl: the free list that received the packet
1099 *
1100 *	Called when we receive an unexpected offload packet (e.g., the TOE
1101 *	function is disabled or the card is a NIC).  Prints a message and
1102 *	recycles the buffer.
1103 */
1104static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
1105{
1106	struct freelQ_ce *ce = &fl->centries[fl->cidx];
1107	struct sk_buff *skb = ce->skb;
1108
1109	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
1110			    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1111	pr_err("%s: unexpected offload packet, cmd %u\n",
1112	       adapter->name, *skb->data);
1113	recycle_fl_buf(fl, fl->cidx);
1114}
1115
1116/*
1117 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
1118 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
1119 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
1120 * Note that the *_large_page_tx_descs stuff will be optimized out when
1121 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
1122 *
 1123 * compute_large_page_tx_descs() computes how many additional descriptors are
1124 * required to break down the stack's request.
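 * As an illustrative example: on a machine with 64KB pages, a 40KB
 * contiguous region needs two such extra descriptors (16KB + 16KB, with the
 * remaining 8KB carried by the regular descriptor).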
1125 */
1126static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
1127{
1128	unsigned int count = 0;
1129
1130	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1131		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
1132		unsigned int i, len = skb_headlen(skb);
1133		while (len > SGE_TX_DESC_MAX_PLEN) {
1134			count++;
1135			len -= SGE_TX_DESC_MAX_PLEN;
1136		}
1137		for (i = 0; nfrags--; i++) {
1138			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1139			len = frag->size;
1140			while (len > SGE_TX_DESC_MAX_PLEN) {
1141				count++;
1142				len -= SGE_TX_DESC_MAX_PLEN;
1143			}
1144		}
1145	}
1146	return count;
1147}
1148
1149/*
1150 * Write a cmdQ entry.
1151 *
1152 * Since this function writes the 'flags' field, it must not be used to
1153 * write the first cmdQ entry.
1154 */
1155static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
1156				 unsigned int len, unsigned int gen,
1157				 unsigned int eop)
1158{
1159	BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
1160
1161	e->addr_lo = (u32)mapping;
1162	e->addr_hi = (u64)mapping >> 32;
1163	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
1164	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
1165}
1166
1167/*
1168 * See comment for previous function.
1169 *
 1170 * write_large_page_tx_descs() writes additional SGE Tx descriptors if
1171 * *desc_len exceeds HW's capability.
1172 */
1173static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
1174						     struct cmdQ_e **e,
1175						     struct cmdQ_ce **ce,
1176						     unsigned int *gen,
1177						     dma_addr_t *desc_mapping,
1178						     unsigned int *desc_len,
1179						     unsigned int nfrags,
1180						     struct cmdQ *q)
1181{
1182	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1183		struct cmdQ_e *e1 = *e;
1184		struct cmdQ_ce *ce1 = *ce;
1185
1186		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
1187			*desc_len -= SGE_TX_DESC_MAX_PLEN;
1188			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
1189				      *gen, nfrags == 0 && *desc_len == 0);
1190			ce1->skb = NULL;
1191			dma_unmap_len_set(ce1, dma_len, 0);
1192			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
1193			if (*desc_len) {
1194				ce1++;
1195				e1++;
1196				if (++pidx == q->size) {
1197					pidx = 0;
1198					*gen ^= 1;
1199					ce1 = q->centries;
1200					e1 = q->entries;
1201				}
1202			}
1203		}
1204		*e = e1;
1205		*ce = ce1;
1206	}
1207	return pidx;
1208}
1209
1210/*
1211 * Write the command descriptors to transmit the given skb starting at
1212 * descriptor pidx with the given generation.
1213 */
1214static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1215				  unsigned int pidx, unsigned int gen,
1216				  struct cmdQ *q)
1217{
1218	dma_addr_t mapping, desc_mapping;
1219	struct cmdQ_e *e, *e1;
1220	struct cmdQ_ce *ce;
1221	unsigned int i, flags, first_desc_len, desc_len,
1222	    nfrags = skb_shinfo(skb)->nr_frags;
1223
1224	e = e1 = &q->entries[pidx];
1225	ce = &q->centries[pidx];
1226
1227	mapping = pci_map_single(adapter->pdev, skb->data,
1228				 skb_headlen(skb), PCI_DMA_TODEVICE);
1229
1230	desc_mapping = mapping;
1231	desc_len = skb_headlen(skb);
1232
1233	flags = F_CMD_DATAVALID | F_CMD_SOP |
1234	    V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
1235	    V_CMD_GEN2(gen);
1236	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
1237	    desc_len : SGE_TX_DESC_MAX_PLEN;
1238	e->addr_lo = (u32)desc_mapping;
1239	e->addr_hi = (u64)desc_mapping >> 32;
1240	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
1241	ce->skb = NULL;
1242	dma_unmap_len_set(ce, dma_len, 0);
1243
1244	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
1245	    desc_len > SGE_TX_DESC_MAX_PLEN) {
1246		desc_mapping += first_desc_len;
1247		desc_len -= first_desc_len;
1248		e1++;
1249		ce++;
1250		if (++pidx == q->size) {
1251			pidx = 0;
1252			gen ^= 1;
1253			e1 = q->entries;
1254			ce = q->centries;
1255		}
1256		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
1257						 &desc_mapping, &desc_len,
1258						 nfrags, q);
1259
1260		if (likely(desc_len))
1261			write_tx_desc(e1, desc_mapping, desc_len, gen,
1262				      nfrags == 0);
1263	}
1264
1265	ce->skb = NULL;
1266	dma_unmap_addr_set(ce, dma_addr, mapping);
1267	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
1268
1269	for (i = 0; nfrags--; i++) {
1270		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1271		e1++;
1272		ce++;
1273		if (++pidx == q->size) {
1274			pidx = 0;
1275			gen ^= 1;
1276			e1 = q->entries;
1277			ce = q->centries;
1278		}
1279
1280		mapping = pci_map_page(adapter->pdev, frag->page,
1281				       frag->page_offset, frag->size,
1282				       PCI_DMA_TODEVICE);
1283		desc_mapping = mapping;
1284		desc_len = frag->size;
1285
1286		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
1287						 &desc_mapping, &desc_len,
1288						 nfrags, q);
1289		if (likely(desc_len))
1290			write_tx_desc(e1, desc_mapping, desc_len, gen,
1291				      nfrags == 0);
1292		ce->skb = NULL;
1293		dma_unmap_addr_set(ce, dma_addr, mapping);
1294		dma_unmap_len_set(ce, dma_len, frag->size);
1295	}
1296	ce->skb = skb;
1297	wmb();
1298	e->flags = flags;
1299}
1300
1301/*
1302 * Clean up completed Tx buffers.
1303 */
1304static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
1305{
1306	unsigned int reclaim = q->processed - q->cleaned;
1307
1308	if (reclaim) {
1309		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
1310			 q->processed, q->cleaned);
1311		free_cmdQ_buffers(sge, q, reclaim);
1312		q->cleaned += reclaim;
1313	}
1314}
1315
1316/*
1317 * Called from tasklet. Checks the scheduler for any
1318 * pending skbs that can be sent.
1319 */
1320static void restart_sched(unsigned long arg)
1321{
1322	struct sge *sge = (struct sge *) arg;
1323	struct adapter *adapter = sge->adapter;
1324	struct cmdQ *q = &sge->cmdQ[0];
1325	struct sk_buff *skb;
1326	unsigned int credits, queued_skb = 0;
1327
1328	spin_lock(&q->lock);
1329	reclaim_completed_tx(sge, q);
1330
1331	credits = q->size - q->in_use;
1332	pr_debug("restart_sched credits=%d\n", credits);
1333	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
1334		unsigned int genbit, pidx, count;
 1335		count = 1 + skb_shinfo(skb)->nr_frags;
1336		count += compute_large_page_tx_descs(skb);
1337		q->in_use += count;
1338		genbit = q->genbit;
1339		pidx = q->pidx;
1340		q->pidx += count;
1341		if (q->pidx >= q->size) {
1342			q->pidx -= q->size;
1343			q->genbit ^= 1;
1344		}
1345		write_tx_descs(adapter, skb, pidx, genbit, q);
 1346		credits = q->size - q->in_use;
1347		queued_skb = 1;
1348	}
1349
1350	if (queued_skb) {
1351		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1352		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1353			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1354			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1355		}
1356	}
1357	spin_unlock(&q->lock);
1358}
1359
1360/**
1361 *	sge_rx - process an ingress ethernet packet
1362 *	@sge: the sge structure
1363 *	@fl: the free list that contains the packet buffer
1364 *	@len: the packet length
1365 *
 1366 *	Process an ingress Ethernet packet and deliver it to the stack.
1367 */
1368static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1369{
1370	struct sk_buff *skb;
1371	const struct cpl_rx_pkt *p;
1372	struct adapter *adapter = sge->adapter;
1373	struct sge_port_stats *st;
1374	struct net_device *dev;
1375
1376	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
1377	if (unlikely(!skb)) {
1378		sge->stats.rx_drops++;
1379		return;
1380	}
1381
1382	p = (const struct cpl_rx_pkt *) skb->data;
1383	if (p->iff >= adapter->params.nports) {
1384		kfree_skb(skb);
1385		return;
1386	}
1387	__skb_pull(skb, sizeof(*p));
1388
1389	st = this_cpu_ptr(sge->port_stats[p->iff]);
1390	dev = adapter->port[p->iff].dev;
1391
1392	skb->protocol = eth_type_trans(skb, dev);
1393	if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
1394	    skb->protocol == htons(ETH_P_IP) &&
1395	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
1396		++st->rx_cso_good;
1397		skb->ip_summed = CHECKSUM_UNNECESSARY;
1398	} else
1399		skb_checksum_none_assert(skb);
1400
1401	if (p->vlan_valid) {
1402		st->vlan_xtract++;
1403		__vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
1404	}
1405	netif_receive_skb(skb);
1406}
1407
1408/*
1409 * Returns true if a command queue has enough available descriptors that
1410 * we can resume Tx operation after temporarily disabling its packet queue.
1411 */
1412static inline int enough_free_Tx_descs(const struct cmdQ *q)
1413{
1414	unsigned int r = q->processed - q->cleaned;
1415
1416	return q->in_use - r < (q->size >> 1);
1417}
1418
1419/*
1420 * Called when sufficient space has become available in the SGE command queues
1421 * after the Tx packet schedulers have been suspended to restart the Tx path.
1422 */
1423static void restart_tx_queues(struct sge *sge)
1424{
1425	struct adapter *adap = sge->adapter;
1426	int i;
1427
1428	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
1429		return;
1430
1431	for_each_port(adap, i) {
1432		struct net_device *nd = adap->port[i].dev;
1433
1434		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
1435		    netif_running(nd)) {
1436			sge->stats.cmdQ_restarted[2]++;
1437			netif_wake_queue(nd);
1438		}
1439	}
1440}
1441
1442/*
 1443 * update_tx_info() is called from the interrupt handler/NAPI to process the
 1444 * cmdQ0 credits returned by the hardware and to restart suspended Tx queues.
1445 */
1446static unsigned int update_tx_info(struct adapter *adapter,
1447					  unsigned int flags,
1448					  unsigned int pr0)
1449{
1450	struct sge *sge = adapter->sge;
1451	struct cmdQ *cmdq = &sge->cmdQ[0];
1452
1453	cmdq->processed += pr0;
1454	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
1455		freelQs_empty(sge);
1456		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
1457	}
1458	if (flags & F_CMDQ0_ENABLE) {
1459		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1460
1461		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
1462		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
1463			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1464			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1465		}
1466		if (sge->tx_sched)
1467			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
1468
1469		flags &= ~F_CMDQ0_ENABLE;
1470	}
1471
1472	if (unlikely(sge->stopped_tx_queues != 0))
1473		restart_tx_queues(sge);
1474
1475	return flags;
1476}
1477
1478/*
1479 * Process SGE responses, up to the supplied budget.  Returns the number of
1480 * responses processed.  A negative budget is effectively unlimited.
1481 */
1482static int process_responses(struct adapter *adapter, int budget)
1483{
1484	struct sge *sge = adapter->sge;
1485	struct respQ *q = &sge->respQ;
1486	struct respQ_e *e = &q->entries[q->cidx];
1487	int done = 0;
1488	unsigned int flags = 0;
1489	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1490
1491	while (done < budget && e->GenerationBit == q->genbit) {
1492		flags |= e->Qsleeping;
1493
1494		cmdq_processed[0] += e->Cmdq0CreditReturn;
1495		cmdq_processed[1] += e->Cmdq1CreditReturn;
1496
1497		/* We batch updates to the TX side to avoid cacheline
1498		 * ping-pong of TX state information on MP where the sender
1499		 * might run on a different CPU than this function...
1500		 */
1501		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
1502			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1503			cmdq_processed[0] = 0;
1504		}
1505
1506		if (unlikely(cmdq_processed[1] > 16)) {
1507			sge->cmdQ[1].processed += cmdq_processed[1];
1508			cmdq_processed[1] = 0;
1509		}
1510
1511		if (likely(e->DataValid)) {
1512			struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1513
1514			BUG_ON(!e->Sop || !e->Eop);
1515			if (unlikely(e->Offload))
1516				unexpected_offload(adapter, fl);
1517			else
1518				sge_rx(sge, fl, e->BufferLength);
1519
1520			++done;
1521
1522			/*
1523			 * Note: this depends on each packet consuming a
1524			 * single free-list buffer; cf. the BUG above.
1525			 */
1526			if (++fl->cidx == fl->size)
1527				fl->cidx = 0;
1528			prefetch(fl->centries[fl->cidx].skb);
1529
1530			if (unlikely(--fl->credits <
1531				     fl->size - SGE_FREEL_REFILL_THRESH))
1532				refill_free_list(sge, fl);
1533		} else
1534			sge->stats.pure_rsps++;
1535
1536		e++;
1537		if (unlikely(++q->cidx == q->size)) {
1538			q->cidx = 0;
1539			q->genbit ^= 1;
1540			e = q->entries;
1541		}
1542		prefetch(e);
1543
1544		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1545			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1546			q->credits = 0;
1547		}
1548	}
1549
1550	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1551	sge->cmdQ[1].processed += cmdq_processed[1];
1552
1553	return done;
1554}
1555
1556static inline int responses_pending(const struct adapter *adapter)
1557{
1558	const struct respQ *Q = &adapter->sge->respQ;
1559	const struct respQ_e *e = &Q->entries[Q->cidx];
1560
1561	return e->GenerationBit == Q->genbit;
1562}
1563
1564/*
1565 * A simpler version of process_responses() that handles only pure (i.e.,
 1566 * non data-carrying) responses.  Such responses are too lightweight to justify
1567 * calling a softirq when using NAPI, so we handle them specially in hard
 1568 * interrupt context.  The caller must ensure that a response is pending
 1569 * before invoking this function.  Returns 1 if it
1570 * encounters a valid data-carrying response, 0 otherwise.
1571 */
1572static int process_pure_responses(struct adapter *adapter)
1573{
1574	struct sge *sge = adapter->sge;
1575	struct respQ *q = &sge->respQ;
1576	struct respQ_e *e = &q->entries[q->cidx];
1577	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1578	unsigned int flags = 0;
1579	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1580
1581	prefetch(fl->centries[fl->cidx].skb);
1582	if (e->DataValid)
1583		return 1;
1584
1585	do {
1586		flags |= e->Qsleeping;
1587
1588		cmdq_processed[0] += e->Cmdq0CreditReturn;
1589		cmdq_processed[1] += e->Cmdq1CreditReturn;
1590
1591		e++;
1592		if (unlikely(++q->cidx == q->size)) {
1593			q->cidx = 0;
1594			q->genbit ^= 1;
1595			e = q->entries;
1596		}
1597		prefetch(e);
1598
1599		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1600			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1601			q->credits = 0;
1602		}
1603		sge->stats.pure_rsps++;
1604	} while (e->GenerationBit == q->genbit && !e->DataValid);
1605
1606	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1607	sge->cmdQ[1].processed += cmdq_processed[1];
1608
1609	return e->GenerationBit == q->genbit;
1610}
1611
1612/*
1613 * Handler for new data events when using NAPI.  This does not need any locking
1614 * or protection from interrupts as data interrupts are off at this point and
1615 * other adapter interrupts do not interfere.
1616 */
1617int t1_poll(struct napi_struct *napi, int budget)
1618{
1619	struct adapter *adapter = container_of(napi, struct adapter, napi);
1620	int work_done = process_responses(adapter, budget);
1621
1622	if (likely(work_done < budget)) {
1623		napi_complete(napi);
1624		writel(adapter->sge->respQ.cidx,
1625		       adapter->regs + A_SG_SLEEPING);
1626	}
1627	return work_done;
1628}
1629
1630irqreturn_t t1_interrupt(int irq, void *data)
1631{
1632	struct adapter *adapter = data;
1633	struct sge *sge = adapter->sge;
1634	int handled;
1635
1636	if (likely(responses_pending(adapter))) {
1637		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1638
1639		if (napi_schedule_prep(&adapter->napi)) {
1640			if (process_pure_responses(adapter))
1641				__napi_schedule(&adapter->napi);
1642			else {
1643				/* no data, no NAPI needed */
1644				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1645				/* undo schedule_prep */
1646				napi_enable(&adapter->napi);
1647			}
1648		}
1649		return IRQ_HANDLED;
1650	}
1651
1652	spin_lock(&adapter->async_lock);
1653	handled = t1_slow_intr_handler(adapter);
1654	spin_unlock(&adapter->async_lock);
1655
1656	if (!handled)
1657		sge->stats.unhandled_irqs++;
1658
1659	return IRQ_RETVAL(handled != 0);
1660}
1661
1662/*
1663 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
1664 *
1665 * The code figures out how many entries the sk_buff will require in the
1666 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1667 * has complete. Then, it doesn't access the global structure anymore, but
1668 * uses the corresponding fields on the stack. In conjunction with a spinlock
1669 * around that code, we can make the function reentrant without holding the
1670 * lock when we actually enqueue (which might be expensive, especially on
1671 * architectures with IO MMUs).
1672 *
1673 * This runs with softirqs disabled.
1674 */
1675static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1676		     unsigned int qid, struct net_device *dev)
1677{
1678	struct sge *sge = adapter->sge;
1679	struct cmdQ *q = &sge->cmdQ[qid];
1680	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
1681
1682	if (!spin_trylock(&q->lock))
1683		return NETDEV_TX_LOCKED;
1684
1685	reclaim_completed_tx(sge, q);
1686
1687	pidx = q->pidx;
1688	credits = q->size - q->in_use;
1689	count = 1 + skb_shinfo(skb)->nr_frags;
1690	count += compute_large_page_tx_descs(skb);
1691
1692	/* Ethernet packet */
1693	if (unlikely(credits < count)) {
1694		if (!netif_queue_stopped(dev)) {
1695			netif_stop_queue(dev);
1696			set_bit(dev->if_port, &sge->stopped_tx_queues);
1697			sge->stats.cmdQ_full[2]++;
1698			pr_err("%s: Tx ring full while queue awake!\n",
1699			       adapter->name);
1700		}
1701		spin_unlock(&q->lock);
1702		return NETDEV_TX_BUSY;
1703	}
1704
1705	if (unlikely(credits - count < q->stop_thres)) {
1706		netif_stop_queue(dev);
1707		set_bit(dev->if_port, &sge->stopped_tx_queues);
1708		sge->stats.cmdQ_full[2]++;
1709	}
1710
1711	/* T204 cmdQ0 skbs that are destined for a certain port have to go
1712	 * through the scheduler.
1713	 */
1714	if (sge->tx_sched && !qid && skb->dev) {
1715use_sched:
1716		use_sched_skb = 1;
1717		/* Note that the scheduler might return a different skb than
1718		 * the one passed in.
1719		 */
1720		skb = sched_skb(sge, skb, credits);
1721		if (!skb) {
1722			spin_unlock(&q->lock);
1723			return NETDEV_TX_OK;
1724		}
1725		pidx = q->pidx;
1726		count = 1 + skb_shinfo(skb)->nr_frags;
1727		count += compute_large_page_tx_descs(skb);
1728	}
1729
1730	q->in_use += count;
1731	genbit = q->genbit;
1732	pidx = q->pidx;
1733	q->pidx += count;
1734	if (q->pidx >= q->size) {
1735		q->pidx -= q->size;
1736		q->genbit ^= 1;
1737	}
1738	spin_unlock(&q->lock);
1739
1740	write_tx_descs(adapter, skb, pidx, genbit, q);
1741
1742	/*
1743	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
1744	 * the doorbell if the Q is asleep. There is a natural race, where
1745	 * the hardware is going to sleep just after we checked, however,
1746	 * then the interrupt handler will detect the outstanding TX packet
1747	 * and ring the doorbell for us.
1748	 */
1749	if (qid)
1750		doorbell_pio(adapter, F_CMDQ1_ENABLE);
1751	else {
1752		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1753		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1754			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1755			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1756		}
1757	}
1758
1759	if (use_sched_skb) {
1760		if (spin_trylock(&q->lock)) {
1761			credits = q->size - q->in_use;
1762			skb = NULL;
1763			goto use_sched;
1764		}
1765	}
1766	return NETDEV_TX_OK;
1767}
1768
1769#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
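/*
 * MK_ETH_TYPE_MSS() packs the MSS into the low 14 bits and the Ethernet
 * framing type (CPL_ETH_II or CPL_ETH_II_VLAN) into the top 2 bits of the
 * 16-bit eth_type_mss field of the LSO header (see t1_start_xmit() below).
 */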
1770
1771/*
1772 *	eth_hdr_len - return the length of an Ethernet header
1773 *	@data: pointer to the start of the Ethernet header
1774 *
1775 *	Returns the length of an Ethernet header, including optional VLAN tag.
1776 */
1777static inline int eth_hdr_len(const void *data)
1778{
1779	const struct ethhdr *e = data;
1780
1781	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
1782}
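
/*
 * Note (added for clarity): with the standard definitions, ETH_HLEN is 14
 * bytes and VLAN_ETH_HLEN is 18 bytes, so eth_hdr_len() returns 18 for
 * 802.1Q-tagged frames and 14 otherwise.
 */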
1783
1784/*
1785 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1786 */
1787netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1788{
1789	struct adapter *adapter = dev->ml_priv;
1790	struct sge *sge = adapter->sge;
1791	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
1792	struct cpl_tx_pkt *cpl;
1793	struct sk_buff *orig_skb = skb;
1794	int ret;
1795
1796	if (skb->protocol == htons(ETH_P_CPL5))
1797		goto send;
1798
1799	/*
1800	 * We are using a non-standard hard_header_len.
1801	 * Allocate more header room in the rare cases it is not big enough.
1802	 */
1803	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
1804		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
1805		++st->tx_need_hdrroom;
1806		dev_kfree_skb_any(orig_skb);
1807		if (!skb)
1808			return NETDEV_TX_OK;
1809	}
1810
1811	if (skb_shinfo(skb)->gso_size) {
1812		int eth_type;
1813		struct cpl_tx_pkt_lso *hdr;
1814
1815		++st->tx_tso;
1816
1817		eth_type = skb_network_offset(skb) == ETH_HLEN ?
1818			CPL_ETH_II : CPL_ETH_II_VLAN;
1819
1820		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
1821		hdr->opcode = CPL_TX_PKT_LSO;
1822		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
1823		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
1824		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
1825		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
1826							  skb_shinfo(skb)->gso_size));
1827		hdr->len = htonl(skb->len - sizeof(*hdr));
1828		cpl = (struct cpl_tx_pkt *)hdr;
1829	} else {
1830		/*
1831		 * Packets shorter than ETH_HLEN can break the MAC, so drop them
1832		 * early.  We may also get oversized packets because some parts
1833		 * of the kernel don't handle our unusual hard_header_len
1834		 * correctly; drop those too.
1835		 */
1836		if (unlikely(skb->len < ETH_HLEN ||
1837			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
1838			pr_debug("%s: packet size %d hdr %d mtu %d\n", dev->name,
1839				 skb->len, eth_hdr_len(skb->data), dev->mtu);
1840			dev_kfree_skb_any(skb);
1841			return NETDEV_TX_OK;
1842		}
1843
1844		if (skb->ip_summed == CHECKSUM_PARTIAL &&
1845		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
1846			if (unlikely(skb_checksum_help(skb))) {
1847				pr_debug("%s: unable to do udp checksum\n", dev->name);
1848				dev_kfree_skb_any(skb);
1849				return NETDEV_TX_OK;
1850			}
1851		}
1852
1853		/* We assume this catches the gratuitous ARP, and we will use
1854		 * it later to flush out stuck ESPI packets.
1855		 */
1856		if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
1857			if (skb->protocol == htons(ETH_P_ARP) &&
1858			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
1859				adapter->sge->espibug_skb[dev->if_port] = skb;
1860				/* We want to re-use this skb later. We
1861				 * simply bump the reference count and it
1862				 * will not be freed...
1863				 */
1864				skb = skb_get(skb);
1865			}
1866		}
1867
1868		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
1869		cpl->opcode = CPL_TX_PKT;
1870		cpl->ip_csum_dis = 1;    /* SW calculates IP csum */
1871		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
1872		/* the length field isn't used so don't bother setting it */
1873
1874		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
1875	}
1876	cpl->iff = dev->if_port;
1877
1878	if (vlan_tx_tag_present(skb)) {
1879		cpl->vlan_valid = 1;
1880		cpl->vlan = htons(vlan_tx_tag_get(skb));
1881		st->vlan_insert++;
1882	} else
1883		cpl->vlan_valid = 0;
1884
1885send:
1886	ret = t1_sge_tx(skb, adapter, 0, dev);
1887
1888	/* If the transmit was busy and we reallocated the skb due to the
1889	 * headroom limit, silently discard it to avoid a leak.
1890	 */
1891	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
1892		dev_kfree_skb_any(skb);
1893		ret = NETDEV_TX_OK;
1894	}
1895	return ret;
1896}
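
/*
 * Context sketch (not from this file): t1_start_xmit() is the driver's
 * ndo_start_xmit hook.  Registration happens elsewhere in the driver and
 * would look roughly like this (struct name hypothetical):
 *
 *	static const struct net_device_ops cxgb_netdev_ops = {
 *		.ndo_start_xmit = t1_start_xmit,
 *		...
 *	};
 *	dev->netdev_ops = &cxgb_netdev_ops;
 */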
1897
1898/*
1899 * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
1900 */
1901static void sge_tx_reclaim_cb(unsigned long data)
1902{
1903	int i;
1904	struct sge *sge = (struct sge *)data;
1905
1906	for (i = 0; i < SGE_CMDQ_N; ++i) {
1907		struct cmdQ *q = &sge->cmdQ[i];
1908
1909		if (!spin_trylock(&q->lock))
1910			continue;
1911
1912		reclaim_completed_tx(sge, q);
1913		if (i == 0 && q->in_use) {    /* flush pending credits */
1914			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
1915		}
1916		spin_unlock(&q->lock);
1917	}
1918	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1919}
1920
1921/*
1922 * Propagate changes of the SGE coalescing parameters to the HW.
1923 */
1924int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
1925{
1926	sge->fixed_intrtimer = p->rx_coalesce_usecs *
1927		core_ticks_per_usec(sge->adapter);
1928	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
1929	return 0;
1930}
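
/*
 * Worked example (hypothetical clock rate): with a 125 MHz core clock,
 * core_ticks_per_usec() is 125, so rx_coalesce_usecs == 50 programs
 * 50 * 125 == 6250 core ticks into A_SG_INTRTIMER.
 */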
1931
1932/*
1933 * Allocates both RX and TX resources and configures the SGE. However,
1934 * the hardware is not enabled yet.
1935 */
1936int t1_sge_configure(struct sge *sge, struct sge_params *p)
1937{
1938	if (alloc_rx_resources(sge, p))
1939		return -ENOMEM;
1940	if (alloc_tx_resources(sge, p)) {
1941		free_rx_resources(sge);
1942		return -ENOMEM;
1943	}
1944	configure_sge(sge, p);
1945
1946	/*
1947	 * Now that we have sized the free lists calculate the payload
1948	 * capacity of the large buffers.  Other parts of the driver use
1949	 * this to set the max offload coalescing size so that RX packets
1950	 * do not overflow our large buffers.
1951	 */
1952	p->large_buf_capacity = jumbo_payload_capacity(sge);
1953	return 0;
1954}
1955
1956/*
1957 * Disables the DMA engine.
1958 */
1959void t1_sge_stop(struct sge *sge)
1960{
1961	int i;
1962	writel(0, sge->adapter->regs + A_SG_CONTROL);
1963	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1964
1965	if (is_T2(sge->adapter))
1966		del_timer_sync(&sge->espibug_timer);
1967
1968	del_timer_sync(&sge->tx_reclaim_timer);
1969	if (sge->tx_sched)
1970		tx_sched_stop(sge);
1971
1972	for (i = 0; i < MAX_NPORTS; i++)
1973		kfree_skb(sge->espibug_skb[i]);
1974}
1975
1976/*
1977 * Enables the DMA engine.
1978 */
1979void t1_sge_start(struct sge *sge)
1980{
1981	refill_free_list(sge, &sge->freelQ[0]);
1982	refill_free_list(sge, &sge->freelQ[1]);
1983
1984	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
1985	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
1986	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1987
1988	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1989
1990	if (is_T2(sge->adapter))
1991		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1992}
1993
1994/*
1995 * Callback for the T2 ESPI 'stuck packet' bug workaround.
1996 */
1997static void espibug_workaround_t204(unsigned long data)
1998{
1999	struct adapter *adapter = (struct adapter *)data;
2000	struct sge *sge = adapter->sge;
2001	unsigned int nports = adapter->params.nports;
2002	u32 seop[MAX_NPORTS];
2003
2004	if (adapter->open_device_map & PORT_MASK) {
2005		int i;
2006
2007		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
2008			return;
2009
2010		for (i = 0; i < nports; i++) {
2011			struct sk_buff *skb = sge->espibug_skb[i];
2012
2013			if (!netif_running(adapter->port[i].dev) ||
2014			    netif_queue_stopped(adapter->port[i].dev) ||
2015			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
2016				continue;
2017
2018			if (!skb->cb[0]) {
2019				skb_copy_to_linear_data_offset(skb,
2020						    sizeof(struct cpl_tx_pkt),
2021							       ch_mac_addr,
2022							       ETH_ALEN);
2023				skb_copy_to_linear_data_offset(skb,
2024							       skb->len - 10,
2025							       ch_mac_addr,
2026							       ETH_ALEN);
2027				skb->cb[0] = 0xff;
2028			}
2029
2030			/* bump the reference count to avoid freeing of
2031			 * the skb once the DMA has completed.
2032			 */
2033			skb = skb_get(skb);
2034			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
2035		}
2036	}
2037	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2038}
2039
2040static void espibug_workaround(unsigned long data)
2041{
2042	struct adapter *adapter = (struct adapter *)data;
2043	struct sge *sge = adapter->sge;
2044
2045	if (netif_running(adapter->port[0].dev)) {
2046		struct sk_buff *skb = sge->espibug_skb[0];
2047		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
2048
2049		if ((seop & 0xfff0fff) == 0xfff && skb) {
2050			if (!skb->cb[0]) {
2051				skb_copy_to_linear_data_offset(skb,
2052						sizeof(struct cpl_tx_pkt),
2053						ch_mac_addr,
2054						ETH_ALEN);
2055				skb_copy_to_linear_data_offset(skb,
2056						skb->len - 10,
2057						ch_mac_addr,
2058						ETH_ALEN);
2059				skb->cb[0] = 0xff;
2060			}
2061
2062			/* bump the reference count to avoid freeing of the
2063			 * skb once the DMA has completed.
2064			 */
2065			skb = skb_get(skb);
2066			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
2067		}
2068	}
2069	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2070}
2071
2072/*
2073 * Creates a t1_sge structure and returns suggested resource parameters.
2074 */
2075struct sge * __devinit t1_sge_create(struct adapter *adapter,
2076				     struct sge_params *p)
2077{
2078	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
2079	int i;
2080
2081	if (!sge)
2082		return NULL;
2083
2084	sge->adapter = adapter;
2085	sge->netdev = adapter->port[0].dev;
2086	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
2087	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
2088
2089	for_each_port(adapter, i) {
2090		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
2091		if (!sge->port_stats[i])
2092			goto nomem_port;
2093	}
2094
2095	init_timer(&sge->tx_reclaim_timer);
2096	sge->tx_reclaim_timer.data = (unsigned long)sge;
2097	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
2098
2099	if (is_T2(sge->adapter)) {
2100		init_timer(&sge->espibug_timer);
2101
2102		if (adapter->params.nports > 1) {
2103			tx_sched_init(sge);
2104			sge->espibug_timer.function = espibug_workaround_t204;
2105		} else
2106			sge->espibug_timer.function = espibug_workaround;
2107		sge->espibug_timer.data = (unsigned long)sge->adapter;
2108
2109		sge->espibug_timeout = 1;
2110		/* for T204, every 10ms */
2111		if (adapter->params.nports > 1)
2112			sge->espibug_timeout = HZ/100;
2113	}
2114
2115
2116	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
2117	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
2118	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
2119	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
2120	if (sge->tx_sched) {
2121		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
2122			p->rx_coalesce_usecs = 15;
2123		else
2124			p->rx_coalesce_usecs = 50;
2125	} else
2126		p->rx_coalesce_usecs = 50;
2127
2128	p->coalesce_enable = 0;
2129	p->sample_interval_usecs = 0;
2130
2131	return sge;
2132nomem_port:
2133	while (i >= 0) {
2134		free_percpu(sge->port_stats[i]);
2135		--i;
2136	}
2137	kfree(sge);
2138	return NULL;
2139
2140}
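
/*
 * Usage sketch (call order inferred from this file; the initiating code
 * lives elsewhere in the driver):
 *
 *	sge = t1_sge_create(adapter, &params);	// allocate SGE software state
 *	t1_sge_configure(sge, &params);		// allocate rings, program the SGE
 *	t1_sge_start(sge);			// enable DMA, arm the timers
 *	...
 *	t1_sge_stop(sge);			// quiesce DMA, kill the timers
 */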