// SPDX-License-Identifier: GPL-2.0+
/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 *
 * This driver uses the sungem driver (c) David Miller
 * (davem@redhat.com) as its basis.
 *
 * The cassini chip has a number of features that distinguish it from
 * the gem chip:
 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 *      load balancing (non-VLAN mode)
 *  batching of multiple packets
 *  multiple CPU dispatching
 *  page-based RX descriptor engine with separate completion rings
 *  Gigabit support (GMII and PCS interface)
 *  MIF link up/down detection works
 *
 * RX is handled by page sized buffers that are attached as fragments to
 * the skb. here's what's done:
 *  -- driver allocates pages at a time and keeps reference counts
 *     on them.
 *  -- the upper protocol layers assume that the header is in the skb
 *     itself. as a result, cassini will copy a small amount (64 bytes)
 *     to make them happy.
 *  -- driver appends the rest of the data pages as frags to skbuffs
 *     and increments the reference count
 *  -- on page reclamation, the driver swaps the page with a spare page.
 *     if that page is still in use, it frees its reference to that page,
 *     and allocates a new page for use. otherwise, it just recycles the
 *     page.
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 *       as long as the network stack requires a header copy.
 *
 * TX has 4 queues. currently these queues are used in a round-robin
 * fashion for load balancing. They can also be used for QoS. for that
 * to work, however, QoS information needs to be exposed down to the driver
 * level so that subqueues get targeted to particular transmit rings.
 * alternatively, the queues can be configured via use of the all-purpose
 * ioctl.
 *
 * RX DATA: the rx completion ring has all the info, but the rx desc
 * ring has all of the data. RX can conceivably come in under multiple
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */
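
/* In pseudo-code, the page reclamation described above amounts to:
 *
 *	if (page_count(page->buffer) == 1)
 *		recycle the page in place;	(the sole reference is ours)
 *	else
 *		swap in a spare page and park the old one on the in-use
 *		list until its refcount drops back to 1;
 *
 * see cas_page_spare()/cas_page_swap() below for the real logic.
 */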

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>

#define CAS_NCPUS            num_online_cpus()

#define cas_skb_release(x)  netif_rx(x)

/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */

#include "cassini.h"

#define USE_TX_COMPWB      /* use completion writeback registers */
#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
#define USE_RX_BLANK       /* hw interrupt mitigation */
#undef USE_ENTROPY_DEV     /* don't test for entropy device */

/* NOTE: these aren't usable unless PCI interrupts can be assigned.
 * also, we need to make cp->lock finer-grained.
 */
#undef  USE_PCI_INTB
#undef  USE_PCI_INTC
#undef  USE_PCI_INTD
#undef  USE_QOS

#undef  USE_VPD_DEBUG       /* debug vpd information if defined */

/* rx processing options */
#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
#define RX_COPY_ALWAYS 0    /* if 0, use frags */
#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */

#define DRV_MODULE_NAME		"cassini"
#define DRV_MODULE_VERSION	"1.6"
#define DRV_MODULE_RELDATE	"21 May 2008"

#define CAS_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define CAS_TX_TIMEOUT			(HZ)
#define CAS_LINK_TIMEOUT                (22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT           (1)

/* timeout values for state changing. these specify the number
 * of 10us delays to be used before giving up.
 */
#define STOP_TRIES_PHY 1000
#define STOP_TRIES     5000

/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 *            2 * page_size - 0x50
 */
#define CAS_MIN_FRAME			97
#define CAS_1000MB_MIN_FRAME            255
#define CAS_MIN_MTU                     60
#define CAS_MAX_MTU                     min(((cp->page_size << 1) - 0x50), 9000)
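
/* e.g. with 8K rx pages (cp->page_size == 0x2000) this works out to
 * min((0x2000 << 1) - 0x50, 9000) = min(16304, 9000) = 9000, while the
 * default 2K pages would give 2 * 2048 - 0x50 = 4016.
 */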

#if 1
/*
 * Eliminate these and use separate atomic counters for each, to
 * avoid a race condition.
 */
#else
#define CAS_RESET_MTU                   1
#define CAS_RESET_ALL                   2
#define CAS_RESET_SPARE                 3
#endif

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

/*
 * Work around for a PCS bug in which the link goes down due to the chip
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5
/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");

/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
static int link_transition_timeout;



static u16 link_modes[] = {
	BMCR_ANENABLE,			 /* 0 : autoneg */
	0,				 /* 1 : 10bt half duplex */
	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
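
/* The link_mode module parameter above selects one of these entries at
 * probe time; e.g. loading with link_mode=4 would start the chip out
 * forced to 100bt full duplex (BMCR_SPEED100|BMCR_FULLDPLX) instead of
 * autonegotiating.
 */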

static const struct pci_device_id cas_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);

static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
	int i;

	for (i = 0; i < N_TX_RINGS; i++)
		spin_lock_nested(&cp->tx_lock[i], i);
}

/* WTZ: QA was finding deadlock problems with the previous
 * versions after long test runs with multiple cards per machine.
 * See if replacing cas_lock_all with safer versions helps. The
 * symptoms QA is reporting match those we'd expect if interrupts
 * aren't being properly restored, and we fixed a previous deadlock
 * with similar symptoms by using save/restore versions in other
 * places.
 */
#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)

static inline void cas_unlock_tx(struct cas *cp)
{
	int i;

	for (i = N_TX_RINGS; i > 0; i--)
		spin_unlock(&cp->tx_lock[i - 1]);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
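
/* Lock ordering implied by the helpers above: cp->lock is always taken
 * before the tx_lock[] array, and tx_lock[0]..tx_lock[N_TX_RINGS - 1]
 * are taken in ascending ring order (spin_lock_nested() with the ring
 * index keeps lockdep happy about the nesting); cas_unlock_tx() drops
 * them in exact reverse order before cp->lock is released.
 */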

static void cas_disable_irq(struct cas *cp, const int ring)
{
	/* Make sure we won't get any more interrupts */
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	/* disable completion interrupts and selectively mask */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}

static inline void cas_mask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_disable_irq(cp, i);
}

static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) { /* all but TX_DONE */
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			break;
		}
	}
}

static inline void cas_unmask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_enable_irq(cp, i);
}

static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}

static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	/* if we read back 0x0, we don't have an entropy device */
	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}

/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{
	u32 cmd;
	int limit = STOP_TRIES_PHY;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return cmd & MIF_FRAME_DATA_MASK;
	}
	return 0xFFFF; /* -1 */
}

static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	cmd |= val & MIF_FRAME_DATA_MASK;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}
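
/* Typical use of the two accessors above, as seen throughout the
 * driver:
 *
 *	u16 bmcr = cas_phy_read(cp, MII_BMCR);
 *	cas_phy_write(cp, MII_BMCR, bmcr | BMCR_ANRESTART);
 *
 * cas_phy_read() returns 0xFFFF and cas_phy_write() returns -1 if the
 * MIF frame doesn't complete within STOP_TRIES_PHY 10us polls.
 */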

static void cas_phy_powerup(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if ((ctl & BMCR_PDOWN) == 0)
		return;
	ctl &= ~BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if (ctl & BMCR_PDOWN)
		return;
	ctl |= BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
		       DMA_FROM_DEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)       ((x)->used += (y))
#define RX_USED_SET(x, y)       ((x)->used  = (y))
#else
#define RX_USED_ADD(x, y) do { } while(0)
#define RX_USED_SET(x, y) do { } while(0)
#endif

/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(cas_page_t), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0);
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;
	page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
				      cp->page_size, DMA_FROM_DEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}

/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}

/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_spare_list, &list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	/*
	 * Looks like Adrian had protected this with a different
	 * lock than used everywhere else to manipulate this list.
	 */
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}

/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* check inuse list. if we don't need any more free buffers,
	 * just free it
	 */

	/* make a local copy of the list */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		/*
		 * With the lockless pagecache, cassini buffering scheme gets
		 * slightly less accurate: we might find that a page has an
		 * elevated reference count here, due to a speculative ref,
		 * and skip it as in-use. Ideally we would be able to reclaim
		 * it. However this would be such a rare case, it doesn't
		 * matter too much as we should pick it up the next time round.
		 *
		 * Importantly, if we find that the page has a refcount of 1
		 * here (our refcount), then we know it is definitely not inuse
		 * so we can reuse it.
		 */
		if (page_count(page->buffer) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any inuse buffers back on the list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* we still need spares, so try to allocate some */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}

/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try to do a quick recovery */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			netif_err(cp, rx_err, cp->dev,
				  "no spare buffers available\n");
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* trigger the timer to do the recovery */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}


static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg  = readl(cp->regs + REG_MIF_CFG);
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	/* poll and interrupt on link status change. */
	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}

/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp,
				       const struct ethtool_link_ksettings *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif
	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->base.autoneg == AUTONEG_ENABLE) {
		cp->link_cntl = BMCR_ANENABLE;
	} else {
		u32 speed = ep->base.speed;
		cp->link_cntl = 0;
		if (speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->base.duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		netdev_info(cp->dev, "PCS link down\n");
	} else {
		if (changed) {
			netdev_info(cp->dev, "link configuration changed\n");
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	/*
	 * WTZ: If the old state was link_up, we turn off the carrier
	 * to replicate everything we do elsewhere on a link-down
	 * event when we were already in a link-up state.
	 */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		/*
		 * WTZ: This branch will simply schedule a full reset after
		 * we explicitly changed link modes in an ioctl. See if this
		 * fixes the link-problems we were having for forced mode.
		 */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}

/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{
	int limit = STOP_TRIES_PHY;
	u16 val;

	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
	udelay(100);
	while (--limit) {
		val = cas_phy_read(cp, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	return limit <= 0;
}

static void cas_saturn_firmware_init(struct cas *cp)
{
	const struct firmware *fw;
	const char fw_name[] = "sun/cassini.bin";
	int err;

	if (PHY_NS_DP83065 != cp->phy_id)
		return;

	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
	if (err) {
		pr_err("Failed to load firmware \"%s\"\n",
		       fw_name);
		return;
	}
	if (fw->size < 2) {
		pr_err("bogus length %zu in \"%s\"\n",
		       fw->size, fw_name);
		goto out;
	}
	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
	cp->fw_size = fw->size - 2;
	cp->fw_data = vmalloc(cp->fw_size);
	if (!cp->fw_data)
		goto out;
	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
	release_firmware(fw);
}
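
/* As parsed above, the "sun/cassini.bin" image is simply a 16-bit
 * little-endian load address in its first two bytes followed by the
 * raw firmware; cas_saturn_firmware_load() below pushes those bytes to
 * the DP83065 one at a time through the expanded-memory registers.
 */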

static void cas_saturn_firmware_load(struct cas *cp)
{
	int i;

	if (!cp->fw_data)
		return;

	cas_phy_powerdown(cp);

	/* expanded memory access mode */
	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	/* pointer configuration for new firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	/* download new firmware */
	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
	for (i = 0; i < cp->fw_size; i++)
		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);

	/* enable firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}


/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	/* if we're in MII/GMII mode, set up phy */
	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp); /* take out of isolate mode */

		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* workaround link up/down issue with lucent */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* workarounds for broadcom phy */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				/* link workaround */
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* load firmware to address 10Mbps auto-negotiation
			 * issue. NOTE: this will need to be changed if the
			 * default firmware gets fixed.
			 */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		/* advertise capabilities */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* make sure that we don't advertise half
			 * duplex to avoid a chip issue
			 */
			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		/* reset pcs for serdes */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* enable serdes pins on saturn */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* Reset PCS unit. */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (--limit > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
				    readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* Make sure PCS is disabled while changing advertisement
		 * configuration.
		 */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* Advertise all capabilities except half-duplex. */
		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* enable PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* pcs workaround: enable sync detect */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}


static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* The remote-fault indication is only valid
	 * when autoneg has completed.
	 */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");

	/* work around link detection issue by querying the PCS state
	 * machine directly.
	 */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/*
			 * force a reset, as a workaround for the
			 * link-failure problem. May want to move this to a
			 * point a bit earlier in the sequence. If we had
			 * generated a reset a short time ago, we'll wait for
			 * the link timer to check the status until a
			 * timer expires (link_transition_jiffies_valid is
			 * true when the timer is running.)  Instead of using
			 * a system timer, we just do a check whenever the
			 * link timer is running - this clears the flag after
			 * a suitable delay.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "PCS link down\n");

		/* Cassini only: if you force a mode, there can be
		 * sync problems on link down. to fix that, the following
		 * things need to be checked:
		 * 1) read serialink state register
		 * 2) read pcs status register to verify link down.
		 * 3) if link down and serial link == 0x03, then you need
		 *    to global reset the chip.
		 */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			/* should check to see if we're in a forced mode */
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* force a reset, as a workaround for the
			 * link-failure problem.  May want to move
			 * this to a point a bit earlier in the
			 * sequence.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}

static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
		return 0;
	return cas_pcs_link_check(cp);
}

static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		netdev_err(dev, "TX MAC max packet size error\n");
		cp->net_stats[0].tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* We do not keep track of MAC_TX_COLL_FIRST and
	 * MAC_TX_PEAK_ATTEMPTS events.
	 */
	return 0;
}

static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
		++firmware;
		++i;
	}
}

static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	/* rx free descriptors */
	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx desc 2 is for IPSEC packets. however,
		 * we don't use it for that purpose.
		 */
		val = (unsigned long) cp->init_rxds[1] -
			(unsigned long) cp->init_block;
		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
		writel((desc_dma + val) & 0xffffffff, cp->regs +
		       REG_PLUS_RX_DB1_LOW);
		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
		       REG_PLUS_RX_KICK1);
	}

	/* rx completion registers */
	val = (unsigned long) cp->init_rxcs[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx comp 2-4 */
		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
			val = (unsigned long) cp->init_rxcs[i] -
				(unsigned long) cp->init_block;
			writel((desc_dma + val) >> 32, cp->regs +
			       REG_PLUS_RX_CBN_HI(i));
			writel((desc_dma + val) & 0xffffffff, cp->regs +
			       REG_PLUS_RX_CBN_LOW(i));
		}
	}

	/* read selective clear regs to prevent spurious interrupts
	 * on reset because complete == kick.
	 * selective clear set up to prevent interrupts on resets
	 */
	readl(cp->regs + REG_INTR_STATUS_ALIAS);
	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);

	/* set up pause thresholds */
	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
	writel(val, cp->regs + REG_RX_PAUSE_THRESH);

	/* zero out dma reassembly buffers */
	for (i = 0; i < 64; i++) {
		writel(i, cp->regs + REG_RX_TABLE_ADDR);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
	}

	/* make sure address register is 0 for normal operation */
	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

	/* interrupt mitigation */
#ifdef USE_RX_BLANK
	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
	writel(val, cp->regs + REG_RX_BLANK);
#else
	writel(0x0, cp->regs + REG_RX_BLANK);
#endif

	/* interrupt generation as a function of low water marks for
	 * free desc and completion entries. these are used to trigger
	 * housekeeping for rx descs. we don't use the free interrupt
	 * as it's not very useful
	 */
	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
	writel(val, cp->regs + REG_RX_AE_THRESH);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
	}

	/* Random early detect registers. useful for congestion avoidance.
	 * this should be tunable.
	 */
	writel(0x0, cp->regs + REG_RX_RED);

	/* receive page sizes. default == 2K (0x800) */
	val = 0;
	if (cp->page_size == 0x1000)
		val = 0x1;
	else if (cp->page_size == 0x2000)
		val = 0x2;
	else if (cp->page_size == 0x4000)
		val = 0x3;

	/* round mtu + offset. constrain to page size. */
	size = cp->dev->mtu + 64;
	if (size > cp->page_size)
		size = cp->page_size;

	if (size <= 0x400)
		i = 0x0;
	else if (size <= 0x800)
		i = 0x1;
	else if (size <= 0x1000)
		i = 0x2;
	else
		i = 0x3;

	cp->mtu_stride = 1 << (i + 10);
	val  = CAS_BASE(RX_PAGE_SIZE, val);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
	writel(val, cp->regs + REG_RX_PAGE_SIZE);

	/* enable the header parser if desired */
	if (&CAS_HP_FIRMWARE[0] == &cas_prog_null[0])
		return;

	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
	writel(val, cp->regs + REG_HP_CFG);
}
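
/* Worked example for the REG_RX_PAGE_SIZE setup above: with the
 * standard 1500-byte MTU and 8K pages, size = 1500 + 64 = 1564, which
 * falls into the <= 0x800 bucket, so i = 0x1, cp->mtu_stride becomes
 * 1 << 11 = 2048 and MTU_COUNT is 0x2000 >> 11 = 4 reassembly strides
 * per page.
 */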
1322
1323static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1324{
1325	memset(rxc, 0, sizeof(*rxc));
1326	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1327}
1328
1329/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1330 * flipping is protected by the fact that the chip will not
1331 * hand back the same page index while it's being processed.
1332 */
1333static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1334{
1335	cas_page_t *page = cp->rx_pages[1][index];
1336	cas_page_t *new;
1337
1338	if (page_count(page->buffer) == 1)
1339		return page;
1340
1341	new = cas_page_dequeue(cp);
1342	if (new) {
1343		spin_lock(&cp->rx_inuse_lock);
1344		list_add(&page->list, &cp->rx_inuse_list);
1345		spin_unlock(&cp->rx_inuse_lock);
1346	}
1347	return new;
1348}
1349
1350/* this needs to be changed if we actually use the ENC RX DESC ring */
1351static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1352				 const int index)
1353{
1354	cas_page_t **page0 = cp->rx_pages[0];
1355	cas_page_t **page1 = cp->rx_pages[1];
1356
1357	/* swap if buffer is in use */
1358	if (page_count(page0[index]->buffer) > 1) {
1359		cas_page_t *new = cas_page_spare(cp, index);
1360		if (new) {
1361			page1[index] = page0[index];
1362			page0[index] = new;
1363		}
1364	}
1365	RX_USED_SET(page0[index], 0);
1366	return page0[index];
1367}
1368
1369static void cas_clean_rxds(struct cas *cp)
1370{
1371	/* only clean ring 0 as ring 1 is used for spare buffers */
1372        struct cas_rx_desc *rxd = cp->init_rxds[0];
1373	int i, size;
1374
1375	/* release all rx flows */
1376	for (i = 0; i < N_RX_FLOWS; i++) {
1377		struct sk_buff *skb;
1378		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1379			cas_skb_release(skb);
1380		}
1381	}
1382
1383	/* initialize descriptors */
1384	size = RX_DESC_RINGN_SIZE(0);
1385	for (i = 0; i < size; i++) {
1386		cas_page_t *page = cas_page_swap(cp, 0, i);
1387		rxd[i].buffer = cpu_to_le64(page->dma_addr);
1388		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1389					    CAS_BASE(RX_INDEX_RING, 0));
1390	}
1391
1392	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
1393	cp->rx_last[0] = 0;
1394	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1395}
1396
1397static void cas_clean_rxcs(struct cas *cp)
1398{
1399	int i, j;
1400
1401	/* take ownership of rx comp descriptors */
1402	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1403	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1404	for (i = 0; i < N_RX_COMP_RINGS; i++) {
1405		struct cas_rx_comp *rxc = cp->init_rxcs[i];
1406		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1407			cas_rxc_init(rxc + j);
1408		}
1409	}
1410}
1411
1412#if 0
1413/* When we get a RX fifo overflow, the RX unit is probably hung
1414 * so we do the following.
1415 *
1416 * If any part of the reset goes wrong, we return 1 and that causes the
1417 * whole chip to be reset.
1418 */
1419static int cas_rxmac_reset(struct cas *cp)
1420{
1421	struct net_device *dev = cp->dev;
1422	int limit;
1423	u32 val;
1424
1425	/* First, reset MAC RX. */
1426	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1427	for (limit = 0; limit < STOP_TRIES; limit++) {
1428		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1429			break;
1430		udelay(10);
1431	}
1432	if (limit == STOP_TRIES) {
1433		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1434		return 1;
1435	}
1436
1437	/* Second, disable RX DMA. */
1438	writel(0, cp->regs + REG_RX_CFG);
1439	for (limit = 0; limit < STOP_TRIES; limit++) {
1440		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1441			break;
1442		udelay(10);
1443	}
1444	if (limit == STOP_TRIES) {
1445		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1446		return 1;
1447	}
1448
1449	mdelay(5);
1450
1451	/* Execute RX reset command. */
1452	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1453	for (limit = 0; limit < STOP_TRIES; limit++) {
1454		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1455			break;
1456		udelay(10);
1457	}
1458	if (limit == STOP_TRIES) {
1459		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1460		return 1;
1461	}
1462
1463	/* reset driver rx state */
1464	cas_clean_rxds(cp);
1465	cas_clean_rxcs(cp);
1466
1467	/* Now, reprogram the rest of RX unit. */
1468	cas_init_rx_dma(cp);
1469
1470	/* re-enable */
1471	val = readl(cp->regs + REG_RX_CFG);
1472	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1473	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1474	val = readl(cp->regs + REG_MAC_RX_CFG);
1475	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1476	return 0;
1477}
1478#endif
1479
1480static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1481			       u32 status)
1482{
1483	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1484
1485	if (!stat)
1486		return 0;
1487
1488	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1489
1490	/* these are all rollovers */
1491	spin_lock(&cp->stat_lock[0]);
1492	if (stat & MAC_RX_ALIGN_ERR)
1493		cp->net_stats[0].rx_frame_errors += 0x10000;
1494
1495	if (stat & MAC_RX_CRC_ERR)
1496		cp->net_stats[0].rx_crc_errors += 0x10000;
1497
1498	if (stat & MAC_RX_LEN_ERR)
1499		cp->net_stats[0].rx_length_errors += 0x10000;
1500
1501	if (stat & MAC_RX_OVERFLOW) {
1502		cp->net_stats[0].rx_over_errors++;
1503		cp->net_stats[0].rx_fifo_errors++;
1504	}
1505
1506	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
1507	 * events.
1508	 */
1509	spin_unlock(&cp->stat_lock[0]);
1510	return 0;
1511}
1512
1513static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1514			     u32 status)
1515{
1516	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1517
1518	if (!stat)
1519		return 0;
1520
1521	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1522		     "mac interrupt, stat: 0x%x\n", stat);
1523
1524	/* This interrupt is just for pause frame and pause
1525	 * tracking.  It is useful for diagnostics and debug
1526	 * but probably by default we will mask these events.
1527	 */
1528	if (stat & MAC_CTRL_PAUSE_STATE)
1529		cp->pause_entered++;
1530
1531	if (stat & MAC_CTRL_PAUSE_RECEIVED)
1532		cp->pause_last_time_recvd = (stat >> 16);
1533
1534	return 0;
1535}
1536
1537
1538/* Must be invoked under cp->lock. */
1539static inline int cas_mdio_link_not_up(struct cas *cp)
1540{
1541	u16 val;
1542
1543	switch (cp->lstate) {
1544	case link_force_ret:
1545		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1546		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1547		cp->timer_ticks = 5;
1548		cp->lstate = link_force_ok;
1549		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1550		break;
1551
1552	case link_aneg:
1553		val = cas_phy_read(cp, MII_BMCR);
1554
1555		/* Try forced modes. we try things in the following order:
1556		 * 1000 full -> 100 full/half -> 10 half
1557		 */
1558		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1559		val |= BMCR_FULLDPLX;
1560		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1561			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1562		cas_phy_write(cp, MII_BMCR, val);
1563		cp->timer_ticks = 5;
1564		cp->lstate = link_force_try;
1565		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1566		break;
1567
1568	case link_force_try:
1569		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
1570		val = cas_phy_read(cp, MII_BMCR);
1571		cp->timer_ticks = 5;
1572		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
1573			val &= ~CAS_BMCR_SPEED1000;
1574			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1575			cas_phy_write(cp, MII_BMCR, val);
1576			break;
1577		}
1578
1579		if (val & BMCR_SPEED100) {
1580			if (val & BMCR_FULLDPLX) /* fd failed */
1581				val &= ~BMCR_FULLDPLX;
1582			else { /* 100Mbps failed */
1583				val &= ~BMCR_SPEED100;
1584			}
1585			cas_phy_write(cp, MII_BMCR, val);
1586			break;
1587		}
1588		break;
1589	default:
1590		break;
1591	}
1592	return 0;
1593}
1594
1595
1596/* must be invoked with cp->lock held */
1597static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1598{
1599	int restart;
1600
1601	if (bmsr & BMSR_LSTATUS) {
1602		/* Ok, here we got a link. If we had it due to a forced
1603		 * fallback, and we were configured for autoneg, we
1604		 * retry a short autoneg pass. If you know your hub is
1605		 * broken, use ethtool ;)
1606		 */
1607		if ((cp->lstate == link_force_try) &&
1608		    (cp->link_cntl & BMCR_ANENABLE)) {
1609			cp->lstate = link_force_ret;
1610			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1611			cas_mif_poll(cp, 0);
1612			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1613			cp->timer_ticks = 5;
1614			if (cp->opened)
1615				netif_info(cp, link, cp->dev,
1616					   "Got link after fallback, retrying autoneg once...\n");
1617			cas_phy_write(cp, MII_BMCR,
1618				      cp->link_fcntl | BMCR_ANENABLE |
1619				      BMCR_ANRESTART);
1620			cas_mif_poll(cp, 1);
1621
1622		} else if (cp->lstate != link_up) {
1623			cp->lstate = link_up;
1624			cp->link_transition = LINK_TRANSITION_LINK_UP;
1625
1626			if (cp->opened) {
1627				cas_set_link_modes(cp);
1628				netif_carrier_on(cp->dev);
1629			}
1630		}
1631		return 0;
1632	}
1633
1634	/* link not up. if the link was previously up, we restart the
1635	 * whole process
1636	 */
1637	restart = 0;
1638	if (cp->lstate == link_up) {
1639		cp->lstate = link_down;
1640		cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1641
1642		netif_carrier_off(cp->dev);
1643		if (cp->opened)
1644			netif_info(cp, link, cp->dev, "Link down\n");
1645		restart = 1;
1646
1647	} else if (++cp->timer_ticks > 10)
1648		cas_mdio_link_not_up(cp);
1649
1650	return restart;
1651}
1652
1653static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1654			     u32 status)
1655{
1656	u32 stat = readl(cp->regs + REG_MIF_STATUS);
1657	u16 bmsr;
1658
1659	/* check for a link change */
1660	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1661		return 0;
1662
1663	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1664	return cas_mii_link_check(cp, bmsr);
1665}
1666
1667static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1668			     u32 status)
1669{
1670	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1671
1672	if (!stat)
1673		return 0;
1674
1675	netdev_err(dev, "PCI error [%04x:%04x]",
1676		   stat, readl(cp->regs + REG_BIM_DIAG));
1677
1678	/* cassini+ has this reserved */
1679	if ((stat & PCI_ERR_BADACK) &&
1680	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1681		pr_cont(" <No ACK64# during ABS64 cycle>");
1682
1683	if (stat & PCI_ERR_DTRTO)
1684		pr_cont(" <Delayed transaction timeout>");
1685	if (stat & PCI_ERR_OTHER)
1686		pr_cont(" <other>");
1687	if (stat & PCI_ERR_BIM_DMA_WRITE)
1688		pr_cont(" <BIM DMA 0 write req>");
1689	if (stat & PCI_ERR_BIM_DMA_READ)
1690		pr_cont(" <BIM DMA 0 read req>");
1691	pr_cont("\n");
1692
1693	if (stat & PCI_ERR_OTHER) {
1694		int pci_errs;
1695
1696		/* Interrogate PCI config space for the
1697		 * true cause.
1698		 */
1699		pci_errs = pci_status_get_and_clear_errors(cp->pdev);
1700
1701		netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
1702		if (pci_errs & PCI_STATUS_PARITY)
1703			netdev_err(dev, "PCI parity error detected\n");
1704		if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
1705			netdev_err(dev, "PCI target abort\n");
1706		if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
1707			netdev_err(dev, "PCI master acks target abort\n");
1708		if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
1709			netdev_err(dev, "PCI master abort\n");
1710		if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
1711			netdev_err(dev, "PCI system error SERR#\n");
1712		if (pci_errs & PCI_STATUS_DETECTED_PARITY)
1713			netdev_err(dev, "PCI parity error\n");
 
 
 
 
 
 
 
 
 
1714	}
1715
1716	/* For all PCI errors, we should reset the chip. */
1717	return 1;
1718}
1719
1720/* All non-normal interrupt conditions get serviced here.
1721 * Returns non-zero if we should just exit the interrupt
1722 * handler right now (ie. if we reset the card which invalidates
1723 * all of the other original irq status bits).
1724 */
1725static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1726			    u32 status)
1727{
1728	if (status & INTR_RX_TAG_ERROR) {
1729		/* corrupt RX tag framing */
1730		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1731			     "corrupt rx tag framing\n");
1732		spin_lock(&cp->stat_lock[0]);
1733		cp->net_stats[0].rx_errors++;
1734		spin_unlock(&cp->stat_lock[0]);
1735		goto do_reset;
1736	}
1737
1738	if (status & INTR_RX_LEN_MISMATCH) {
1739		/* length mismatch. */
1740		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1741			     "length mismatch for rx frame\n");
1742		spin_lock(&cp->stat_lock[0]);
1743		cp->net_stats[0].rx_errors++;
1744		spin_unlock(&cp->stat_lock[0]);
1745		goto do_reset;
1746	}
1747
1748	if (status & INTR_PCS_STATUS) {
1749		if (cas_pcs_interrupt(dev, cp, status))
1750			goto do_reset;
1751	}
1752
1753	if (status & INTR_TX_MAC_STATUS) {
1754		if (cas_txmac_interrupt(dev, cp, status))
1755			goto do_reset;
1756	}
1757
1758	if (status & INTR_RX_MAC_STATUS) {
1759		if (cas_rxmac_interrupt(dev, cp, status))
1760			goto do_reset;
1761	}
1762
1763	if (status & INTR_MAC_CTRL_STATUS) {
1764		if (cas_mac_interrupt(dev, cp, status))
1765			goto do_reset;
1766	}
1767
1768	if (status & INTR_MIF_STATUS) {
1769		if (cas_mif_interrupt(dev, cp, status))
1770			goto do_reset;
1771	}
1772
1773	if (status & INTR_PCI_ERROR_STATUS) {
1774		if (cas_pci_interrupt(dev, cp, status))
1775			goto do_reset;
1776	}
1777	return 0;
1778
1779do_reset:
1780#if 1
1781	atomic_inc(&cp->reset_task_pending);
1782	atomic_inc(&cp->reset_task_pending_all);
1783	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1784	schedule_work(&cp->reset_task);
1785#else
1786	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1787	netdev_err(dev, "reset called in cas_abnormal_irq\n");
1788	schedule_work(&cp->reset_task);
1789#endif
1790	return 1;
1791}
1792
1793/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
1794 *       determining whether to do a netif_stop/wakeup
1795 */
1796#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1797#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1798static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1799				  const int len)
1800{
1801	unsigned long off = addr + len;
1802
1803	if (CAS_TABORT(cp) == 1)
1804		return 0;
1805	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1806		return 0;
1807	return TX_TARGET_ABORT_LEN;
1808}
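
/* worked example of the tabort workaround (illustrative numbers): with
 * 4K pages, a buffer ending at addr + len == 0x1fe0 leaves 0x20 bytes
 * to the next page boundary. if that remainder is within
 * TX_TARGET_ABORT_LEN, the caller bounces the final
 * TX_TARGET_ABORT_LEN bytes through a tiny buffer (see
 * cas_xmit_tx_ringN) so a DMA read never ends right at a page boundary
 * on chips with CAS_FLAG_TARGET_ABORT set.
 */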
1809
1810static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1811{
1812	struct cas_tx_desc *txds;
1813	struct sk_buff **skbs;
1814	struct net_device *dev = cp->dev;
1815	int entry, count;
1816
1817	spin_lock(&cp->tx_lock[ring]);
1818	txds = cp->init_txds[ring];
1819	skbs = cp->tx_skbs[ring];
1820	entry = cp->tx_old[ring];
1821
1822	count = TX_BUFF_COUNT(ring, entry, limit);
1823	while (entry != limit) {
1824		struct sk_buff *skb = skbs[entry];
1825		dma_addr_t daddr;
1826		u32 dlen;
1827		int frag;
1828
1829		if (!skb) {
1830			/* this should never occur */
1831			entry = TX_DESC_NEXT(ring, entry);
1832			continue;
1833		}
1834
1835		/* however, we might get only a partial skb release. */
1836		count -= skb_shinfo(skb)->nr_frags +
1837			cp->tx_tiny_use[ring][entry].nbufs + 1;
1838		if (count < 0)
1839			break;
1840
1841		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1842			     "tx[%d] done, slot %d\n", ring, entry);
1843
1844		skbs[entry] = NULL;
1845		cp->tx_tiny_use[ring][entry].nbufs = 0;
1846
1847		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1848			struct cas_tx_desc *txd = txds + entry;
1849
1850			daddr = le64_to_cpu(txd->buffer);
1851			dlen = CAS_VAL(TX_DESC_BUFLEN,
1852				       le64_to_cpu(txd->control));
1853			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
1854				       DMA_TO_DEVICE);
1855			entry = TX_DESC_NEXT(ring, entry);
1856
1857			/* tiny buffer may follow */
1858			if (cp->tx_tiny_use[ring][entry].used) {
1859				cp->tx_tiny_use[ring][entry].used = 0;
1860				entry = TX_DESC_NEXT(ring, entry);
1861			}
1862		}
1863
1864		spin_lock(&cp->stat_lock[ring]);
1865		cp->net_stats[ring].tx_packets++;
1866		cp->net_stats[ring].tx_bytes += skb->len;
1867		spin_unlock(&cp->stat_lock[ring]);
1868		dev_consume_skb_irq(skb);
1869	}
1870	cp->tx_old[ring] = entry;
1871
1872	/* this is wrong for multiple tx rings. the net device needs
1873	 * multiple queues for this to do the right thing.  we wait
1874	 * for 2*packets to be available when using tiny buffers
1875	 */
1876	if (netif_queue_stopped(dev) &&
1877	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1878		netif_wake_queue(dev);
1879	spin_unlock(&cp->tx_lock[ring]);
1880}
1881
1882static void cas_tx(struct net_device *dev, struct cas *cp,
1883		   u32 status)
1884{
1885	int limit, ring;
1886	/* tx_compwb is always present in the init block; it's only kept
1887	 * up to date when USE_TX_COMPWB enables completion writebacks. */
1888	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1889	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1890		     "tx interrupt, status: 0x%x, %llx\n",
1891		     status, (unsigned long long)compwb);
1892	/* process all the rings */
1893	for (ring = 0; ring < N_TX_RINGS; ring++) {
1894#ifdef USE_TX_COMPWB
1895		/* use the completion writeback registers */
1896		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1897			CAS_VAL(TX_COMPWB_LSB, compwb);
1898		compwb = TX_COMPWB_NEXT(compwb);
1899#else
1900		limit = readl(cp->regs + REG_TX_COMPN(ring));
1901#endif
1902		if (cp->tx_old[ring] != limit)
1903			cas_tx_ringN(cp, ring, limit);
1904	}
1905}
1906
1907
1908static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1909			      int entry, const u64 *words,
1910			      struct sk_buff **skbref)
1911{
1912	int dlen, hlen, len, i, alloclen;
1913	int off, swivel = RX_SWIVEL_OFF_VAL;
1914	struct cas_page *page;
1915	struct sk_buff *skb;
1916	void *crcaddr;
1917	__sum16 csum;
1918	char *p;
1919
1920	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1921	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1922	len  = hlen + dlen;
1923
1924	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1925		alloclen = len;
1926	else
1927		alloclen = max(hlen, RX_COPY_MIN);
1928
1929	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1930	if (skb == NULL)
1931		return -1;
1932
1933	*skbref = skb;
1934	skb_reserve(skb, swivel);
1935
1936	p = skb->data;
1937	crcaddr = NULL;
1938	if (hlen) { /* always copy header pages */
1939		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1940		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1941		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1942			swivel;
1943
1944		i = hlen;
1945		if (!dlen) /* attach FCS */
1946			i += cp->crc_size;
1947		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1948					i, DMA_FROM_DEVICE);
1949		memcpy(p, page_address(page->buffer) + off, i);
1950		dma_sync_single_for_device(&cp->pdev->dev,
1951					   page->dma_addr + off, i,
1952					   DMA_FROM_DEVICE);
1953		RX_USED_ADD(page, 0x100);
1954		p += hlen;
1955		swivel = 0;
1956	}
1957
1958
1959	if (alloclen < (hlen + dlen)) {
1960		skb_frag_t *frag = skb_shinfo(skb)->frags;
1961
1962		/* normal or jumbo packets. we use frags */
1963		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
1964		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1965		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
1966
1967		hlen = min(cp->page_size - off, dlen);
1968		if (hlen < 0) {
1969			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1970				     "rx page overflow: %d\n", hlen);
1971			dev_kfree_skb_irq(skb);
1972			return -1;
1973		}
1974		i = hlen;
1975		if (i == dlen)  /* attach FCS */
1976			i += cp->crc_size;
1977		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1978					i, DMA_FROM_DEVICE);
1979
1980		/* make sure we always copy a header */
1981		swivel = 0;
1982		if (p == (char *) skb->data) { /* not split */
1983			memcpy(p, page_address(page->buffer) + off,
1984			       RX_COPY_MIN);
1985			dma_sync_single_for_device(&cp->pdev->dev,
1986						   page->dma_addr + off, i,
1987						   DMA_FROM_DEVICE);
1988			off += RX_COPY_MIN;
1989			swivel = RX_COPY_MIN;
1990			RX_USED_ADD(page, cp->mtu_stride);
1991		} else {
1992			RX_USED_ADD(page, hlen);
1993		}
1994		skb_put(skb, alloclen);
1995
1996		skb_shinfo(skb)->nr_frags++;
1997		skb->data_len += hlen - swivel;
1998		skb->truesize += hlen - swivel;
1999		skb->len      += hlen - swivel;
2000
2001		skb_frag_fill_page_desc(frag, page->buffer, off, hlen - swivel);
2002		__skb_frag_ref(frag);
2003
2004		/* any more data? */
2005		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2006			hlen = dlen;
2007			off = 0;
2008
2009			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2010			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2011			dma_sync_single_for_cpu(&cp->pdev->dev,
2012						page->dma_addr,
2013						hlen + cp->crc_size,
2014						DMA_FROM_DEVICE);
2015			dma_sync_single_for_device(&cp->pdev->dev,
2016						   page->dma_addr,
2017						   hlen + cp->crc_size,
2018						   DMA_FROM_DEVICE);
2019
2020			skb_shinfo(skb)->nr_frags++;
2021			skb->data_len += hlen;
2022			skb->len      += hlen;
2023			frag++;
2024
2025			skb_frag_fill_page_desc(frag, page->buffer, 0, hlen);
2026			__skb_frag_ref(frag);
2027			RX_USED_ADD(page, hlen + cp->crc_size);
2028		}
2029
2030		if (cp->crc_size)
2031			crcaddr = page_address(page->buffer) + off + hlen;
2032
2033	} else {
2034		/* copying packet */
2035		if (!dlen)
2036			goto end_copy_pkt;
2037
2038		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2039		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2040		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2041		hlen = min(cp->page_size - off, dlen);
2042		if (hlen < 0) {
2043			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2044				     "rx page overflow: %d\n", hlen);
2045			dev_kfree_skb_irq(skb);
2046			return -1;
2047		}
2048		i = hlen;
2049		if (i == dlen) /* attach FCS */
2050			i += cp->crc_size;
2051		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
2052					i, DMA_FROM_DEVICE);
2053		memcpy(p, page_address(page->buffer) + off, i);
2054		dma_sync_single_for_device(&cp->pdev->dev,
2055					   page->dma_addr + off, i,
2056					   DMA_FROM_DEVICE);
2057		if (p == (char *) skb->data) /* not split */
2058			RX_USED_ADD(page, cp->mtu_stride);
2059		else
2060			RX_USED_ADD(page, i);
2061
2062		/* any more data? */
2063		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2064			p += hlen;
2065			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2066			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2067			dma_sync_single_for_cpu(&cp->pdev->dev,
2068						page->dma_addr,
2069						dlen + cp->crc_size,
2070						DMA_FROM_DEVICE);
2071			memcpy(p, page_address(page->buffer), dlen + cp->crc_size);
2072			dma_sync_single_for_device(&cp->pdev->dev,
2073						   page->dma_addr,
2074						   dlen + cp->crc_size,
2075						   DMA_FROM_DEVICE);
2076			RX_USED_ADD(page, dlen + cp->crc_size);
2077		}
2078end_copy_pkt:
2079		if (cp->crc_size)
2080			crcaddr = skb->data + alloclen;
2081
2082		skb_put(skb, alloclen);
2083	}
2084
2085	csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2086	if (cp->crc_size) {
2087		/* checksum includes FCS. strip it out. */
2088		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2089					      csum_unfold(csum)));
2090	}
2091	skb->protocol = eth_type_trans(skb, cp->dev);
2092	if (skb->protocol == htons(ETH_P_IP)) {
2093		skb->csum = csum_unfold(~csum);
2094		skb->ip_summed = CHECKSUM_COMPLETE;
2095	} else
2096		skb_checksum_none_assert(skb);
2097	return len;
2098}
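
/* note on the checksum handling above: when half-duplex operation
 * leaves the FCS attached (crc_size != 0), its contribution is
 * stripped from the hardware checksum before use. CHECKSUM_COMPLETE
 * then tells the stack that skb->csum already holds the ones-complement
 * sum over the payload, so no software checksum pass is needed for
 * IPv4.
 */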
2099
2100
2101/* we can handle up to 64 rx flows at a time. we do the same thing
2102 * as nonreassm except that we batch up the buffers.
2103 * NOTE: we currently just treat each flow as a bunch of packets that
2104 *       we pass up. a better way would be to coalesce the packets
2105 *       into a jumbo packet. to do that, we need to do the following:
2106 *       1) the first packet will have a clean split between header and
2107 *          data. save both.
2108 *       2) each time the next flow packet comes in, extend the
2109 *          data length and merge the checksums.
2110 *       3) on flow release, fix up the header.
2111 *       4) make sure the higher layer doesn't care.
2112 * because packets get coalesced, we shouldn't run into fragment count
2113 * issues.
2114 */
2115static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2116				   struct sk_buff *skb)
2117{
2118	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2119	struct sk_buff_head *flow = &cp->rx_flows[flowid];
2120
2121	/* this is protected at a higher layer, so no need to
2122	 * do any additional locking here. stick the buffer
2123	 * at the end.
2124	 */
2125	__skb_queue_tail(flow, skb);
2126	if (words[0] & RX_COMP1_RELEASE_FLOW) {
2127		while ((skb = __skb_dequeue(flow))) {
2128			cas_skb_release(skb);
2129		}
2130	}
2131}
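
/* e.g. with N_RX_FLOWS == 64, the flow id above is masked to its low
 * 6 bits, so at most 64 reassembly queues exist at once;
 * RX_COMP1_RELEASE_FLOW flushes a queue up the stack in arrival order.
 */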
2132
2133/* put rx descriptor back on ring. if a buffer is in use by a higher
2134 * layer, this will need to put in a replacement.
2135 */
2136static void cas_post_page(struct cas *cp, const int ring, const int index)
2137{
2138	cas_page_t *new;
2139	int entry;
2140
2141	entry = cp->rx_old[ring];
2142
2143	new = cas_page_swap(cp, ring, index);
2144	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2145	cp->init_rxds[ring][entry].index  =
2146		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2147			    CAS_BASE(RX_INDEX_RING, ring));
2148
2149	entry = RX_DESC_ENTRY(ring, entry + 1);
2150	cp->rx_old[ring] = entry;
2151
2152	if (entry % 4)
2153		return;
2154
2155	if (ring == 0)
2156		writel(entry, cp->regs + REG_RX_KICK);
2157	else if ((N_RX_DESC_RINGS > 1) &&
2158		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2159		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2160}
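
/* note: the kick is deferred until the posted index reaches a
 * 4-descriptor boundary (the entry % 4 check above); the chip
 * apparently consumes rx descriptors in groups of four, and
 * cas_post_rxds_ringN() below keeps the same grouping through its
 * cluster bookkeeping.
 */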
2161
2162
2163/* only when things are bad */
2164static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2165{
2166	unsigned int entry, last, count, released;
2167	int cluster;
2168	cas_page_t **page = cp->rx_pages[ring];
2169
2170	entry = cp->rx_old[ring];
2171
2172	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2173		     "rxd[%d] interrupt, done: %d\n", ring, entry);
2174
2175	cluster = -1;
2176	count = entry & 0x3;
2177	last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
2178	released = 0;
2179	while (entry != last) {
2180		/* make a new buffer if it's still in use */
2181		if (page_count(page[entry]->buffer) > 1) {
2182			cas_page_t *new = cas_page_dequeue(cp);
2183			if (!new) {
2184				/* let the timer know that we need to
2185				 * do this again
2186				 */
2187				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2188				if (!timer_pending(&cp->link_timer))
2189					mod_timer(&cp->link_timer, jiffies +
2190						  CAS_LINK_FAST_TIMEOUT);
2191				cp->rx_old[ring]  = entry;
2192				cp->rx_last[ring] = num ? num - released : 0;
2193				return -ENOMEM;
2194			}
2195			spin_lock(&cp->rx_inuse_lock);
2196			list_add(&page[entry]->list, &cp->rx_inuse_list);
2197			spin_unlock(&cp->rx_inuse_lock);
2198			cp->init_rxds[ring][entry].buffer =
2199				cpu_to_le64(new->dma_addr);
2200			page[entry] = new;
2201
2202		}
2203
2204		if (++count == 4) {
2205			cluster = entry;
2206			count = 0;
2207		}
2208		released++;
2209		entry = RX_DESC_ENTRY(ring, entry + 1);
2210	}
2211	cp->rx_old[ring] = entry;
2212
2213	if (cluster < 0)
2214		return 0;
2215
2216	if (ring == 0)
2217		writel(cluster, cp->regs + REG_RX_KICK);
2218	else if ((N_RX_DESC_RINGS > 1) &&
2219		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2220		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2221	return 0;
2222}
2223
2224
2225/* process a completion ring. packets are set up in three basic ways:
2226 * small packets: should be copied header + data in single buffer.
2227 * large packets: header and data in a single buffer.
2228 * split packets: header in a separate buffer from data.
2229 *                data may be in multiple pages. data may be > 256
2230 *                bytes but in a single page.
2231 *
2232 * NOTE: RX page posting is done in this routine as well. multiple RX
2233 *       completion rings could be used, but it isn't really worthwhile
2234 *       because page posting would still serialize on the single
2235 *       descriptor ring.
2236 */
2237static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2238{
2239	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2240	int entry, drops;
2241	int npackets = 0;
2242
2243	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2244		     "rx[%d] interrupt, done: %d/%d\n",
2245		     ring,
2246		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2247
2248	entry = cp->rx_new[ring];
2249	drops = 0;
2250	while (1) {
2251		struct cas_rx_comp *rxc = rxcs + entry;
2252		struct sk_buff *skb;
2253		int type, len;
2254		u64 words[4];
2255		int i, dring;
2256
2257		words[0] = le64_to_cpu(rxc->word1);
2258		words[1] = le64_to_cpu(rxc->word2);
2259		words[2] = le64_to_cpu(rxc->word3);
2260		words[3] = le64_to_cpu(rxc->word4);
2261
2262		/* don't touch if still owned by hw */
2263		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2264		if (type == 0)
2265			break;
2266
2267		/* hw hasn't cleared the zero bit yet */
2268		if (words[3] & RX_COMP4_ZERO) {
2269			break;
2270		}
2271
2272		/* get info on the packet */
2273		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2274			spin_lock(&cp->stat_lock[ring]);
2275			cp->net_stats[ring].rx_errors++;
2276			if (words[3] & RX_COMP4_LEN_MISMATCH)
2277				cp->net_stats[ring].rx_length_errors++;
2278			if (words[3] & RX_COMP4_BAD)
2279				cp->net_stats[ring].rx_crc_errors++;
2280			spin_unlock(&cp->stat_lock[ring]);
2281
2282			/* We'll just return it to Cassini. */
2283		drop_it:
2284			spin_lock(&cp->stat_lock[ring]);
2285			++cp->net_stats[ring].rx_dropped;
2286			spin_unlock(&cp->stat_lock[ring]);
2287			goto next;
2288		}
2289
2290		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2291		if (len < 0) {
2292			++drops;
2293			goto drop_it;
2294		}
2295
2296		/* see if it's a flow re-assembly or not. the driver
2297		 * itself handles release back up.
2298		 */
2299		if (RX_DONT_BATCH || (type == 0x2)) {
2300			/* non-reassm: these always get released */
2301			cas_skb_release(skb);
2302		} else {
2303			cas_rx_flow_pkt(cp, words, skb);
2304		}
2305
2306		spin_lock(&cp->stat_lock[ring]);
2307		cp->net_stats[ring].rx_packets++;
2308		cp->net_stats[ring].rx_bytes += len;
2309		spin_unlock(&cp->stat_lock[ring]);
2310
2311	next:
2312		npackets++;
2313
2314		/* should it be released? */
2315		if (words[0] & RX_COMP1_RELEASE_HDR) {
2316			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2317			dring = CAS_VAL(RX_INDEX_RING, i);
2318			i = CAS_VAL(RX_INDEX_NUM, i);
2319			cas_post_page(cp, dring, i);
2320		}
2321
2322		if (words[0] & RX_COMP1_RELEASE_DATA) {
2323			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2324			dring = CAS_VAL(RX_INDEX_RING, i);
2325			i = CAS_VAL(RX_INDEX_NUM, i);
2326			cas_post_page(cp, dring, i);
2327		}
2328
2329		if (words[0] & RX_COMP1_RELEASE_NEXT) {
2330			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2331			dring = CAS_VAL(RX_INDEX_RING, i);
2332			i = CAS_VAL(RX_INDEX_NUM, i);
2333			cas_post_page(cp, dring, i);
2334		}
2335
2336		/* skip to the next entry */
2337		entry = RX_COMP_ENTRY(ring, entry + 1 +
2338				      CAS_VAL(RX_COMP1_SKIP, words[0]));
2339#ifdef USE_NAPI
2340		if (budget && (npackets >= budget))
2341			break;
2342#endif
2343	}
2344	cp->rx_new[ring] = entry;
2345
2346	if (drops)
2347		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2348	return npackets;
2349}
2350
2351
2352/* put completion entries back on the ring */
2353static void cas_post_rxcs_ringN(struct net_device *dev,
2354				struct cas *cp, int ring)
2355{
2356	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2357	int last, entry;
2358
2359	last = cp->rx_cur[ring];
2360	entry = cp->rx_new[ring];
2361	netif_printk(cp, intr, KERN_DEBUG, dev,
2362		     "rxc[%d] interrupt, done: %d/%d\n",
2363		     ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2364
2365	/* zero and re-mark descriptors */
2366	while (last != entry) {
2367		cas_rxc_init(rxc + last);
2368		last = RX_COMP_ENTRY(ring, last + 1);
2369	}
2370	cp->rx_cur[ring] = last;
2371
2372	if (ring == 0)
2373		writel(last, cp->regs + REG_RX_COMP_TAIL);
2374	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2375		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2376}
2377
2378
2379
2380/* cassini can use all four PCI interrupts for the completion ring.
2381 * rings 3 and 4 are identical
2382 */
2383#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2384static inline void cas_handle_irqN(struct net_device *dev,
2385				   struct cas *cp, const u32 status,
2386				   const int ring)
2387{
2388	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2389		cas_post_rxcs_ringN(dev, cp, ring);
2390}
2391
2392static irqreturn_t cas_interruptN(int irq, void *dev_id)
2393{
2394	struct net_device *dev = dev_id;
2395	struct cas *cp = netdev_priv(dev);
2396	unsigned long flags;
2397	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2398	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2399
2400	/* check for shared irq */
2401	if (status == 0)
2402		return IRQ_NONE;
2403
2404	spin_lock_irqsave(&cp->lock, flags);
2405	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2406#ifdef USE_NAPI
2407		cas_mask_intr(cp);
2408		napi_schedule(&cp->napi);
2409#else
2410		cas_rx_ringN(cp, ring, 0);
2411#endif
2412		status &= ~INTR_RX_DONE_ALT;
2413	}
2414
2415	if (status)
2416		cas_handle_irqN(dev, cp, status, ring);
2417	spin_unlock_irqrestore(&cp->lock, flags);
2418	return IRQ_HANDLED;
2419}
2420#endif
2421
2422#ifdef USE_PCI_INTB
2423/* everything but rx packets */
2424static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2425{
2426	if (status & INTR_RX_BUF_UNAVAIL_1) {
2427		/* Frame arrived, no free RX buffers available.
2428		 * NOTE: we can get this on a link transition. */
2429		cas_post_rxds_ringN(cp, 1, 0);
2430		spin_lock(&cp->stat_lock[1]);
2431		cp->net_stats[1].rx_dropped++;
2432		spin_unlock(&cp->stat_lock[1]);
2433	}
2434
2435	if (status & INTR_RX_BUF_AE_1)
2436		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2437				    RX_AE_FREEN_VAL(1));
2438
2439	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2440		cas_post_rxcs_ringN(cp, 1);
2441}
2442
2443/* ring 2 handles a few more events than 3 and 4 */
2444static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2445{
2446	struct net_device *dev = dev_id;
2447	struct cas *cp = netdev_priv(dev);
2448	unsigned long flags;
2449	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2450
2451	/* check for shared interrupt */
2452	if (status == 0)
2453		return IRQ_NONE;
2454
2455	spin_lock_irqsave(&cp->lock, flags);
2456	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2457#ifdef USE_NAPI
2458		cas_mask_intr(cp);
2459		napi_schedule(&cp->napi);
2460#else
2461		cas_rx_ringN(cp, 1, 0);
2462#endif
2463		status &= ~INTR_RX_DONE_ALT;
2464	}
2465	if (status)
2466		cas_handle_irq1(cp, status);
2467	spin_unlock_irqrestore(&cp->lock, flags);
2468	return IRQ_HANDLED;
2469}
2470#endif
2471
2472static inline void cas_handle_irq(struct net_device *dev,
2473				  struct cas *cp, const u32 status)
2474{
2475	/* housekeeping interrupts */
2476	if (status & INTR_ERROR_MASK)
2477		cas_abnormal_irq(dev, cp, status);
2478
2479	if (status & INTR_RX_BUF_UNAVAIL) {
2480		/* Frame arrived, no free RX buffers available.
2481		 * NOTE: we can get this on a link transition.
2482		 */
2483		cas_post_rxds_ringN(cp, 0, 0);
2484		spin_lock(&cp->stat_lock[0]);
2485		cp->net_stats[0].rx_dropped++;
2486		spin_unlock(&cp->stat_lock[0]);
2487	} else if (status & INTR_RX_BUF_AE) {
2488		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2489				    RX_AE_FREEN_VAL(0));
2490	}
2491
2492	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2493		cas_post_rxcs_ringN(dev, cp, 0);
2494}
2495
2496static irqreturn_t cas_interrupt(int irq, void *dev_id)
2497{
2498	struct net_device *dev = dev_id;
2499	struct cas *cp = netdev_priv(dev);
2500	unsigned long flags;
2501	u32 status = readl(cp->regs + REG_INTR_STATUS);
2502
2503	if (status == 0)
2504		return IRQ_NONE;
2505
2506	spin_lock_irqsave(&cp->lock, flags);
2507	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2508		cas_tx(dev, cp, status);
2509		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2510	}
2511
2512	if (status & INTR_RX_DONE) {
2513#ifdef USE_NAPI
2514		cas_mask_intr(cp);
2515		napi_schedule(&cp->napi);
2516#else
2517		cas_rx_ringN(cp, 0, 0);
2518#endif
2519		status &= ~INTR_RX_DONE;
2520	}
2521
2522	if (status)
2523		cas_handle_irq(dev, cp, status);
2524	spin_unlock_irqrestore(&cp->lock, flags);
2525	return IRQ_HANDLED;
2526}
2527
2528
2529#ifdef USE_NAPI
2530static int cas_poll(struct napi_struct *napi, int budget)
2531{
2532	struct cas *cp = container_of(napi, struct cas, napi);
2533	struct net_device *dev = cp->dev;
2534	int i, enable_intr, credits;
2535	u32 status = readl(cp->regs + REG_INTR_STATUS);
2536	unsigned long flags;
2537
2538	spin_lock_irqsave(&cp->lock, flags);
2539	cas_tx(dev, cp, status);
2540	spin_unlock_irqrestore(&cp->lock, flags);
2541
2542	/* NAPI rx packets. we spread the credits across all of the
2543	 * rxc rings
2544	 *
2545	 * to make sure we're fair with the work we loop through each
2546	 * ring N_RX_COMP_RINGS times with a request of
2547	 * budget / N_RX_COMP_RINGS
2548	 */
2549	enable_intr = 1;
2550	credits = 0;
2551	for (i = 0; i < N_RX_COMP_RINGS; i++) {
2552		int j;
2553		for (j = 0; j < N_RX_COMP_RINGS; j++) {
2554			credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2555			if (credits >= budget) {
2556				enable_intr = 0;
2557				goto rx_comp;
2558			}
2559		}
2560	}
2561
2562rx_comp:
2563	/* final rx completion */
2564	spin_lock_irqsave(&cp->lock, flags);
2565	if (status)
2566		cas_handle_irq(dev, cp, status);
2567
2568#ifdef USE_PCI_INTB
2569	if (N_RX_COMP_RINGS > 1) {
2570		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2571		if (status)
2572			cas_handle_irq1(cp, status);
2573	}
2574#endif
2575
2576#ifdef USE_PCI_INTC
2577	if (N_RX_COMP_RINGS > 2) {
2578		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2579		if (status)
2580			cas_handle_irqN(dev, cp, status, 2);
2581	}
2582#endif
2583
2584#ifdef USE_PCI_INTD
2585	if (N_RX_COMP_RINGS > 3) {
2586		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2587		if (status)
2588			cas_handle_irqN(dev, cp, status, 3);
2589	}
2590#endif
2591	spin_unlock_irqrestore(&cp->lock, flags);
2592	if (enable_intr) {
2593		napi_complete(napi);
2594		cas_unmask_intr(cp);
2595	}
2596	return credits;
2597}
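
/* illustrative budget split: with budget == 64 and N_RX_COMP_RINGS == 4,
 * each cas_rx_ringN() call above gets a budget of 16 and every ring is
 * visited up to four times, so one busy ring can't consume the whole
 * budget while the others starve.
 */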
2598#endif
2599
2600#ifdef CONFIG_NET_POLL_CONTROLLER
2601static void cas_netpoll(struct net_device *dev)
2602{
2603	struct cas *cp = netdev_priv(dev);
2604
2605	cas_disable_irq(cp, 0);
2606	cas_interrupt(cp->pdev->irq, dev);
2607	cas_enable_irq(cp, 0);
2608
2609#ifdef USE_PCI_INTB
2610	if (N_RX_COMP_RINGS > 1) {
2611		/* cas_interrupt1(); */
2612	}
2613#endif
2614#ifdef USE_PCI_INTC
2615	if (N_RX_COMP_RINGS > 2) {
2616		/* cas_interruptN(); */
2617	}
2618#endif
2619#ifdef USE_PCI_INTD
2620	if (N_RX_COMP_RINGS > 3) {
2621		/* cas_interruptN(); */
2622	}
2623#endif
2624}
2625#endif
2626
2627static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
2628{
2629	struct cas *cp = netdev_priv(dev);
2630
2631	netdev_err(dev, "transmit timed out, resetting\n");
2632	if (!cp->hw_running) {
2633		netdev_err(dev, "hrm.. hw not running!\n");
2634		return;
2635	}
2636
2637	netdev_err(dev, "MIF_STATE[%08x]\n",
2638		   readl(cp->regs + REG_MIF_STATE_MACHINE));
2639
2640	netdev_err(dev, "MAC_STATE[%08x]\n",
2641		   readl(cp->regs + REG_MAC_STATE_MACHINE));
2642
2643	netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2644		   readl(cp->regs + REG_TX_CFG),
2645		   readl(cp->regs + REG_MAC_TX_STATUS),
2646		   readl(cp->regs + REG_MAC_TX_CFG),
2647		   readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2648		   readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2649		   readl(cp->regs + REG_TX_FIFO_READ_PTR),
2650		   readl(cp->regs + REG_TX_SM_1),
2651		   readl(cp->regs + REG_TX_SM_2));
2652
2653	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2654		   readl(cp->regs + REG_RX_CFG),
2655		   readl(cp->regs + REG_MAC_RX_STATUS),
2656		   readl(cp->regs + REG_MAC_RX_CFG));
2657
2658	netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2659		   readl(cp->regs + REG_HP_STATE_MACHINE),
2660		   readl(cp->regs + REG_HP_STATUS0),
2661		   readl(cp->regs + REG_HP_STATUS1),
2662		   readl(cp->regs + REG_HP_STATUS2));
2663
2664#if 1
2665	atomic_inc(&cp->reset_task_pending);
2666	atomic_inc(&cp->reset_task_pending_all);
2667	schedule_work(&cp->reset_task);
2668#else
2669	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2670	schedule_work(&cp->reset_task);
2671#endif
2672}
2673
2674static inline int cas_intme(int ring, int entry)
2675{
2676	/* Algorithm: IRQ every 1/2 of descriptors. */
2677	if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2678		return 1;
2679	return 0;
2680}
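
/* example: with TX_DESC_RINGN_SIZE(ring) == 64 the mask above is
 * (64 >> 1) - 1 == 0x1f, so only entries 0 and 32 request an
 * interrupt -- i.e. twice per trip around the ring.
 */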
2681
2682
2683static void cas_write_txd(struct cas *cp, int ring, int entry,
2684			  dma_addr_t mapping, int len, u64 ctrl, int last)
2685{
2686	struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2687
2688	ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2689	if (cas_intme(ring, entry))
2690		ctrl |= TX_DESC_INTME;
2691	if (last)
2692		ctrl |= TX_DESC_EOF;
2693	txd->control = cpu_to_le64(ctrl);
2694	txd->buffer = cpu_to_le64(mapping);
2695}
2696
2697static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2698				const int entry)
2699{
2700	return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2701}
2702
2703static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2704				     const int entry, const int tentry)
2705{
2706	cp->tx_tiny_use[ring][tentry].nbufs++;
2707	cp->tx_tiny_use[ring][entry].used = 1;
2708	return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2709}
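
/* tiny buffer bookkeeping: nbufs accumulates on the slot that owns the
 * skb (tentry) so that cas_tx_ringN() can account for the extra
 * descriptors when reclaiming, while the per-slot used flag marks the
 * entry actually holding a tiny buffer so reclaim skips over it.
 */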
2710
2711static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2712				    struct sk_buff *skb)
2713{
2714	struct net_device *dev = cp->dev;
2715	int entry, nr_frags, frag, tabort, tentry;
2716	dma_addr_t mapping;
2717	unsigned long flags;
2718	u64 ctrl;
2719	u32 len;
2720
2721	spin_lock_irqsave(&cp->tx_lock[ring], flags);
2722
2723	/* This is a hard error, log it. */
2724	if (TX_BUFFS_AVAIL(cp, ring) <=
2725	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2726		netif_stop_queue(dev);
2727		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2728		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2729		return 1;
2730	}
2731
2732	ctrl = 0;
2733	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2734		const u64 csum_start_off = skb_checksum_start_offset(skb);
2735		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2736
2737		ctrl =  TX_DESC_CSUM_EN |
2738			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2739			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2740	}
2741
2742	entry = cp->tx_new[ring];
2743	cp->tx_skbs[ring][entry] = skb;
2744
2745	nr_frags = skb_shinfo(skb)->nr_frags;
2746	len = skb_headlen(skb);
2747	mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
2748			       offset_in_page(skb->data), len, DMA_TO_DEVICE);
2749
2750	tentry = entry;
2751	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2752	if (unlikely(tabort)) {
2753		/* NOTE: len is always >  tabort */
2754		cas_write_txd(cp, ring, entry, mapping, len - tabort,
2755			      ctrl | TX_DESC_SOF, 0);
2756		entry = TX_DESC_NEXT(ring, entry);
2757
2758		skb_copy_from_linear_data_offset(skb, len - tabort,
2759			      tx_tiny_buf(cp, ring, entry), tabort);
2760		mapping = tx_tiny_map(cp, ring, entry, tentry);
2761		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2762			      (nr_frags == 0));
2763	} else {
2764		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2765			      TX_DESC_SOF, (nr_frags == 0));
2766	}
2767	entry = TX_DESC_NEXT(ring, entry);
2768
2769	for (frag = 0; frag < nr_frags; frag++) {
2770		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2771
2772		len = skb_frag_size(fragp);
2773		mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2774					   DMA_TO_DEVICE);
2775
2776		tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
2777		if (unlikely(tabort)) {
2778			/* NOTE: len is always > tabort */
2779			cas_write_txd(cp, ring, entry, mapping, len - tabort,
2780				      ctrl, 0);
2781			entry = TX_DESC_NEXT(ring, entry);
2782			memcpy_from_page(tx_tiny_buf(cp, ring, entry),
2783					 skb_frag_page(fragp),
2784					 skb_frag_off(fragp) + len - tabort,
2785					 tabort);
2786			mapping = tx_tiny_map(cp, ring, entry, tentry);
2787			len     = tabort;
2788		}
2789
2790		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2791			      (frag + 1 == nr_frags));
2792		entry = TX_DESC_NEXT(ring, entry);
2793	}
2794
2795	cp->tx_new[ring] = entry;
2796	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2797		netif_stop_queue(dev);
2798
2799	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2800		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2801		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2802	writel(entry, cp->regs + REG_TX_KICKN(ring));
2803	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2804	return 0;
2805}
2806
2807static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2808{
2809	struct cas *cp = netdev_priv(dev);
2810
2811	/* this is only used as a load-balancing hint, so it doesn't
2812	 * need to be SMP safe
2813	 */
2814	static int ring;
2815
2816	if (skb_padto(skb, cp->min_frame_size))
2817		return NETDEV_TX_OK;
2818
2819	/* XXX: we need some higher-level QoS hooks to steer packets to
2820	 *      individual queues.
2821	 */
2822	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2823		return NETDEV_TX_BUSY;
2824	return NETDEV_TX_OK;
2825}
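
/* e.g. with N_TX_RINGS == 4, ring++ & N_TX_RINGS_MASK cycles
 * 0,1,2,3,0,... on successive calls. a race on the static counter only
 * perturbs the round-robin pattern, which is why it need not be SMP
 * safe.
 */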
2826
2827static void cas_init_tx_dma(struct cas *cp)
2828{
2829	u64 desc_dma = cp->block_dvma;
2830	unsigned long off;
2831	u32 val;
2832	int i;
2833
2834	/* set up tx completion writeback registers. must be 8-byte aligned */
2835#ifdef USE_TX_COMPWB
2836	off = offsetof(struct cas_init_block, tx_compwb);
2837	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2838	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2839#endif
2840
2841	/* enable completion writebacks, enable paced mode,
2842	 * disable read pipe, and disable pre-interrupt compwbs
2843	 */
2844	val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2845		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2846		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2847		TX_CFG_INTR_COMPWB_DIS;
2848
2849	/* write out tx ring info and tx desc bases */
2850	for (i = 0; i < MAX_TX_RINGS; i++) {
2851		off = (unsigned long) cp->init_txds[i] -
2852			(unsigned long) cp->init_block;
2853
2854		val |= CAS_TX_RINGN_BASE(i);
2855		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2856		writel((desc_dma + off) & 0xffffffff, cp->regs +
2857		       REG_TX_DBN_LOW(i));
2858		/* don't zero out the kick register here as the system
2859		 * will wedge
2860		 */
2861	}
2862	writel(val, cp->regs + REG_TX_CFG);
2863
2864	/* program max burst sizes. these numbers should be different
2865	 * if doing QoS.
2866	 */
2867#ifdef USE_QOS
2868	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2869	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2870	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2871	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2872#else
2873	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2874	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2875	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2876	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2877#endif
2878}
2879
2880/* Must be invoked under cp->lock. */
2881static inline void cas_init_dma(struct cas *cp)
2882{
2883	cas_init_tx_dma(cp);
2884	cas_init_rx_dma(cp);
2885}
2886
2887static void cas_process_mc_list(struct cas *cp)
2888{
2889	u16 hash_table[16];
2890	u32 crc;
2891	struct netdev_hw_addr *ha;
2892	int i = 1;
2893
2894	memset(hash_table, 0, sizeof(hash_table));
2895	netdev_for_each_mc_addr(ha, cp->dev) {
2896		if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2897			/* use the alternate mac address registers for the
2898			 * first 15 multicast addresses
2899			 */
2900			writel((ha->addr[4] << 8) | ha->addr[5],
2901			       cp->regs + REG_MAC_ADDRN(i*3 + 0));
2902			writel((ha->addr[2] << 8) | ha->addr[3],
2903			       cp->regs + REG_MAC_ADDRN(i*3 + 1));
2904			writel((ha->addr[0] << 8) | ha->addr[1],
2905			       cp->regs + REG_MAC_ADDRN(i*3 + 2));
2906			i++;
2907		}
2908		else {
2909			/* use hw hash table for the next series of
2910			 * multicast addresses
2911			 */
2912			crc = ether_crc_le(ETH_ALEN, ha->addr);
2913			crc >>= 24;
2914			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2915		}
2916	}
2917	for (i = 0; i < 16; i++)
2918		writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2919}
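
/* hash layout: the top 8 crc bits select one of 256 filter bits, split
 * into a register index (crc >> 4, 16 registers) and a bit position
 * (15 - (crc & 0xf)) within that 16-bit register. e.g. a crc byte of
 * 0x4a sets bit 5 of hash_table[4].
 */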
2920
2921/* Must be invoked under cp->lock. */
2922static u32 cas_setup_multicast(struct cas *cp)
2923{
2924	u32 rxcfg = 0;
2925	int i;
2926
2927	if (cp->dev->flags & IFF_PROMISC) {
2928		rxcfg |= MAC_RX_CFG_PROMISC_EN;
2929
2930	} else if (cp->dev->flags & IFF_ALLMULTI) {
2931		for (i = 0; i < 16; i++)
2932			writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2933		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2934
2935	} else {
2936		cas_process_mc_list(cp);
2937		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2938	}
2939
2940	return rxcfg;
2941}
2942
2943/* must be invoked under cp->stat_lock[N_TX_RINGS] */
2944static void cas_clear_mac_err(struct cas *cp)
2945{
2946	writel(0, cp->regs + REG_MAC_COLL_NORMAL);
2947	writel(0, cp->regs + REG_MAC_COLL_FIRST);
2948	writel(0, cp->regs + REG_MAC_COLL_EXCESS);
2949	writel(0, cp->regs + REG_MAC_COLL_LATE);
2950	writel(0, cp->regs + REG_MAC_TIMER_DEFER);
2951	writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
2952	writel(0, cp->regs + REG_MAC_RECV_FRAME);
2953	writel(0, cp->regs + REG_MAC_LEN_ERR);
2954	writel(0, cp->regs + REG_MAC_ALIGN_ERR);
2955	writel(0, cp->regs + REG_MAC_FCS_ERR);
2956	writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
2957}
2958
2959
2960static void cas_mac_reset(struct cas *cp)
2961{
2962	int i;
2963
2964	/* do both TX and RX reset */
2965	writel(0x1, cp->regs + REG_MAC_TX_RESET);
2966	writel(0x1, cp->regs + REG_MAC_RX_RESET);
2967
2968	/* wait for TX */
2969	i = STOP_TRIES;
2970	while (i-- > 0) {
2971		if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
2972			break;
2973		udelay(10);
2974	}
2975
2976	/* wait for RX */
2977	i = STOP_TRIES;
2978	while (i-- > 0) {
2979		if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
2980			break;
2981		udelay(10);
2982	}
2983
2984	if (readl(cp->regs + REG_MAC_TX_RESET) |
2985	    readl(cp->regs + REG_MAC_RX_RESET))
2986		netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
2987			   readl(cp->regs + REG_MAC_TX_RESET),
2988			   readl(cp->regs + REG_MAC_RX_RESET),
2989			   readl(cp->regs + REG_MAC_STATE_MACHINE));
2990}
2991
2992
2993/* Must be invoked under cp->lock. */
2994static void cas_init_mac(struct cas *cp)
2995{
2996	const unsigned char *e = &cp->dev->dev_addr[0];
2997	int i;
2998	cas_mac_reset(cp);
2999
3000	/* setup core arbitration weight register */
3001	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3002
3003#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3004	/* set the infinite burst register for chips that don't have
3005	 * pci issues.
3006	 */
3007	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3008		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3009#endif
3010
3011	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3012
3013	writel(0x00, cp->regs + REG_MAC_IPG0);
3014	writel(0x08, cp->regs + REG_MAC_IPG1);
3015	writel(0x04, cp->regs + REG_MAC_IPG2);
3016
3017	/* change later for 802.3z */
3018	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3019
3020	/* min frame + FCS */
3021	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3022
3023	/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
3024	 * specify the maximum frame size to prevent RX tag errors on
3025	 * oversized frames.
3026	 */
3027	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3028	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3029			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3030	       cp->regs + REG_MAC_FRAMESIZE_MAX);
3031
3032	/* NOTE: crc_size is used as a surrogate for half-duplex.
3033	 * workaround saturn half-duplex issue by increasing preamble
3034	 * size to 65 bytes.
3035	 */
3036	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3037		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3038	else
3039		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3040	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3041	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3042	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3043
3044	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3045
3046	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3047	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3048	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3049	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3050	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3051
3052	/* setup mac address in perfect filter array */
3053	for (i = 0; i < 45; i++)
3054		writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3055
3056	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3057	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3058	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3059
3060	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3061	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3062	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
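	/* regs 42-44 above hold the 802.3x flow control address
	 * 01:80:c2:00:00:01, stored low word first just like the
	 * unicast address before it.
	 */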
3063
3064	cp->mac_rx_cfg = cas_setup_multicast(cp);
3065
3066	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3067	cas_clear_mac_err(cp);
3068	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3069
3070	/* Setup MAC interrupts.  We want to get all of the interesting
3071	 * counter expiration events, but we do not want to hear about
3072	 * normal rx/tx as the DMA engine tells us that.
3073	 */
3074	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3075	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3076
3077	/* Don't enable even the PAUSE interrupts for now, we
3078	 * make no use of those events other than to record them.
3079	 */
3080	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3081}
3082
3083/* Must be invoked under cp->lock. */
3084static void cas_init_pause_thresholds(struct cas *cp)
3085{
3086	/* Calculate pause thresholds.  Setting the OFF threshold to the
3087	 * full RX fifo size effectively disables PAUSE generation
3088	 */
3089	if (cp->rx_fifo_size <= (2 * 1024)) {
3090		cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3091	} else {
3092		int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3093		if (max_frame * 3 > cp->rx_fifo_size) {
3094			cp->rx_pause_off = 7104;
3095			cp->rx_pause_on  = 960;
3096		} else {
3097			int off = (cp->rx_fifo_size - (max_frame * 2));
3098			int on = off - max_frame;
3099			cp->rx_pause_off = off;
3100			cp->rx_pause_on = on;
3101		}
3102	}
3103}
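
/* worked example (illustrative numbers): with a 16K rx fifo and a
 * 1500 byte mtu, max_frame = (1500 + 14 + 4 + 4 + 64) & ~63 = 1536.
 * three such frames fit in the fifo, so off = 16384 - 2*1536 = 13312
 * and on = 13312 - 1536 = 11776: XOFF goes out once less than two
 * max-sized frames of space remain, and XON once a further frame has
 * drained.
 */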
3104
3105static int cas_vpd_match(const void __iomem *p, const char *str)
3106{
3107	int len = strlen(str) + 1;
3108	int i;
3109
3110	for (i = 0; i < len; i++) {
3111		if (readb(p + i) != str[i])
3112			return 0;
3113	}
3114	return 1;
3115}
3116
3117
3118/* get the mac address by reading the vpd information in the rom.
3119 * also get the phy type and determine if there's an entropy generator.
3120 * NOTE: this is a bit convoluted for the following reasons:
3121 *  1) vpd info has order-dependent mac addresses for multinic cards
3122 *  2) the only way to determine the nic order is to use the slot
3123 *     number.
3124 *  3) fiber cards don't have bridges, so their slot numbers don't
3125 *     mean anything.
3126 *  4) we don't actually know we have a fiber card until after
3127 *     the mac addresses are parsed.
3128 */
3129static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3130			    const int offset)
3131{
3132	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3133	void __iomem *base, *kstart;
3134	int i, len;
3135	int found = 0;
3136#define VPD_FOUND_MAC        0x01
3137#define VPD_FOUND_PHY        0x02
3138
3139	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3140	int mac_off  = 0;
3141
3142#if defined(CONFIG_SPARC)
3143	const unsigned char *addr;
3144#endif
3145
3146	/* give us access to the PROM */
3147	writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3148	       cp->regs + REG_BIM_LOCAL_DEV_EN);
3149
3150	/* check for an expansion rom */
3151	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3152		goto use_random_mac_addr;
3153
3154	/* search for beginning of vpd */
3155	base = NULL;
3156	for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3157		/* check for PCIR */
3158		if ((readb(p + i + 0) == 0x50) &&
3159		    (readb(p + i + 1) == 0x43) &&
3160		    (readb(p + i + 2) == 0x49) &&
3161		    (readb(p + i + 3) == 0x52)) {
3162			base = p + (readb(p + i + 8) |
3163				    (readb(p + i + 9) << 8));
3164			break;
3165		}
3166	}
3167
3168	if (!base || (readb(base) != 0x82))
3169		goto use_random_mac_addr;
3170
3171	i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3172	while (i < EXPANSION_ROM_SIZE) {
3173		if (readb(base + i) != 0x90) /* no vpd found */
3174			goto use_random_mac_addr;
3175
3176		/* found a vpd field */
3177		len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3178
3179		/* extract keywords */
3180		kstart = base + i + 3;
3181		p = kstart;
3182		while ((p - kstart) < len) {
3183			int klen = readb(p + 2);
3184			int j;
3185			char type;
3186
3187			p += 3;
3188
3189			/* look for the following things:
3190			 * -- correct length == 29
3191			 * 3 (type) + 2 (size) +
3192			 * 18 (strlen("local-mac-address") + 1) +
3193			 * 6 (mac addr)
3194			 * -- VPD Instance 'I'
3195			 * -- VPD Type Bytes 'B'
3196			 * -- VPD data length == 6
3197			 * -- property string == local-mac-address
3198			 *
3199			 * -- correct length == 24
3200			 * 3 (type) + 2 (size) +
3201			 * 12 (strlen("entropy-dev") + 1) +
3202			 * 7 (strlen("vms110") + 1)
3203			 * -- VPD Instance 'I'
3204			 * -- VPD Type String 'B'
3205			 * -- VPD data length == 7
3206			 * -- property string == entropy-dev
3207			 *
3208			 * -- correct length == 18
3209			 * 3 (type) + 2 (size) +
3210			 * 9 (strlen("phy-type") + 1) +
3211			 * 4 (strlen("pcs") + 1)
3212			 * -- VPD Instance 'I'
3213			 * -- VPD Type String 'S'
3214			 * -- VPD data length == 4
3215			 * -- property string == phy-type
3216			 *
3217			 * -- correct length == 23
3218			 * 3 (type) + 2 (size) +
3219			 * 14 (strlen("phy-interface") + 1) +
3220			 * 4 (strlen("pcs") + 1)
3221			 * -- VPD Instance 'I'
3222			 * -- VPD Type String 'S'
3223			 * -- VPD data length == 4
3224			 * -- property string == phy-interface
3225			 */
3226			if (readb(p) != 'I')
3227				goto next;
3228
3229			/* finally, check string and length */
3230			type = readb(p + 3);
3231			if (type == 'B') {
3232				if ((klen == 29) && readb(p + 4) == 6 &&
3233				    cas_vpd_match(p + 5,
3234						  "local-mac-address")) {
3235					if (mac_off++ > offset)
3236						goto next;
3237
3238					/* set mac address */
3239					for (j = 0; j < 6; j++)
3240						dev_addr[j] =
3241							readb(p + 23 + j);
3242					goto found_mac;
3243				}
3244			}
3245
3246			if (type != 'S')
3247				goto next;
3248
3249#ifdef USE_ENTROPY_DEV
3250			if ((klen == 24) &&
3251			    cas_vpd_match(p + 5, "entropy-dev") &&
3252			    cas_vpd_match(p + 17, "vms110")) {
3253				cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3254				goto next;
3255			}
3256#endif
3257
3258			if (found & VPD_FOUND_PHY)
3259				goto next;
3260
3261			if ((klen == 18) && readb(p + 4) == 4 &&
3262			    cas_vpd_match(p + 5, "phy-type")) {
3263				if (cas_vpd_match(p + 14, "pcs")) {
3264					phy_type = CAS_PHY_SERDES;
3265					goto found_phy;
3266				}
3267			}
3268
3269			if ((klen == 23) && readb(p + 4) == 4 &&
3270			    cas_vpd_match(p + 5, "phy-interface")) {
3271				if (cas_vpd_match(p + 19, "pcs")) {
3272					phy_type = CAS_PHY_SERDES;
3273					goto found_phy;
3274				}
3275			}
3276found_mac:
3277			found |= VPD_FOUND_MAC;
3278			goto next;
3279
3280found_phy:
3281			found |= VPD_FOUND_PHY;
3282
3283next:
3284			p += klen;
3285		}
3286		i += len + 3;
3287	}
3288
3289use_random_mac_addr:
3290	if (found & VPD_FOUND_MAC)
3291		goto done;
3292
3293#if defined(CONFIG_SPARC)
3294	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3295	if (addr != NULL) {
3296		memcpy(dev_addr, addr, ETH_ALEN);
3297		goto done;
3298	}
3299#endif
3300
3301	/* Sun MAC prefix then 3 random bytes. */
3302	pr_info("MAC address not found in ROM VPD\n");
3303	dev_addr[0] = 0x08;
3304	dev_addr[1] = 0x00;
3305	dev_addr[2] = 0x20;
3306	get_random_bytes(dev_addr + 3, 3);
3307
3308done:
3309	writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3310	return phy_type;
3311}
3312
3313/* check pci invariants */
3314static void cas_check_pci_invariants(struct cas *cp)
3315{
3316	struct pci_dev *pdev = cp->pdev;
3317
3318	cp->cas_flags = 0;
3319	if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3320	    (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3321		if (pdev->revision >= CAS_ID_REVPLUS)
3322			cp->cas_flags |= CAS_FLAG_REG_PLUS;
3323		if (pdev->revision < CAS_ID_REVPLUS02u)
3324			cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3325
3326		/* Original Cassini supports HW CSUM, but it's not
3327		 * enabled by default as it can trigger TX hangs.
3328		 */
3329		if (pdev->revision < CAS_ID_REV2)
3330			cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3331	} else {
3332		/* Only sun has original cassini chips.  */
3333		cp->cas_flags |= CAS_FLAG_REG_PLUS;
3334
3335		/* We use a flag because the same phy might be externally
3336		 * connected.
3337		 */
3338		if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3339		    (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3340			cp->cas_flags |= CAS_FLAG_SATURN;
3341	}
3342}
3343
3344
3345static int cas_check_invariants(struct cas *cp)
3346{
3347	struct pci_dev *pdev = cp->pdev;
3348	u8 addr[ETH_ALEN];
3349	u32 cfg;
3350	int i;
3351
3352	/* get page size for rx buffers. */
3353	cp->page_order = 0;
3354#ifdef USE_PAGE_ORDER
3355	if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3356		/* see if we can allocate larger pages */
3357		struct page *page = alloc_pages(GFP_ATOMIC,
3358						CAS_JUMBO_PAGE_SHIFT -
3359						PAGE_SHIFT);
3360		if (page) {
3361			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3362			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3363		} else {
3364			pr_info("MTU limited to %d bytes\n", CAS_MAX_MTU);
3365		}
3366	}
3367#endif
3368	cp->page_size = (PAGE_SIZE << cp->page_order);
3369
3370	/* Fetch the FIFO configurations. */
3371	cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3372	cp->rx_fifo_size = RX_FIFO_SIZE;
3373
3374	/* finish phy determination. MDIO1 takes precedence over MDIO0 if
3375	 * they're both connected.
3376	 */
3377	cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn));
3378	eth_hw_addr_set(cp->dev, addr);
3379	if (cp->phy_type & CAS_PHY_SERDES) {
3380		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3381		return 0; /* no more checking needed */
3382	}
3383
3384	/* MII */
3385	cfg = readl(cp->regs + REG_MIF_CFG);
3386	if (cfg & MIF_CFG_MDIO_1) {
3387		cp->phy_type = CAS_PHY_MII_MDIO1;
3388	} else if (cfg & MIF_CFG_MDIO_0) {
3389		cp->phy_type = CAS_PHY_MII_MDIO0;
3390	}
3391
3392	cas_mif_poll(cp, 0);
3393	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3394
3395	for (i = 0; i < 32; i++) {
3396		u32 phy_id;
3397		int j;
3398
3399		for (j = 0; j < 3; j++) {
3400			cp->phy_addr = i;
3401			phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3402			phy_id |= cas_phy_read(cp, MII_PHYSID2);
3403			if (phy_id && (phy_id != 0xFFFFFFFF)) {
3404				cp->phy_id = phy_id;
3405				goto done;
3406			}
3407		}
3408	}
3409	pr_err("MII phy did not respond [%08x]\n",
3410	       readl(cp->regs + REG_MIF_STATE_MACHINE));
3411	return -1;
3412
3413done:
3414	/* see if we can do gigabit */
3415	cfg = cas_phy_read(cp, MII_BMSR);
3416	if ((cfg & CAS_BMSR_1000_EXTEND) &&
3417	    cas_phy_read(cp, CAS_MII_1000_EXTEND))
3418		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3419	return 0;
3420}
3421
3422/* Must be invoked under cp->lock. */
3423static inline void cas_start_dma(struct cas *cp)
3424{
3425	int i;
3426	u32 val;
3427	int txfailed = 0;
3428
3429	/* enable dma */
3430	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3431	writel(val, cp->regs + REG_TX_CFG);
3432	val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3433	writel(val, cp->regs + REG_RX_CFG);
3434
3435	/* enable the mac */
3436	val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3437	writel(val, cp->regs + REG_MAC_TX_CFG);
3438	val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3439	writel(val, cp->regs + REG_MAC_RX_CFG);
3440
3441	i = STOP_TRIES;
3442	while (i-- > 0) {
3443		val = readl(cp->regs + REG_MAC_TX_CFG);
3444		if ((val & MAC_TX_CFG_EN))
3445			break;
3446		udelay(10);
3447	}
3448	if (i < 0)
		txfailed = 1;
3449	i = STOP_TRIES;
3450	while (i-- > 0) {
3451		val = readl(cp->regs + REG_MAC_RX_CFG);
3452		if ((val & MAC_RX_CFG_EN)) {
3453			if (txfailed) {
3454				netdev_err(cp->dev,
3455					   "enabling mac failed [tx:%08x:%08x]\n",
3456					   readl(cp->regs + REG_MIF_STATE_MACHINE),
3457					   readl(cp->regs + REG_MAC_STATE_MACHINE));
3458			}
3459			goto enable_rx_done;
3460		}
3461		udelay(10);
3462	}
3463	netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3464		   (txfailed ? "tx,rx" : "rx"),
3465		   readl(cp->regs + REG_MIF_STATE_MACHINE),
3466		   readl(cp->regs + REG_MAC_STATE_MACHINE));
3467
3468enable_rx_done:
3469	cas_unmask_intr(cp); /* enable interrupts */
3470	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3471	writel(0, cp->regs + REG_RX_COMP_TAIL);
3472
3473	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3474		if (N_RX_DESC_RINGS > 1)
3475			writel(RX_DESC_RINGN_SIZE(1) - 4,
3476			       cp->regs + REG_PLUS_RX_KICK1);
3477	}
3478}
3479
3480/* Must be invoked under cp->lock. */
3481static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3482				   int *pause)
3483{
3484	u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3485	*fd     = (val & PCS_MII_LPA_FD) ? 1 : 0;
3486	*pause  = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3487	if (val & PCS_MII_LPA_ASYM_PAUSE)
3488		*pause |= 0x10;
3489	*spd = 1000;
3490}
3491
3492/* Must be invoked under cp->lock. */
3493static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3494				   int *pause)
3495{
3496	u32 val;
3497
3498	*fd = 0;
3499	*spd = 10;
3500	*pause = 0;
3501
3502	/* use GMII registers */
3503	val = cas_phy_read(cp, MII_LPA);
3504	if (val & CAS_LPA_PAUSE)
3505		*pause = 0x01;
3506
3507	if (val & CAS_LPA_ASYM_PAUSE)
3508		*pause |= 0x10;
3509
3510	if (val & LPA_DUPLEX)
3511		*fd = 1;
3512	if (val & LPA_100)
3513		*spd = 100;
3514
3515	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3516		val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3517		if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3518			*spd = 1000;
3519		if (val & CAS_LPA_1000FULL)
3520			*fd = 1;
3521	}
3522}
3523
3524/* A link-up condition has occurred, initialize and enable the
3525 * rest of the chip.
3526 *
3527 * Must be invoked under cp->lock.
3528 */
3529static void cas_set_link_modes(struct cas *cp)
3530{
3531	u32 val;
3532	int full_duplex, speed, pause;
3533
3534	full_duplex = 0;
3535	speed = 10;
3536	pause = 0;
3537
3538	if (CAS_PHY_MII(cp->phy_type)) {
3539		cas_mif_poll(cp, 0);
3540		val = cas_phy_read(cp, MII_BMCR);
3541		if (val & BMCR_ANENABLE) {
3542			cas_read_mii_link_mode(cp, &full_duplex, &speed,
3543					       &pause);
3544		} else {
3545			if (val & BMCR_FULLDPLX)
3546				full_duplex = 1;
3547
3548			if (val & BMCR_SPEED100)
3549				speed = 100;
3550			else if (val & CAS_BMCR_SPEED1000)
3551				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3552					1000 : 100;
3553		}
3554		cas_mif_poll(cp, 1);
3555
3556	} else {
3557		val = readl(cp->regs + REG_PCS_MII_CTRL);
3558		cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3559		if ((val & PCS_MII_AUTONEG_EN) == 0) {
3560			if (val & PCS_MII_CTRL_DUPLEX)
3561				full_duplex = 1;
3562		}
3563	}
3564
3565	netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3566		   speed, full_duplex ? "full" : "half");
3567
3568	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3569	if (CAS_PHY_MII(cp->phy_type)) {
3570		val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3571		if (!full_duplex)
3572			val |= MAC_XIF_DISABLE_ECHO;
3573	}
3574	if (full_duplex)
3575		val |= MAC_XIF_FDPLX_LED;
3576	if (speed == 1000)
3577		val |= MAC_XIF_GMII_MODE;
3578	writel(val, cp->regs + REG_MAC_XIF_CFG);
3579
3580	/* deal with carrier and collision detect. */
3581	val = MAC_TX_CFG_IPG_EN;
3582	if (full_duplex) {
3583		val |= MAC_TX_CFG_IGNORE_CARRIER;
3584		val |= MAC_TX_CFG_IGNORE_COLL;
3585	} else {
3586#ifndef USE_CSMA_CD_PROTO
3587		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3588		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3589#endif
3590	}
3591	/* val now set up for REG_MAC_TX_CFG */
3592
3593	/* If gigabit and half-duplex, enable carrier extension
3594	 * mode.  increase slot time to 512 bytes as well.
3595	 * else, disable it and make sure slot time is 64 bytes.
3596	 * also activate checksum bug workaround
3597	 */
3598	if ((speed == 1000) && !full_duplex) {
3599		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3600		       cp->regs + REG_MAC_TX_CFG);
3601
3602		val = readl(cp->regs + REG_MAC_RX_CFG);
3603		val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
3604		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3605		       cp->regs + REG_MAC_RX_CFG);
3606
3607		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3608
3609		cp->crc_size = 4;
3610		/* minimum size gigabit frame at half duplex */
3611		cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3612
3613	} else {
3614		writel(val, cp->regs + REG_MAC_TX_CFG);
3615
3616		/* checksum bug workaround. don't strip FCS when in
3617		 * half-duplex mode
3618		 */
3619		val = readl(cp->regs + REG_MAC_RX_CFG);
3620		if (full_duplex) {
3621			val |= MAC_RX_CFG_STRIP_FCS;
3622			cp->crc_size = 0;
3623			cp->min_frame_size = CAS_MIN_MTU;
3624		} else {
3625			val &= ~MAC_RX_CFG_STRIP_FCS;
3626			cp->crc_size = 4;
3627			cp->min_frame_size = CAS_MIN_FRAME;
3628		}
3629		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3630		       cp->regs + REG_MAC_RX_CFG);
3631		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3632	}
3633
3634	if (netif_msg_link(cp)) {
3635		if (pause & 0x01) {
3636			netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3637				    cp->rx_fifo_size,
3638				    cp->rx_pause_off,
3639				    cp->rx_pause_on);
3640		} else if (pause & 0x10) {
3641			netdev_info(cp->dev, "TX pause enabled\n");
3642		} else {
3643			netdev_info(cp->dev, "Pause is disabled\n");
3644		}
3645	}
3646
3647	val = readl(cp->regs + REG_MAC_CTRL_CFG);
3648	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3649	if (pause) { /* symmetric or asymmetric pause */
3650		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3651		if (pause & 0x01) { /* symmetric pause */
3652			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3653		}
3654	}
3655	writel(val, cp->regs + REG_MAC_CTRL_CFG);
3656	cas_start_dma(cp);
3657}
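/* Recap of the pause encoding used above (a reading aid derived from the
 * code in this function, not a new interface): bit 0x01 of "pause" means
 * symmetric pause was negotiated, bit 0x10 means asymmetric pause.  The
 * resulting MAC_CTRL_CFG programming is:
 *
 *	pause & 0x01  ->  SEND_PAUSE_EN | RECV_PAUSE_EN  (symmetric)
 *	pause & 0x10  ->  SEND_PAUSE_EN only             (TX pause)
 *	pause == 0    ->  both pause enables cleared
 */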
3658
3659/* Must be invoked under cp->lock. */
3660static void cas_init_hw(struct cas *cp, int restart_link)
3661{
3662	if (restart_link)
3663		cas_phy_init(cp);
3664
3665	cas_init_pause_thresholds(cp);
3666	cas_init_mac(cp);
3667	cas_init_dma(cp);
3668
3669	if (restart_link) {
3670		/* Default aneg parameters */
3671		cp->timer_ticks = 0;
3672		cas_begin_auto_negotiation(cp, NULL);
3673	} else if (cp->lstate == link_up) {
3674		cas_set_link_modes(cp);
3675		netif_carrier_on(cp->dev);
3676	}
3677}
3678
3679/* Must be invoked under cp->lock. on earlier cassini boards,
3680 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
3681 * let it settle out, and then restore pci state.
3682 */
3683static void cas_hard_reset(struct cas *cp)
3684{
3685	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3686	udelay(20);
3687	pci_restore_state(cp->pdev);
3688}
3689
3690
3691static void cas_global_reset(struct cas *cp, int blkflag)
3692{
3693	int limit;
3694
3695	/* issue a global reset. don't use RSTOUT. */
3696	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3697		/* For PCS, when the blkflag is set, we should set the
3698		 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
3699		 * the last autonegotiation from being cleared.  We'll
3700		 * need some special handling if the chip is set into a
3701		 * loopback mode.
3702		 */
3703		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3704		       cp->regs + REG_SW_RESET);
3705	} else {
3706		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3707	}
3708
3709	/* need to wait at least 3ms before polling register */
3710	mdelay(3);
3711
3712	limit = STOP_TRIES;
3713	while (limit-- > 0) {
3714		u32 val = readl(cp->regs + REG_SW_RESET);
3715		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3716			goto done;
3717		udelay(10);
3718	}
3719	netdev_err(cp->dev, "sw reset failed\n");
3720
3721done:
3722	/* enable various BIM interrupts */
3723	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3724	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3725
3726	/* clear out pci error status mask for handled errors.
3727	 * we don't deal with DMA counter overflows as they happen
3728	 * all the time.
3729	 */
3730	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3731			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3732			       PCI_ERR_BIM_DMA_READ), cp->regs +
3733	       REG_PCI_ERR_STATUS_MASK);
3734
3735	/* set up for MII by default to address mac rx reset timeout
3736	 * issue
3737	 */
3738	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3739}
3740
3741static void cas_reset(struct cas *cp, int blkflag)
3742{
3743	u32 val;
3744
3745	cas_mask_intr(cp);
3746	cas_global_reset(cp, blkflag);
3747	cas_mac_reset(cp);
3748	cas_entropy_reset(cp);
3749
3750	/* disable dma engines. */
3751	val = readl(cp->regs + REG_TX_CFG);
3752	val &= ~TX_CFG_DMA_EN;
3753	writel(val, cp->regs + REG_TX_CFG);
3754
3755	val = readl(cp->regs + REG_RX_CFG);
3756	val &= ~RX_CFG_DMA_EN;
3757	writel(val, cp->regs + REG_RX_CFG);
3758
3759	/* program header parser */
3760	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3761	    (&CAS_HP_ALT_FIRMWARE[0] == &cas_prog_null[0])) {
3762		cas_load_firmware(cp, CAS_HP_FIRMWARE);
3763	} else {
3764		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3765	}
3766
3767	/* clear out error registers */
3768	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3769	cas_clear_mac_err(cp);
3770	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3771}
3772
3773/* Shut down the chip; must be called with pm_mutex held.  */
3774static void cas_shutdown(struct cas *cp)
3775{
3776	unsigned long flags;
3777
3778	/* Make us not-running to avoid timers respawning */
3779	cp->hw_running = 0;
3780
3781	del_timer_sync(&cp->link_timer);
3782
3783	/* Stop the reset task */
3784#if 0
3785	while (atomic_read(&cp->reset_task_pending_mtu) ||
3786	       atomic_read(&cp->reset_task_pending_spare) ||
3787	       atomic_read(&cp->reset_task_pending_all))
3788		schedule();
3789
3790#else
3791	while (atomic_read(&cp->reset_task_pending))
3792		schedule();
3793#endif
3794	/* Actually stop the chip */
3795	cas_lock_all_save(cp, flags);
3796	cas_reset(cp, 0);
3797	if (cp->cas_flags & CAS_FLAG_SATURN)
3798		cas_phy_powerdown(cp);
3799	cas_unlock_all_restore(cp, flags);
3800}
3801
3802static int cas_change_mtu(struct net_device *dev, int new_mtu)
3803{
3804	struct cas *cp = netdev_priv(dev);
3805
3806	dev->mtu = new_mtu;
3807	if (!netif_running(dev) || !netif_device_present(dev))
3808		return 0;
3809
3810	/* let the reset task handle it */
3811#if 1
3812	atomic_inc(&cp->reset_task_pending);
3813	if ((cp->phy_type & CAS_PHY_SERDES)) {
3814		atomic_inc(&cp->reset_task_pending_all);
3815	} else {
3816		atomic_inc(&cp->reset_task_pending_mtu);
3817	}
3818	schedule_work(&cp->reset_task);
3819#else
3820	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3821		   CAS_RESET_ALL : CAS_RESET_MTU);
3822	pr_err("reset called in cas_change_mtu\n");
3823	schedule_work(&cp->reset_task);
3824#endif
3825
3826	flush_work(&cp->reset_task);
3827	return 0;
3828}
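/* For reference, MTU changes reach ndo_change_mtu through the normal
 * netlink path; an illustrative invocation from userspace (not part of
 * this driver) would be:
 *
 *	ip link set dev eth0 mtu 1500
 */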
3829
3830static void cas_clean_txd(struct cas *cp, int ring)
3831{
3832	struct cas_tx_desc *txd = cp->init_txds[ring];
3833	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3834	u64 daddr, dlen;
3835	int i, size;
3836
3837	size = TX_DESC_RINGN_SIZE(ring);
3838	for (i = 0; i < size; i++) {
3839		int frag;
3840
3841		if (skbs[i] == NULL)
3842			continue;
3843
3844		skb = skbs[i];
3845		skbs[i] = NULL;
3846
3847		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags;  frag++) {
3848			int ent = i & (size - 1);
3849
3850			/* first buffer is never a tiny buffer and so
3851			 * needs to be unmapped.
3852			 */
3853			daddr = le64_to_cpu(txd[ent].buffer);
3854			dlen  =  CAS_VAL(TX_DESC_BUFLEN,
3855					 le64_to_cpu(txd[ent].control));
3856			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
3857				       DMA_TO_DEVICE);
3858
3859			if (frag != skb_shinfo(skb)->nr_frags) {
3860				i++;
3861
3862				/* next buffer might be a tiny buffer.
3863				 * skip past it.
3864				 */
3865				ent = i & (size - 1);
3866				if (cp->tx_tiny_use[ring][ent].used)
3867					i++;
3868			}
3869		}
3870		dev_kfree_skb_any(skb);
3871	}
3872
3873	/* zero out tiny buf usage */
3874	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3875}
3876
3877/* freed on close */
3878static inline void cas_free_rx_desc(struct cas *cp, int ring)
3879{
3880	cas_page_t **page = cp->rx_pages[ring];
3881	int i, size;
3882
3883	size = RX_DESC_RINGN_SIZE(ring);
3884	for (i = 0; i < size; i++) {
3885		if (page[i]) {
3886			cas_page_free(cp, page[i]);
3887			page[i] = NULL;
3888		}
3889	}
3890}
3891
3892static void cas_free_rxds(struct cas *cp)
3893{
3894	int i;
3895
3896	for (i = 0; i < N_RX_DESC_RINGS; i++)
3897		cas_free_rx_desc(cp, i);
3898}
3899
3900/* Must be invoked under cp->lock. */
3901static void cas_clean_rings(struct cas *cp)
3902{
3903	int i;
3904
3905	/* need to clean all tx rings */
3906	memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3907	memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3908	for (i = 0; i < N_TX_RINGS; i++)
3909		cas_clean_txd(cp, i);
3910
3911	/* zero out init block */
3912	memset(cp->init_block, 0, sizeof(struct cas_init_block));
3913	cas_clean_rxds(cp);
3914	cas_clean_rxcs(cp);
3915}
3916
3917/* allocated on open */
3918static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3919{
3920	cas_page_t **page = cp->rx_pages[ring];
3921	int size, i = 0;
3922
3923	size = RX_DESC_RINGN_SIZE(ring);
3924	for (i = 0; i < size; i++) {
3925		if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3926			return -1;
3927	}
3928	return 0;
3929}
3930
3931static int cas_alloc_rxds(struct cas *cp)
3932{
3933	int i;
3934
3935	for (i = 0; i < N_RX_DESC_RINGS; i++) {
3936		if (cas_alloc_rx_desc(cp, i) < 0) {
3937			cas_free_rxds(cp);
3938			return -1;
3939		}
3940	}
3941	return 0;
3942}
3943
3944static void cas_reset_task(struct work_struct *work)
3945{
3946	struct cas *cp = container_of(work, struct cas, reset_task);
3947#if 0
3948	int pending = atomic_read(&cp->reset_task_pending);
3949#else
3950	int pending_all = atomic_read(&cp->reset_task_pending_all);
3951	int pending_spare = atomic_read(&cp->reset_task_pending_spare);
3952	int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
3953
3954	if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
3955		/* We can have more tasks scheduled than actually
3956		 * needed.
3957		 */
3958		atomic_dec(&cp->reset_task_pending);
3959		return;
3960	}
3961#endif
3962	/* The link went down, we reset the ring, but keep
3963	 * DMA stopped. Use this function for reset
3964	 * on error as well.
3965	 */
3966	if (cp->hw_running) {
3967		unsigned long flags;
3968
3969		/* Make sure we don't get interrupts or tx packets */
3970		netif_device_detach(cp->dev);
3971		cas_lock_all_save(cp, flags);
3972
3973		if (cp->opened) {
3974			/* cas_spare_recover() is also called from cas_open(),
3975			 * but the lists it uses are not initialized until
3976			 * cas_open() runs; hence the cp->opened check above.
3977			 */
3978			cas_spare_recover(cp, GFP_ATOMIC);
3979		}
3980#if 1
3981		/* test => only pending_spare set */
3982		if (!pending_all && !pending_mtu)
3983			goto done;
3984#else
3985		if (pending == CAS_RESET_SPARE)
3986			goto done;
3987#endif
3988		/* when pending == CAS_RESET_ALL, the following
3989		 * call to cas_init_hw will restart auto negotiation.
3990		 * Setting the second argument of cas_reset to
3991		 * !(pending == CAS_RESET_ALL) will set this argument
3992		 * to 1 (avoiding reinitializing the PHY for the normal
3993		 * PCS case) when auto negotiation is not restarted.
3994		 */
3995#if 1
3996		cas_reset(cp, !(pending_all > 0));
3997		if (cp->opened)
3998			cas_clean_rings(cp);
3999		cas_init_hw(cp, (pending_all > 0));
4000#else
4001		cas_reset(cp, !(pending == CAS_RESET_ALL));
4002		if (cp->opened)
4003			cas_clean_rings(cp);
4004		cas_init_hw(cp, pending == CAS_RESET_ALL);
4005#endif
4006
4007done:
4008		cas_unlock_all_restore(cp, flags);
4009		netif_device_attach(cp->dev);
4010	}
4011#if 1
4012	atomic_sub(pending_all, &cp->reset_task_pending_all);
4013	atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4014	atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4015	atomic_dec(&cp->reset_task_pending);
4016#else
4017	atomic_set(&cp->reset_task_pending, 0);
4018#endif
4019}
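/* Note on the accounting above: the pending counters are decremented by
 * the values sampled at entry (atomic_sub) rather than reset to zero.
 * A request that arrives while the reset is in progress increments a
 * counter after the sample was taken, so it survives the subtraction and
 * is handled by the next scheduled task; a plain atomic_set(.., 0), as
 * in the disabled branch, could silently drop such a request.
 */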
4020
4021static void cas_link_timer(struct timer_list *t)
4022{
4023	struct cas *cp = from_timer(cp, t, link_timer);
4024	int mask, pending = 0, reset = 0;
4025	unsigned long flags;
4026
4027	if (link_transition_timeout != 0 &&
4028	    cp->link_transition_jiffies_valid &&
4029	    time_is_before_jiffies(cp->link_transition_jiffies +
4030	      link_transition_timeout)) {
4031		/* One-second counter so link-down workaround doesn't
4032		 * cause resets to occur so fast as to fool the switch
4033		 * into thinking the link is down.
4034		 */
4035		cp->link_transition_jiffies_valid = 0;
4036	}
4037
4038	if (!cp->hw_running)
4039		return;
4040
4041	spin_lock_irqsave(&cp->lock, flags);
4042	cas_lock_tx(cp);
4043	cas_entropy_gather(cp);
4044
4045	/* If the link task is still pending, we just
4046	 * reschedule the link timer
4047	 */
4048#if 1
4049	if (atomic_read(&cp->reset_task_pending_all) ||
4050	    atomic_read(&cp->reset_task_pending_spare) ||
4051	    atomic_read(&cp->reset_task_pending_mtu))
4052		goto done;
4053#else
4054	if (atomic_read(&cp->reset_task_pending))
4055		goto done;
4056#endif
4057
4058	/* check for rx cleaning */
4059	if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4060		int i, rmask;
4061
4062		for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4063			rmask = CAS_FLAG_RXD_POST(i);
4064			if ((mask & rmask) == 0)
4065				continue;
4066
4067			/* post_rxds will do a mod_timer */
4068			if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4069				pending = 1;
4070				continue;
4071			}
4072			cp->cas_flags &= ~rmask;
4073		}
4074	}
4075
4076	if (CAS_PHY_MII(cp->phy_type)) {
4077		u16 bmsr;
4078		cas_mif_poll(cp, 0);
4079		bmsr = cas_phy_read(cp, MII_BMSR);
4080		/* WTZ: Solaris driver reads this twice, but that
4081		 * may be due to the PCS case and the use of a
4082		 * common implementation. Read it twice here to be
4083		 * safe.
4084		 */
4085		bmsr = cas_phy_read(cp, MII_BMSR);
4086		cas_mif_poll(cp, 1);
4087		readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
4088		reset = cas_mii_link_check(cp, bmsr);
4089	} else {
4090		reset = cas_pcs_link_check(cp);
4091	}
4092
4093	if (reset)
4094		goto done;
4095
4096	/* check for tx state machine confusion */
4097	if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4098		u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4099		u32 wptr, rptr;
4100		int tlm  = CAS_VAL(MAC_SM_TLM, val);
4101
4102		if (((tlm == 0x5) || (tlm == 0x3)) &&
4103		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4104			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4105				     "tx err: MAC_STATE[%08x]\n", val);
4106			reset = 1;
4107			goto done;
4108		}
4109
4110		val  = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4111		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4112		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4113		if ((val == 0) && (wptr != rptr)) {
4114			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4115				     "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4116				     val, wptr, rptr);
4117			reset = 1;
4118		}
4119
4120		if (reset)
4121			cas_hard_reset(cp);
4122	}
4123
4124done:
4125	if (reset) {
4126#if 1
4127		atomic_inc(&cp->reset_task_pending);
4128		atomic_inc(&cp->reset_task_pending_all);
4129		schedule_work(&cp->reset_task);
4130#else
4131		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4132		pr_err("reset called in cas_link_timer\n");
4133		schedule_work(&cp->reset_task);
4134#endif
4135	}
4136
4137	if (!pending)
4138		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4139	cas_unlock_tx(cp);
4140	spin_unlock_irqrestore(&cp->lock, flags);
4141}
4142
4143/* tiny buffers are used to avoid target abort issues with
4144 * older cassini chips
4145 */
4146static void cas_tx_tiny_free(struct cas *cp)
4147{
4148	struct pci_dev *pdev = cp->pdev;
4149	int i;
4150
4151	for (i = 0; i < N_TX_RINGS; i++) {
4152		if (!cp->tx_tiny_bufs[i])
4153			continue;
4154
4155		dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
4156				  cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]);
4157		cp->tx_tiny_bufs[i] = NULL;
4158	}
4159}
4160
4161static int cas_tx_tiny_alloc(struct cas *cp)
4162{
4163	struct pci_dev *pdev = cp->pdev;
4164	int i;
4165
4166	for (i = 0; i < N_TX_RINGS; i++) {
4167		cp->tx_tiny_bufs[i] =
4168			dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
4169					   &cp->tx_tiny_dvma[i], GFP_KERNEL);
4170		if (!cp->tx_tiny_bufs[i]) {
4171			cas_tx_tiny_free(cp);
4172			return -1;
4173		}
4174	}
4175	return 0;
4176}
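/* Each ring's tiny-buffer area is a single coherent TX_TINY_BUF_BLOCK
 * allocation carved into fixed-size per-entry slots.  As an illustrative
 * sketch (the real helpers for this live in cassini.h; the arithmetic
 * below is an assumption for exposition, not a quote of those macros):
 *
 *	void      *buf  = cp->tx_tiny_bufs[ring] + TX_TINY_BUF_SIZE * entry;
 *	dma_addr_t dmab = cp->tx_tiny_dvma[ring] + TX_TINY_BUF_SIZE * entry;
 */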
4177
4178
4179static int cas_open(struct net_device *dev)
4180{
4181	struct cas *cp = netdev_priv(dev);
4182	int hw_was_up, err;
4183	unsigned long flags;
4184
4185	mutex_lock(&cp->pm_mutex);
4186
4187	hw_was_up = cp->hw_running;
4188
4189	/* The power-management mutex protects the hw_running
4190	 * etc. state so it is safe to do this bit without cp->lock
4191	 */
4192	if (!cp->hw_running) {
4193		/* Reset the chip */
4194		cas_lock_all_save(cp, flags);
4195		/* We set the second arg to cas_reset to zero
4196		 * because cas_init_hw below will have its second
4197		 * argument set to non-zero, which will force
4198		 * autonegotiation to start.
4199		 */
4200		cas_reset(cp, 0);
4201		cp->hw_running = 1;
4202		cas_unlock_all_restore(cp, flags);
4203	}
4204
4205	err = -ENOMEM;
4206	if (cas_tx_tiny_alloc(cp) < 0)
4207		goto err_unlock;
4208
4209	/* alloc rx descriptors */
4210	if (cas_alloc_rxds(cp) < 0)
4211		goto err_tx_tiny;
4212
4213	/* allocate spares */
4214	cas_spare_init(cp);
4215	cas_spare_recover(cp, GFP_KERNEL);
4216
4217	/* We can now request the interrupt as we know it's masked
4218	 * on the controller. cassini+ has up to 4 interrupts
4219	 * that can be used, but you need to do explicit pci interrupt
4220	 * mapping to expose them
4221	 */
4222	if (request_irq(cp->pdev->irq, cas_interrupt,
4223			IRQF_SHARED, dev->name, (void *) dev)) {
4224		netdev_err(cp->dev, "failed to request irq!\n");
4225		err = -EAGAIN;
4226		goto err_spare;
4227	}
4228
4229#ifdef USE_NAPI
4230	napi_enable(&cp->napi);
4231#endif
4232	/* init hw */
4233	cas_lock_all_save(cp, flags);
4234	cas_clean_rings(cp);
4235	cas_init_hw(cp, !hw_was_up);
4236	cp->opened = 1;
4237	cas_unlock_all_restore(cp, flags);
4238
4239	netif_start_queue(dev);
4240	mutex_unlock(&cp->pm_mutex);
4241	return 0;
4242
4243err_spare:
4244	cas_spare_free(cp);
4245	cas_free_rxds(cp);
4246err_tx_tiny:
4247	cas_tx_tiny_free(cp);
4248err_unlock:
4249	mutex_unlock(&cp->pm_mutex);
4250	return err;
4251}
4252
4253static int cas_close(struct net_device *dev)
4254{
4255	unsigned long flags;
4256	struct cas *cp = netdev_priv(dev);
4257
4258#ifdef USE_NAPI
4259	napi_disable(&cp->napi);
4260#endif
4261	/* Make sure we don't get distracted by suspend/resume */
4262	mutex_lock(&cp->pm_mutex);
4263
4264	netif_stop_queue(dev);
4265
4266	/* Stop traffic, mark us closed */
4267	cas_lock_all_save(cp, flags);
4268	cp->opened = 0;
4269	cas_reset(cp, 0);
4270	cas_phy_init(cp);
4271	cas_begin_auto_negotiation(cp, NULL);
4272	cas_clean_rings(cp);
4273	cas_unlock_all_restore(cp, flags);
4274
4275	free_irq(cp->pdev->irq, (void *) dev);
4276	cas_spare_free(cp);
4277	cas_free_rxds(cp);
4278	cas_tx_tiny_free(cp);
4279	mutex_unlock(&cp->pm_mutex);
4280	return 0;
4281}
4282
4283static struct {
4284	const char name[ETH_GSTRING_LEN];
4285} ethtool_cassini_statnames[] = {
4286	{"collisions"},
4287	{"rx_bytes"},
4288	{"rx_crc_errors"},
4289	{"rx_dropped"},
4290	{"rx_errors"},
4291	{"rx_fifo_errors"},
4292	{"rx_frame_errors"},
4293	{"rx_length_errors"},
4294	{"rx_over_errors"},
4295	{"rx_packets"},
4296	{"tx_aborted_errors"},
4297	{"tx_bytes"},
4298	{"tx_dropped"},
4299	{"tx_errors"},
4300	{"tx_fifo_errors"},
4301	{"tx_packets"}
4302};
4303#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
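/* cas_get_ethtool_stats() further below must fill its data[] array in
 * exactly this order; ethtool pairs names and values by index, e.g. via
 * "ethtool -S eth0" (illustrative usage).
 */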
4304
4305static struct {
4306	const int offsets;	/* neg. values for 2nd arg to cas_read_phy */
4307} ethtool_register_table[] = {
4308	{-MII_BMSR},
4309	{-MII_BMCR},
4310	{REG_CAWR},
4311	{REG_INF_BURST},
4312	{REG_BIM_CFG},
4313	{REG_RX_CFG},
4314	{REG_HP_CFG},
4315	{REG_MAC_TX_CFG},
4316	{REG_MAC_RX_CFG},
4317	{REG_MAC_CTRL_CFG},
4318	{REG_MAC_XIF_CFG},
4319	{REG_MIF_CFG},
4320	{REG_PCS_CFG},
4321	{REG_SATURN_PCFG},
4322	{REG_PCS_MII_STATUS},
4323	{REG_PCS_STATE_MACHINE},
4324	{REG_MAC_COLL_EXCESS},
4325	{REG_MAC_COLL_LATE}
4326};
4327#define CAS_REG_LEN 	ARRAY_SIZE(ethtool_register_table)
4328#define CAS_MAX_REGS 	(sizeof (u32)*CAS_REG_LEN)
4329
4330static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4331{
4332	u8 *p;
4333	int i;
4334	unsigned long flags;
4335
4336	spin_lock_irqsave(&cp->lock, flags);
4337	for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
4338		u16 hval;
4339		u32 val;
4340		if (ethtool_register_table[i].offsets < 0) {
4341			hval = cas_phy_read(cp,
4342				    -ethtool_register_table[i].offsets);
4343			val = hval;
4344		} else {
4345			val = readl(cp->regs + ethtool_register_table[i].offsets);
4346		}
4347		memcpy(p, (u8 *)&val, sizeof(u32));
4348	}
4349	spin_unlock_irqrestore(&cp->lock, flags);
4350}
4351
4352static struct net_device_stats *cas_get_stats(struct net_device *dev)
4353{
4354	struct cas *cp = netdev_priv(dev);
4355	struct net_device_stats *stats = cp->net_stats;
4356	unsigned long flags;
4357	int i;
4358	unsigned long tmp;
4359
4360	/* we collate all of the stats into net_stats[N_TX_RINGS] */
4361	if (!cp->hw_running)
4362		return stats + N_TX_RINGS;
4363
4364	/* collect outstanding stats */
4365	/* WTZ: the Cassini spec gives these as 16 bit counters but
4366	 * stored in 32-bit words.  Added a mask of 0xffff to be safe,
4367	 * in case the chip somehow puts any garbage in the other bits.
4368	 * Also, counter usage didn't seem to match what Adrian did
4369	 * in the parts of the code that set these quantities. Made
4370	 * that consistent.
4371	 */
4372	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4373	stats[N_TX_RINGS].rx_crc_errors +=
4374		readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4375	stats[N_TX_RINGS].rx_frame_errors +=
4376		readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
4377	stats[N_TX_RINGS].rx_length_errors +=
4378		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4379#if 1
4380	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4381		(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4382	stats[N_TX_RINGS].tx_aborted_errors += tmp;
4383	stats[N_TX_RINGS].collisions +=
4384		tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4385#else
4386	stats[N_TX_RINGS].tx_aborted_errors +=
4387		readl(cp->regs + REG_MAC_COLL_EXCESS);
4388	stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4389		readl(cp->regs + REG_MAC_COLL_LATE);
4390#endif
4391	cas_clear_mac_err(cp);
4392
4393	/* saved bits that are unique to ring 0 */
4394	spin_lock(&cp->stat_lock[0]);
4395	stats[N_TX_RINGS].collisions        += stats[0].collisions;
4396	stats[N_TX_RINGS].rx_over_errors    += stats[0].rx_over_errors;
4397	stats[N_TX_RINGS].rx_frame_errors   += stats[0].rx_frame_errors;
4398	stats[N_TX_RINGS].rx_fifo_errors    += stats[0].rx_fifo_errors;
4399	stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4400	stats[N_TX_RINGS].tx_fifo_errors    += stats[0].tx_fifo_errors;
4401	spin_unlock(&cp->stat_lock[0]);
4402
4403	for (i = 0; i < N_TX_RINGS; i++) {
4404		spin_lock(&cp->stat_lock[i]);
4405		stats[N_TX_RINGS].rx_length_errors +=
4406			stats[i].rx_length_errors;
4407		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4408		stats[N_TX_RINGS].rx_packets    += stats[i].rx_packets;
4409		stats[N_TX_RINGS].tx_packets    += stats[i].tx_packets;
4410		stats[N_TX_RINGS].rx_bytes      += stats[i].rx_bytes;
4411		stats[N_TX_RINGS].tx_bytes      += stats[i].tx_bytes;
4412		stats[N_TX_RINGS].rx_errors     += stats[i].rx_errors;
4413		stats[N_TX_RINGS].tx_errors     += stats[i].tx_errors;
4414		stats[N_TX_RINGS].rx_dropped    += stats[i].rx_dropped;
4415		stats[N_TX_RINGS].tx_dropped    += stats[i].tx_dropped;
4416		memset(stats + i, 0, sizeof(struct net_device_stats));
4417		spin_unlock(&cp->stat_lock[i]);
4418	}
4419	spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4420	return stats + N_TX_RINGS;
4421}
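/* Layout recap for cp->net_stats[], as used above: entries 0 through
 * N_TX_RINGS - 1 hold per-ring counters (each guarded by stat_lock[i]),
 * and entry N_TX_RINGS is the aggregate that this function folds
 * everything into and returns.
 */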
4422
4423
4424static void cas_set_multicast(struct net_device *dev)
4425{
4426	struct cas *cp = netdev_priv(dev);
4427	u32 rxcfg, rxcfg_new;
4428	unsigned long flags;
4429	int limit = STOP_TRIES;
4430
4431	if (!cp->hw_running)
4432		return;
4433
4434	spin_lock_irqsave(&cp->lock, flags);
4435	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4436
4437	/* disable RX MAC and wait for completion */
4438	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4439	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4440		if (!limit--)
4441			break;
4442		udelay(10);
4443	}
4444
4445	/* disable hash filter and wait for completion */
4446	limit = STOP_TRIES;
4447	rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4448	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4449	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4450		if (!limit--)
4451			break;
4452		udelay(10);
4453	}
4454
4455	/* program hash filters */
4456	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4457	rxcfg |= rxcfg_new;
4458	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4459	spin_unlock_irqrestore(&cp->lock, flags);
4460}
4461
4462static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4463{
4464	struct cas *cp = netdev_priv(dev);
4465	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4466	strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4467	strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4468}
4469
4470static int cas_get_link_ksettings(struct net_device *dev,
4471				  struct ethtool_link_ksettings *cmd)
4472{
4473	struct cas *cp = netdev_priv(dev);
4474	u16 bmcr;
4475	int full_duplex, speed, pause;
4476	unsigned long flags;
4477	enum link_state linkstate = link_up;
4478	u32 supported, advertising;
4479
4480	advertising = 0;
4481	supported = SUPPORTED_Autoneg;
4482	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4483		supported |= SUPPORTED_1000baseT_Full;
4484		advertising |= ADVERTISED_1000baseT_Full;
4485	}
4486
4487	/* Record PHY settings if HW is on. */
4488	spin_lock_irqsave(&cp->lock, flags);
4489	bmcr = 0;
4490	linkstate = cp->lstate;
4491	if (CAS_PHY_MII(cp->phy_type)) {
4492		cmd->base.port = PORT_MII;
4493		cmd->base.phy_address = cp->phy_addr;
4494		advertising |= ADVERTISED_TP | ADVERTISED_MII |
4495			ADVERTISED_10baseT_Half |
4496			ADVERTISED_10baseT_Full |
4497			ADVERTISED_100baseT_Half |
4498			ADVERTISED_100baseT_Full;
4499
4500		supported |=
4501			(SUPPORTED_10baseT_Half |
4502			 SUPPORTED_10baseT_Full |
4503			 SUPPORTED_100baseT_Half |
4504			 SUPPORTED_100baseT_Full |
4505			 SUPPORTED_TP | SUPPORTED_MII);
4506
4507		if (cp->hw_running) {
4508			cas_mif_poll(cp, 0);
4509			bmcr = cas_phy_read(cp, MII_BMCR);
4510			cas_read_mii_link_mode(cp, &full_duplex,
4511					       &speed, &pause);
4512			cas_mif_poll(cp, 1);
4513		}
4514
4515	} else {
4516		cmd->base.port = PORT_FIBRE;
4517		cmd->base.phy_address = 0;
4518		supported   |= SUPPORTED_FIBRE;
4519		advertising |= ADVERTISED_FIBRE;
4520
4521		if (cp->hw_running) {
4522			/* pcs uses the same bits as mii */
4523			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4524			cas_read_pcs_link_mode(cp, &full_duplex,
4525					       &speed, &pause);
4526		}
4527	}
4528	spin_unlock_irqrestore(&cp->lock, flags);
4529
4530	if (bmcr & BMCR_ANENABLE) {
4531		advertising |= ADVERTISED_Autoneg;
4532		cmd->base.autoneg = AUTONEG_ENABLE;
4533		cmd->base.speed =  ((speed == 10) ?
4534					    SPEED_10 :
4535					    ((speed == 1000) ?
4536					     SPEED_1000 : SPEED_100));
4537		cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4538	} else {
4539		cmd->base.autoneg = AUTONEG_DISABLE;
4540		cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
4541					    SPEED_1000 :
4542					    ((bmcr & BMCR_SPEED100) ?
4543					     SPEED_100 : SPEED_10));
4544		cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
4545			DUPLEX_FULL : DUPLEX_HALF;
4546	}
4547	if (linkstate != link_up) {
4548		/* Force these to "unknown" if the link is not up and
4549		 * autonegotiation is enabled. We can set the link
4550		 * speed to 0, but not cmd->duplex,
4551		 * because its legal values are 0 and 1.  Ethtool will
4552		 * print the value reported in parentheses after the
4553		 * word "Unknown" for unrecognized values.
4554		 *
4555		 * If in forced mode, we report the speed and duplex
4556		 * settings that we configured.
4557		 */
4558		if (cp->link_cntl & BMCR_ANENABLE) {
4559			cmd->base.speed = 0;
4560			cmd->base.duplex = 0xff;
4561		} else {
4562			cmd->base.speed = SPEED_10;
4563			if (cp->link_cntl & BMCR_SPEED100) {
4564				cmd->base.speed = SPEED_100;
4565			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4566				cmd->base.speed = SPEED_1000;
4567			}
4568			cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
4569				DUPLEX_FULL : DUPLEX_HALF;
4570		}
4571	}
4572
4573	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4574						supported);
4575	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4576						advertising);
4577
4578	return 0;
4579}
4580
4581static int cas_set_link_ksettings(struct net_device *dev,
4582				  const struct ethtool_link_ksettings *cmd)
4583{
4584	struct cas *cp = netdev_priv(dev);
4585	unsigned long flags;
4586	u32 speed = cmd->base.speed;
4587
4588	/* Verify the settings we care about. */
4589	if (cmd->base.autoneg != AUTONEG_ENABLE &&
4590	    cmd->base.autoneg != AUTONEG_DISABLE)
4591		return -EINVAL;
4592
4593	if (cmd->base.autoneg == AUTONEG_DISABLE &&
4594	    ((speed != SPEED_1000 &&
4595	      speed != SPEED_100 &&
4596	      speed != SPEED_10) ||
4597	     (cmd->base.duplex != DUPLEX_HALF &&
4598	      cmd->base.duplex != DUPLEX_FULL)))
4599		return -EINVAL;
4600
4601	/* Apply settings and restart link process. */
4602	spin_lock_irqsave(&cp->lock, flags);
4603	cas_begin_auto_negotiation(cp, cmd);
4604	spin_unlock_irqrestore(&cp->lock, flags);
4605	return 0;
4606}
4607
4608static int cas_nway_reset(struct net_device *dev)
4609{
4610	struct cas *cp = netdev_priv(dev);
4611	unsigned long flags;
4612
4613	if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4614		return -EINVAL;
4615
4616	/* Restart link process. */
4617	spin_lock_irqsave(&cp->lock, flags);
4618	cas_begin_auto_negotiation(cp, NULL);
4619	spin_unlock_irqrestore(&cp->lock, flags);
4620
4621	return 0;
4622}
4623
4624static u32 cas_get_link(struct net_device *dev)
4625{
4626	struct cas *cp = netdev_priv(dev);
4627	return cp->lstate == link_up;
4628}
4629
4630static u32 cas_get_msglevel(struct net_device *dev)
4631{
4632	struct cas *cp = netdev_priv(dev);
4633	return cp->msg_enable;
4634}
4635
4636static void cas_set_msglevel(struct net_device *dev, u32 value)
4637{
4638	struct cas *cp = netdev_priv(dev);
4639	cp->msg_enable = value;
4640}
4641
4642static int cas_get_regs_len(struct net_device *dev)
4643{
4644	struct cas *cp = netdev_priv(dev);
4645	return min_t(int, cp->casreg_len, CAS_MAX_REGS);
4646}
4647
4648static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4649			     void *p)
4650{
4651	struct cas *cp = netdev_priv(dev);
4652	regs->version = 0;
4653	/* cas_read_regs handles locks (cp->lock).  */
4654	cas_read_regs(cp, p, regs->len / sizeof(u32));
4655}
4656
4657static int cas_get_sset_count(struct net_device *dev, int sset)
4658{
4659	switch (sset) {
4660	case ETH_SS_STATS:
4661		return CAS_NUM_STAT_KEYS;
4662	default:
4663		return -EOPNOTSUPP;
4664	}
4665}
4666
4667static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4668{
4669	memcpy(data, &ethtool_cassini_statnames,
4670	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4671}
4672
4673static void cas_get_ethtool_stats(struct net_device *dev,
4674				      struct ethtool_stats *estats, u64 *data)
4675{
4676	struct cas *cp = netdev_priv(dev);
4677	struct net_device_stats *stats = cas_get_stats(cp->dev);
4678	int i = 0;
4679	data[i++] = stats->collisions;
4680	data[i++] = stats->rx_bytes;
4681	data[i++] = stats->rx_crc_errors;
4682	data[i++] = stats->rx_dropped;
4683	data[i++] = stats->rx_errors;
4684	data[i++] = stats->rx_fifo_errors;
4685	data[i++] = stats->rx_frame_errors;
4686	data[i++] = stats->rx_length_errors;
4687	data[i++] = stats->rx_over_errors;
4688	data[i++] = stats->rx_packets;
4689	data[i++] = stats->tx_aborted_errors;
4690	data[i++] = stats->tx_bytes;
4691	data[i++] = stats->tx_dropped;
4692	data[i++] = stats->tx_errors;
4693	data[i++] = stats->tx_fifo_errors;
4694	data[i++] = stats->tx_packets;
4695	BUG_ON(i != CAS_NUM_STAT_KEYS);
4696}
4697
4698static const struct ethtool_ops cas_ethtool_ops = {
4699	.get_drvinfo		= cas_get_drvinfo,
4700	.nway_reset		= cas_nway_reset,
4701	.get_link		= cas_get_link,
4702	.get_msglevel		= cas_get_msglevel,
4703	.set_msglevel		= cas_set_msglevel,
4704	.get_regs_len		= cas_get_regs_len,
4705	.get_regs		= cas_get_regs,
4706	.get_sset_count		= cas_get_sset_count,
4707	.get_strings		= cas_get_strings,
4708	.get_ethtool_stats	= cas_get_ethtool_stats,
4709	.get_link_ksettings	= cas_get_link_ksettings,
4710	.set_link_ksettings	= cas_set_link_ksettings,
4711};
4712
4713static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4714{
4715	struct cas *cp = netdev_priv(dev);
4716	struct mii_ioctl_data *data = if_mii(ifr);
4717	unsigned long flags;
4718	int rc = -EOPNOTSUPP;
4719
4720	/* Hold the PM mutex while doing ioctl's or we may collide
4721	 * with open/close and power management and oops.
4722	 */
4723	mutex_lock(&cp->pm_mutex);
4724	switch (cmd) {
4725	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
4726		data->phy_id = cp->phy_addr;
4727		fallthrough;
4728
4729	case SIOCGMIIREG:		/* Read MII PHY register. */
4730		spin_lock_irqsave(&cp->lock, flags);
4731		cas_mif_poll(cp, 0);
4732		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4733		cas_mif_poll(cp, 1);
4734		spin_unlock_irqrestore(&cp->lock, flags);
4735		rc = 0;
4736		break;
4737
4738	case SIOCSMIIREG:		/* Write MII PHY register. */
4739		spin_lock_irqsave(&cp->lock, flags);
4740		cas_mif_poll(cp, 0);
4741		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4742		cas_mif_poll(cp, 1);
4743		spin_unlock_irqrestore(&cp->lock, flags);
4744		break;
4745	default:
4746		break;
4747	}
4748
4749	mutex_unlock(&cp->pm_mutex);
4750	return rc;
4751}
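/* Illustrative userspace counterpart to the MII ioctls handled above --
 * a minimal sketch with "eth0" as a placeholder and error checking
 * omitted (needs <sys/ioctl.h>, <sys/socket.h>, <net/if.h>,
 * <linux/mii.h>, <linux/sockios.h>); the overlay of mii_ioctl_data onto
 * the ifreq union mirrors what if_mii() does on the kernel side:
 *
 *	struct ifreq ifr = { };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);        // fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);        // register value in mii->val_out
 */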
4752
4753/* When this chip sits underneath an Intel 31154 bridge, it is the
4754 * only subordinate device and we can tweak the bridge settings to
4755 * reflect that fact.
4756 */
4757static void cas_program_bridge(struct pci_dev *cas_pdev)
4758{
4759	struct pci_dev *pdev = cas_pdev->bus->self;
4760	u32 val;
4761
4762	if (!pdev)
4763		return;
4764
4765	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4766		return;
4767
4768	/* Clear bit 10 (Bus Parking Control) in the Secondary
4769	 * Arbiter Control/Status Register which lives at offset
4770	 * 0x41.  Using a 32-bit word read/modify/write at 0x40
4771	 * is much simpler so that's how we do this.
4772	 */
4773	pci_read_config_dword(pdev, 0x40, &val);
4774	val &= ~0x00040000;
4775	pci_write_config_dword(pdev, 0x40, val);
4776
4777	/* Max out the Multi-Transaction Timer settings since
4778	 * Cassini is the only device present.
4779	 *
4780	 * The register is 16-bit and lives at 0x50.  When the
4781	 * settings are enabled, it extends the GRANT# signal
4782	 * for a requestor after a transaction is complete.  This
4783	 * allows the next request to run without first needing
4784	 * to negotiate the GRANT# signal back.
4785	 *
4786	 * Bits 12:10 define the grant duration:
4787	 *
4788	 *	1	--	16 clocks
4789	 *	2	--	32 clocks
4790	 *	3	--	64 clocks
4791	 *	4	--	128 clocks
4792	 *	5	--	256 clocks
4793	 *
4794	 * All other values are illegal.
4795	 *
4796	 * Bits 09:00 define which REQ/GNT signal pairs get the
4797	 * GRANT# signal treatment.  We set them all.
4798	 */
4799	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
4800
4801	/* The Read Prefetch Policy register is 16-bit and sits at
4802	 * offset 0x52.  It enables a "smart" pre-fetch policy.  We
4803	 * enable it and max out all of the settings since only one
4804	 * device is sitting underneath and thus bandwidth sharing is
4805	 * not an issue.
4806	 *
4807	 * The register has several 3-bit fields, each of which indicates a
4808	 * multiplier applied to the base amount of prefetching the
4809	 * chip would do.  These fields are at:
4810	 *
4811	 *	15:13	---	ReRead Primary Bus
4812	 *	12:10	---	FirstRead Primary Bus
4813	 *	09:07	---	ReRead Secondary Bus
4814	 *	06:04	---	FirstRead Secondary Bus
4815	 *
4816	 * Bits 03:00 control which REQ/GNT pairs the prefetch settings
4817	 * get enabled on.  Bit 3 is a grouped enabler which controls
4818	 * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
4819	 * the individual REQ/GNT pairs [2:0].
4820	 */
4821	pci_write_config_word(pdev, 0x52,
4822			      (0x7 << 13) |
4823			      (0x7 << 10) |
4824			      (0x7 <<  7) |
4825			      (0x7 <<  4) |
4826			      (0xf <<  0));
4827
4828	/* Force cacheline size to 0x8 */
4829	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4830
4831	/* Force latency timer to maximum setting so Cassini can
4832	 * sit on the bus as long as it likes.
4833	 */
4834	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4835}
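/* Worked decode of the Multi-Transaction Timer value written above,
 * following the field layout described in the comment: (5 << 10) puts
 * 5 into bits 12:10 for a 256-clock grant duration, and 0x3ff sets
 * bits 09:00 so all ten REQ/GNT pairs get the extended-GRANT#
 * treatment.
 */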
4836
4837static const struct net_device_ops cas_netdev_ops = {
4838	.ndo_open		= cas_open,
4839	.ndo_stop		= cas_close,
4840	.ndo_start_xmit		= cas_start_xmit,
4841	.ndo_get_stats 		= cas_get_stats,
4842	.ndo_set_rx_mode	= cas_set_multicast,
4843	.ndo_eth_ioctl		= cas_ioctl,
4844	.ndo_tx_timeout		= cas_tx_timeout,
4845	.ndo_change_mtu		= cas_change_mtu,
4846	.ndo_set_mac_address	= eth_mac_addr,
4847	.ndo_validate_addr	= eth_validate_addr,
4848#ifdef CONFIG_NET_POLL_CONTROLLER
4849	.ndo_poll_controller	= cas_netpoll,
4850#endif
4851};
4852
4853static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4854{
4855	static int cas_version_printed = 0;
4856	unsigned long casreg_len;
4857	struct net_device *dev;
4858	struct cas *cp;
4859	u16 pci_cmd;
4860	int i, err;
4861	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4862
4863	if (cas_version_printed++ == 0)
4864		pr_info("%s", version);
4865
4866	err = pci_enable_device(pdev);
4867	if (err) {
4868		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4869		return err;
4870	}
4871
4872	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4873		dev_err(&pdev->dev, "Cannot find proper PCI device "
4874		       "base address, aborting\n");
4875		err = -ENODEV;
4876		goto err_out_disable_pdev;
4877	}
4878
4879	dev = alloc_etherdev(sizeof(*cp));
4880	if (!dev) {
4881		err = -ENOMEM;
4882		goto err_out_disable_pdev;
4883	}
4884	SET_NETDEV_DEV(dev, &pdev->dev);
4885
4886	err = pci_request_regions(pdev, dev->name);
4887	if (err) {
4888		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4889		goto err_out_free_netdev;
4890	}
4891	pci_set_master(pdev);
4892
4893	/* we must always turn on parity response or else parity
4894	 * doesn't get generated properly. disable SERR/PERR as well.
4895	 * in addition, we want to turn MWI on.
4896	 */
4897	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4898	pci_cmd &= ~PCI_COMMAND_SERR;
4899	pci_cmd |= PCI_COMMAND_PARITY;
4900	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4901	if (pci_try_set_mwi(pdev))
4902		pr_warn("Could not enable MWI for %s\n", pci_name(pdev));
4903
4904	cas_program_bridge(pdev);
4905
4906	/*
4907	 * On some architectures, the default cache line size set
4908	 * by pci_try_set_mwi reduces performance.  We have to increase
4909	 * it for this case.  To start, we'll print some configuration
4910	 * data.
4911	 */
4912#if 1
4913	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4914			     &orig_cacheline_size);
4915	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4916		cas_cacheline_size =
4917			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4918			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4919		if (pci_write_config_byte(pdev,
4920					  PCI_CACHE_LINE_SIZE,
4921					  cas_cacheline_size)) {
4922			dev_err(&pdev->dev, "Could not set PCI cache "
4923			       "line size\n");
4924			goto err_out_free_res;
4925		}
4926	}
4927#endif
4928
4929
4930	/* Configure DMA attributes. */
4931	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4932	if (err) {
4933		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4934		goto err_out_free_res;
4935	}
4936
4937	casreg_len = pci_resource_len(pdev, 0);
4938
4939	cp = netdev_priv(dev);
4940	cp->pdev = pdev;
4941#if 1
4942	/* A value of 0 indicates we never explicitly set it */
4943	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
4944#endif
4945	cp->dev = dev;
4946	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
4947	  cassini_debug;
4948
4949#if defined(CONFIG_SPARC)
4950	cp->of_node = pci_device_to_OF_node(pdev);
4951#endif
4952
4953	cp->link_transition = LINK_TRANSITION_UNKNOWN;
4954	cp->link_transition_jiffies_valid = 0;
4955
4956	spin_lock_init(&cp->lock);
4957	spin_lock_init(&cp->rx_inuse_lock);
4958	spin_lock_init(&cp->rx_spare_lock);
4959	for (i = 0; i < N_TX_RINGS; i++) {
4960		spin_lock_init(&cp->stat_lock[i]);
4961		spin_lock_init(&cp->tx_lock[i]);
4962	}
4963	spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
4964	mutex_init(&cp->pm_mutex);
4965
4966	timer_setup(&cp->link_timer, cas_link_timer, 0);
4967
4968#if 1
4969	/* Just in case the implementation of atomic operations
4970	 * changes so that an explicit initialization becomes necessary.
4971	 */
4972	atomic_set(&cp->reset_task_pending, 0);
4973	atomic_set(&cp->reset_task_pending_all, 0);
4974	atomic_set(&cp->reset_task_pending_spare, 0);
4975	atomic_set(&cp->reset_task_pending_mtu, 0);
4976#endif
4977	INIT_WORK(&cp->reset_task, cas_reset_task);
4978
4979	/* Default link parameters */
4980	if (link_mode >= 0 && link_mode < 6)
4981		cp->link_cntl = link_modes[link_mode];
4982	else
4983		cp->link_cntl = BMCR_ANENABLE;
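	/* link_mode indexes the link_modes[] table defined earlier in this
	 * file (0 autoneg, 1 10bt half, 2 100bt half, 3 10bt full,
	 * 4 100bt full, 5 1000bt full).  Illustrative usage:
	 *
	 *	modprobe cassini link_mode=4
	 *
	 * forces 100bt full duplex via BMCR_SPEED100 | BMCR_FULLDPLX.
	 */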
4984	cp->lstate = link_down;
4985	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
4986	netif_carrier_off(cp->dev);
4987	cp->timer_ticks = 0;
4988
4989	/* give us access to cassini registers */
4990	cp->regs = pci_iomap(pdev, 0, casreg_len);
4991	if (!cp->regs) {
4992		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
4993		goto err_out_free_res;
4994	}
4995	cp->casreg_len = casreg_len;
4996
4997	pci_save_state(pdev);
4998	cas_check_pci_invariants(cp);
4999	cas_hard_reset(cp);
5000	cas_reset(cp, 0);
5001	if (cas_check_invariants(cp))
5002		goto err_out_iounmap;
5003	if (cp->cas_flags & CAS_FLAG_SATURN)
5004		cas_saturn_firmware_init(cp);
5005
5006	cp->init_block =
5007		dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
5008				   &cp->block_dvma, GFP_KERNEL);
5009	if (!cp->init_block) {
5010		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5011		goto err_out_iounmap;
5012	}
5013
5014	for (i = 0; i < N_TX_RINGS; i++)
5015		cp->init_txds[i] = cp->init_block->txds[i];
5016
5017	for (i = 0; i < N_RX_DESC_RINGS; i++)
5018		cp->init_rxds[i] = cp->init_block->rxds[i];
5019
5020	for (i = 0; i < N_RX_COMP_RINGS; i++)
5021		cp->init_rxcs[i] = cp->init_block->rxcs[i];
5022
5023	for (i = 0; i < N_RX_FLOWS; i++)
5024		skb_queue_head_init(&cp->rx_flows[i]);
5025
5026	dev->netdev_ops = &cas_netdev_ops;
5027	dev->ethtool_ops = &cas_ethtool_ops;
5028	dev->watchdog_timeo = CAS_TX_TIMEOUT;
5029
5030#ifdef USE_NAPI
5031	netif_napi_add(dev, &cp->napi, cas_poll);
5032#endif
5033	dev->irq = pdev->irq;
5034	dev->dma = 0;
5035
5036	/* Cassini features. */
5037	if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5038		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5039
5040	dev->features |= NETIF_F_HIGHDMA;
5041
5042	/* MTU range: 60 to a page-size-dependent maximum, at most 9000 */
5043	dev->min_mtu = CAS_MIN_MTU;
5044	dev->max_mtu = CAS_MAX_MTU;
5045
5046	if (register_netdev(dev)) {
5047		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5048		goto err_out_free_consistent;
5049	}
5050
5051	i = readl(cp->regs + REG_BIM_CFG);
5052	netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5053		    (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5054		    (i & BIM_CFG_32BIT) ? "32" : "64",
5055		    (i & BIM_CFG_66MHZ) ? "66" : "33",
5056		    (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5057		    dev->dev_addr);
5058
5059	pci_set_drvdata(pdev, dev);
5060	cp->hw_running = 1;
5061	cas_entropy_reset(cp);
5062	cas_phy_init(cp);
5063	cas_begin_auto_negotiation(cp, NULL);
5064	return 0;
5065
5066err_out_free_consistent:
5067	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
5068			  cp->init_block, cp->block_dvma);
5069
5070err_out_iounmap:
5071	mutex_lock(&cp->pm_mutex);
5072	if (cp->hw_running)
5073		cas_shutdown(cp);
5074	mutex_unlock(&cp->pm_mutex);
5075
5076	vfree(cp->fw_data);
5077
5078	pci_iounmap(pdev, cp->regs);
5079
5080
5081err_out_free_res:
5082	pci_release_regions(pdev);
5083
5084	/* Try to restore the original cache line size in case the
5085	 * error occurred after we changed it.
5086	 */
5087	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5088
5089err_out_free_netdev:
5090	free_netdev(dev);
5091
5092err_out_disable_pdev:
5093	pci_disable_device(pdev);
5094	return -ENODEV;
5095}
5096
5097static void cas_remove_one(struct pci_dev *pdev)
5098{
5099	struct net_device *dev = pci_get_drvdata(pdev);
5100	struct cas *cp;
5101	if (!dev)
5102		return;
5103
5104	cp = netdev_priv(dev);
5105	unregister_netdev(dev);
5106
5107	vfree(cp->fw_data);
5108
5109	mutex_lock(&cp->pm_mutex);
5110	cancel_work_sync(&cp->reset_task);
5111	if (cp->hw_running)
5112		cas_shutdown(cp);
5113	mutex_unlock(&cp->pm_mutex);
5114
5115#if 1
5116	if (cp->orig_cacheline_size) {
5117		/* Restore the cache line size if we had modified
5118		 * it.
5119		 */
5120		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5121				      cp->orig_cacheline_size);
5122	}
5123#endif
5124	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
5125			  cp->init_block, cp->block_dvma);
5126	pci_iounmap(pdev, cp->regs);
5127	free_netdev(dev);
5128	pci_release_regions(pdev);
5129	pci_disable_device(pdev);
5130}
5131
5132static int __maybe_unused cas_suspend(struct device *dev_d)
5133{
5134	struct net_device *dev = dev_get_drvdata(dev_d);
5135	struct cas *cp = netdev_priv(dev);
5136	unsigned long flags;
5137
5138	mutex_lock(&cp->pm_mutex);
5139
5140	/* If the driver is opened, we stop the DMA */
5141	if (cp->opened) {
5142		netif_device_detach(dev);
5143
5144		cas_lock_all_save(cp, flags);
5145
5146		/* We can set the second arg of cas_reset to 0
5147		 * because on resume, we'll call cas_init_hw with
5148		 * its second arg set so that autonegotiation is
5149		 * restarted.
5150		 */
5151		cas_reset(cp, 0);
5152		cas_clean_rings(cp);
5153		cas_unlock_all_restore(cp, flags);
5154	}
5155
5156	if (cp->hw_running)
5157		cas_shutdown(cp);
5158	mutex_unlock(&cp->pm_mutex);
5159
5160	return 0;
5161}
5162
5163static int __maybe_unused cas_resume(struct device *dev_d)
5164{
5165	struct net_device *dev = dev_get_drvdata(dev_d);
5166	struct cas *cp = netdev_priv(dev);
5167
5168	netdev_info(dev, "resuming\n");
5169
5170	mutex_lock(&cp->pm_mutex);
5171	cas_hard_reset(cp);
5172	if (cp->opened) {
5173		unsigned long flags;
5174		cas_lock_all_save(cp, flags);
5175		cas_reset(cp, 0);
5176		cp->hw_running = 1;
5177		cas_clean_rings(cp);
5178		cas_init_hw(cp, 1);
5179		cas_unlock_all_restore(cp, flags);
5180
5181		netif_device_attach(dev);
5182	}
5183	mutex_unlock(&cp->pm_mutex);
5184	return 0;
5185}
5186
5187static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume);
5188
5189static struct pci_driver cas_driver = {
5190	.name		= DRV_MODULE_NAME,
5191	.id_table	= cas_pci_tbl,
5192	.probe		= cas_init_one,
5193	.remove		= cas_remove_one,
5194	.driver.pm	= &cas_pm_ops,
5195};
5196
5197static int __init cas_init(void)
5198{
5199	if (linkdown_timeout > 0)
5200		link_transition_timeout = linkdown_timeout * HZ;
5201	else
5202		link_transition_timeout = 0;
5203
5204	return pci_register_driver(&cas_driver);
5205}
5206
5207static void __exit cas_cleanup(void)
5208{
5209	pci_unregister_driver(&cas_driver);
5210}
5211
5212module_init(cas_init);
5213module_exit(cas_cleanup);
v4.6
 
   1/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
   2 *
   3 * Copyright (C) 2004 Sun Microsystems Inc.
   4 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation; either version 2 of the
   9 * License, or (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
  18 *
  19 * This driver uses the sungem driver (c) David Miller
  20 * (davem@redhat.com) as its basis.
  21 *
  22 * The cassini chip has a number of features that distinguish it from
  23 * the gem chip:
  24 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
  25 *      load balancing (non-VLAN mode)
  26 *  batching of multiple packets
  27 *  multiple CPU dispatching
  28 *  page-based RX descriptor engine with separate completion rings
  29 *  Gigabit support (GMII and PCS interface)
  30 *  MIF link up/down detection works
  31 *
  32 * RX is handled by page sized buffers that are attached as fragments to
  33 * the skb. here's what's done:
  34 *  -- driver allocates pages at a time and keeps reference counts
  35 *     on them.
  36 *  -- the upper protocol layers assume that the header is in the skb
  37 *     itself. as a result, cassini will copy a small amount (64 bytes)
  38 *     to make them happy.
  39 *  -- driver appends the rest of the data pages as frags to skbuffs
  40 *     and increments the reference count
  41 *  -- on page reclamation, the driver swaps the page with a spare page.
  42 *     if that page is still in use, it frees its reference to that page,
  43 *     and allocates a new page for use. otherwise, it just recycles the
  44 *     the page.
  45 *
  46 * NOTE: cassini can parse the header. however, it's not worth it
  47 *       as long as the network stack requires a header copy.
  48 *
  49 * TX has 4 queues. currently these queues are used in a round-robin
  50 * fashion for load balancing. They can also be used for QoS. for that
  51 * to work, however, QoS information needs to be exposed down to the driver
  52 * level so that subqueues get targeted to particular transmit rings.
  53 * alternatively, the queues can be configured via use of the all-purpose
  54 * ioctl.
  55 *
  56 * RX DATA: the rx completion ring has all the info, but the rx desc
  57 * ring has all of the data. RX can conceivably come in under multiple
  58 * interrupts, but the INT# assignment needs to be set up properly by
  59 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
  60 * that. also, the two descriptor rings are designed to distinguish between
  61 * encrypted and non-encrypted packets, but we use them for buffering
  62 * instead.
  63 *
  64 * by default, the selective clear mask is set up to process rx packets.
  65 */
  66
  67#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  68
  69#include <linux/module.h>
  70#include <linux/kernel.h>
  71#include <linux/types.h>
  72#include <linux/compiler.h>
  73#include <linux/slab.h>
  74#include <linux/delay.h>
  75#include <linux/init.h>
  76#include <linux/interrupt.h>
  77#include <linux/vmalloc.h>
  78#include <linux/ioport.h>
  79#include <linux/pci.h>
  80#include <linux/mm.h>
  81#include <linux/highmem.h>
  82#include <linux/list.h>
  83#include <linux/dma-mapping.h>
  84
  85#include <linux/netdevice.h>
  86#include <linux/etherdevice.h>
  87#include <linux/skbuff.h>
  88#include <linux/ethtool.h>
  89#include <linux/crc32.h>
  90#include <linux/random.h>
  91#include <linux/mii.h>
  92#include <linux/ip.h>
  93#include <linux/tcp.h>
  94#include <linux/mutex.h>
  95#include <linux/firmware.h>
  96
  97#include <net/checksum.h>
  98
  99#include <linux/atomic.h>
 100#include <asm/io.h>
 101#include <asm/byteorder.h>
 102#include <asm/uaccess.h>
 
 103
 104#define cas_page_map(x)      kmap_atomic((x))
 105#define cas_page_unmap(x)    kunmap_atomic((x))
 106#define CAS_NCPUS            num_online_cpus()
 107
 108#define cas_skb_release(x)  netif_rx(x)
 109
 110/* select which firmware to use */
 111#define USE_HP_WORKAROUND
 112#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
 113#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */
 114
 115#include "cassini.h"
 116
 117#define USE_TX_COMPWB      /* use completion writeback registers */
 118#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
 119#define USE_RX_BLANK       /* hw interrupt mitigation */
 120#undef USE_ENTROPY_DEV     /* don't test for entropy device */
 121
 122/* NOTE: these aren't useable unless PCI interrupts can be assigned.
 123 * also, we need to make cp->lock finer-grained.
 124 */
 125#undef  USE_PCI_INTB
 126#undef  USE_PCI_INTC
 127#undef  USE_PCI_INTD
 128#undef  USE_QOS
 129
 130#undef  USE_VPD_DEBUG       /* debug vpd information if defined */
 131
 132/* rx processing options */
 133#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
 134#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
 135#define RX_COPY_ALWAYS 0    /* if 0, use frags */
 136#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
 137#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
 138
 139#define DRV_MODULE_NAME		"cassini"
 140#define DRV_MODULE_VERSION	"1.6"
 141#define DRV_MODULE_RELDATE	"21 May 2008"
 142
 143#define CAS_DEF_MSG_ENABLE	  \
 144	(NETIF_MSG_DRV		| \
 145	 NETIF_MSG_PROBE	| \
 146	 NETIF_MSG_LINK		| \
 147	 NETIF_MSG_TIMER	| \
 148	 NETIF_MSG_IFDOWN	| \
 149	 NETIF_MSG_IFUP		| \
 150	 NETIF_MSG_RX_ERR	| \
 151	 NETIF_MSG_TX_ERR)
 152
 153/* length of time before we decide the hardware is borked,
 154 * and dev->tx_timeout() should be called to fix the problem
 155 */
 156#define CAS_TX_TIMEOUT			(HZ)
 157#define CAS_LINK_TIMEOUT                (22*HZ/10)
 158#define CAS_LINK_FAST_TIMEOUT           (1)
 159
 160/* timeout values for state changing. these specify the number
 161 * of 10us delays to be used before giving up.
 162 */
 163#define STOP_TRIES_PHY 1000
 164#define STOP_TRIES     5000
 165
 166/* specify a minimum frame size to deal with some fifo issues
 167 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 168 *            2 * page_size - 0x50
 169 */
 170#define CAS_MIN_FRAME			97
 171#define CAS_1000MB_MIN_FRAME            255
 172#define CAS_MIN_MTU                     60
 173#define CAS_MAX_MTU                     min(((cp->page_size << 1) - 0x50), 9000)
 174
 175#if 1
 176/*
 177 * Eliminate these and use separate atomic counters for each, to
 178 * avoid a race condition.
 179 */
 180#else
 181#define CAS_RESET_MTU                   1
 182#define CAS_RESET_ALL                   2
 183#define CAS_RESET_SPARE                 3
 184#endif
 185
 186static char version[] =
 187	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 188
 189static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
 190static int link_mode;
 191
 192MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
 193MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
 194MODULE_LICENSE("GPL");
 195MODULE_FIRMWARE("sun/cassini.bin");
 196module_param(cassini_debug, int, 0);
 197MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
 198module_param(link_mode, int, 0);
 199MODULE_PARM_DESC(link_mode, "default link mode");
 200
 201/*
 202 * Work around for a PCS bug in which the link goes down due to the chip
 203 * being confused and never showing a link status of "up."
 204 */
 205#define DEFAULT_LINKDOWN_TIMEOUT 5
 206/*
 207 * Value in seconds, for user input.
 208 */
 209static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
 210module_param(linkdown_timeout, int, 0);
 211MODULE_PARM_DESC(linkdown_timeout,
 212"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
 213
 214/*
 215 * value in 'ticks' (units used by jiffies). Set when we init the
 216 * module because 'HZ' in actually a function call on some flavors of
 217 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 218 */
 219static int link_transition_timeout;
 220
 221
 222
 223static u16 link_modes[] = {
 224	BMCR_ANENABLE,			 /* 0 : autoneg */
 225	0,				 /* 1 : 10bt half duplex */
 226	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
 227	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
 228	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
 229	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
 230};
 231
 232static const struct pci_device_id cas_pci_tbl[] = {
 233	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
 234	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 235	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
 236	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 237	{ 0, }
 238};
 239
 240MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
 241
 242static void cas_set_link_modes(struct cas *cp);
 243
 244static inline void cas_lock_tx(struct cas *cp)
 245{
 246	int i;
 247
 248	for (i = 0; i < N_TX_RINGS; i++)
 249		spin_lock_nested(&cp->tx_lock[i], i);
 250}
 251
 252static inline void cas_lock_all(struct cas *cp)
 253{
 254	spin_lock_irq(&cp->lock);
 255	cas_lock_tx(cp);
 256}
 257
 258/* WTZ: QA was finding deadlock problems with the previous
 259 * versions after long test runs with multiple cards per machine.
 260 * See if replacing cas_lock_all with safer versions helps. The
 261 * symptoms QA is reporting match those we'd expect if interrupts
 262 * aren't being properly restored, and we fixed a previous deadlock
 263 * with similar symptoms by using save/restore versions in other
 264 * places.
 265 */
 266#define cas_lock_all_save(cp, flags) \
 267do { \
 268	struct cas *xxxcp = (cp); \
 269	spin_lock_irqsave(&xxxcp->lock, flags); \
 270	cas_lock_tx(xxxcp); \
 271} while (0)
 272
 273static inline void cas_unlock_tx(struct cas *cp)
 274{
 275	int i;
 276
 277	for (i = N_TX_RINGS; i > 0; i--)
 278		spin_unlock(&cp->tx_lock[i - 1]);
 279}
 280
 281static inline void cas_unlock_all(struct cas *cp)
 282{
 283	cas_unlock_tx(cp);
 284	spin_unlock_irq(&cp->lock);
 285}
 286
 287#define cas_unlock_all_restore(cp, flags) \
 288do { \
 289	struct cas *xxxcp = (cp); \
 290	cas_unlock_tx(xxxcp); \
 291	spin_unlock_irqrestore(&xxxcp->lock, flags); \
 292} while (0)
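/* Illustrative usage of the save/restore pair above (a sketch, not from
 * the original source): the same `flags` variable must be passed to both
 * macros in the same function, since it carries the caller's IRQ state:
 *
 *	unsigned long flags;
 *
 *	cas_lock_all_save(cp, flags);
 *	cp->lstate = link_down;      <- state guarded by cp->lock
 *	cas_unlock_all_restore(cp, flags);
 */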
 293
 294static void cas_disable_irq(struct cas *cp, const int ring)
 295{
 296	/* Make sure we won't get any more interrupts */
 297	if (ring == 0) {
 298		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
 299		return;
 300	}
 301
 302	/* disable completion interrupts and selectively mask */
 303	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 304		switch (ring) {
 305#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 306#ifdef USE_PCI_INTB
 307		case 1:
 308#endif
 309#ifdef USE_PCI_INTC
 310		case 2:
 311#endif
 312#ifdef USE_PCI_INTD
 313		case 3:
 314#endif
 315			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
 316			       cp->regs + REG_PLUS_INTRN_MASK(ring));
 317			break;
 318#endif
 319		default:
 320			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
 321			       REG_PLUS_INTRN_MASK(ring));
 322			break;
 323		}
 324	}
 325}
 326
 327static inline void cas_mask_intr(struct cas *cp)
 328{
 329	int i;
 330
 331	for (i = 0; i < N_RX_COMP_RINGS; i++)
 332		cas_disable_irq(cp, i);
 333}
 334
 335static void cas_enable_irq(struct cas *cp, const int ring)
 336{
 337	if (ring == 0) { /* all but TX_DONE */
 338		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
 339		return;
 340	}
 341
 342	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 343		switch (ring) {
 344#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 345#ifdef USE_PCI_INTB
 346		case 1:
 347#endif
 348#ifdef USE_PCI_INTC
 349		case 2:
 350#endif
 351#ifdef USE_PCI_INTD
 352		case 3:
 353#endif
 354			writel(INTRN_MASK_RX_EN, cp->regs +
 355			       REG_PLUS_INTRN_MASK(ring));
 356			break;
 357#endif
 358		default:
 359			break;
 360		}
 361	}
 362}
 363
 364static inline void cas_unmask_intr(struct cas *cp)
 365{
 366	int i;
 367
 368	for (i = 0; i < N_RX_COMP_RINGS; i++)
 369		cas_enable_irq(cp, i);
 370}
 371
 372static inline void cas_entropy_gather(struct cas *cp)
 373{
 374#ifdef USE_ENTROPY_DEV
 375	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
 376		return;
 377
 378	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
 379			    readl(cp->regs + REG_ENTROPY_IV),
 380			    sizeof(uint64_t)*8);
 381#endif
 382}
 383
 384static inline void cas_entropy_reset(struct cas *cp)
 385{
 386#ifdef USE_ENTROPY_DEV
 387	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
 388		return;
 389
 390	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
 391	       cp->regs + REG_BIM_LOCAL_DEV_EN);
 392	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
 393	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
 394
 395	/* if we read back 0x0, we don't have an entropy device */
 396	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
 397		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
 398#endif
 399}
 400
 401/* access to the phy. the following assumes that we've initialized the MIF to
 402 * be in frame rather than bit-bang mode
 403 */
 404static u16 cas_phy_read(struct cas *cp, int reg)
 405{
 406	u32 cmd;
 407	int limit = STOP_TRIES_PHY;
 408
 409	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
 410	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
 411	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
 412	cmd |= MIF_FRAME_TURN_AROUND_MSB;
 413	writel(cmd, cp->regs + REG_MIF_FRAME);
 414
 415	/* poll for completion */
 416	while (limit-- > 0) {
 417		udelay(10);
 418		cmd = readl(cp->regs + REG_MIF_FRAME);
 419		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
 420			return cmd & MIF_FRAME_DATA_MASK;
 421	}
 422	return 0xFFFF; /* -1 */
 423}
 424
 425static int cas_phy_write(struct cas *cp, int reg, u16 val)
 426{
 427	int limit = STOP_TRIES_PHY;
 428	u32 cmd;
 429
 430	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
 431	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
 432	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
 433	cmd |= MIF_FRAME_TURN_AROUND_MSB;
 434	cmd |= val & MIF_FRAME_DATA_MASK;
 435	writel(cmd, cp->regs + REG_MIF_FRAME);
 436
 437	/* poll for completion */
 438	while (limit-- > 0) {
 439		udelay(10);
 440		cmd = readl(cp->regs + REG_MIF_FRAME);
 441		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
 442			return 0;
 443	}
 444	return -1;
 445}
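/* Both accessors above build a standard IEEE 802.3 clause-22 MDIO frame
 * in REG_MIF_FRAME and then poll the turnaround bit. Rough frame sketch
 * (for orientation only; the exact field positions come from the
 * MIF_FRAME_* definitions):
 *
 *	ST(01) | OP(10=read, 01=write) | PHYAD | REGAD | TA | 16-bit data
 *
 * With a 10us poll and STOP_TRIES_PHY == 1000, an unresponsive PHY is
 * given roughly 10ms before cas_phy_read() gives up and returns 0xFFFF.
 */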
 446
 447static void cas_phy_powerup(struct cas *cp)
 448{
 449	u16 ctl = cas_phy_read(cp, MII_BMCR);
 450
 451	if ((ctl & BMCR_PDOWN) == 0)
 452		return;
 453	ctl &= ~BMCR_PDOWN;
 454	cas_phy_write(cp, MII_BMCR, ctl);
 455}
 456
 457static void cas_phy_powerdown(struct cas *cp)
 458{
 459	u16 ctl = cas_phy_read(cp, MII_BMCR);
 460
 461	if (ctl & BMCR_PDOWN)
 462		return;
 463	ctl |= BMCR_PDOWN;
 464	cas_phy_write(cp, MII_BMCR, ctl);
 465}
 466
 467/* cp->lock held. note: the last put_page will free the buffer */
 468static int cas_page_free(struct cas *cp, cas_page_t *page)
 469{
 470	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
 471		       PCI_DMA_FROMDEVICE);
 472	__free_pages(page->buffer, cp->page_order);
 473	kfree(page);
 474	return 0;
 475}
 476
 477#ifdef RX_COUNT_BUFFERS
 478#define RX_USED_ADD(x, y)       ((x)->used += (y))
 479#define RX_USED_SET(x, y)       ((x)->used  = (y))
 480#else
 481#define RX_USED_ADD(x, y)
 482#define RX_USED_SET(x, y)
 483#endif
 484
 485/* local page allocation routines for the receive buffers. jumbo pages
 486 * require at least 8K contiguous and 8K aligned buffers.
 487 */
 488static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
 489{
 490	cas_page_t *page;
 491
 492	page = kmalloc(sizeof(cas_page_t), flags);
 493	if (!page)
 494		return NULL;
 495
 496	INIT_LIST_HEAD(&page->list);
 497	RX_USED_SET(page, 0);
 498	page->buffer = alloc_pages(flags, cp->page_order);
 499	if (!page->buffer)
 500		goto page_err;
 501	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
 502				      cp->page_size, PCI_DMA_FROMDEVICE);
 503	return page;
 504
 505page_err:
 506	kfree(page);
 507	return NULL;
 508}
 509
 510/* initialize spare pool of rx buffers, but allocate during the open */
 511static void cas_spare_init(struct cas *cp)
 512{
 513	spin_lock(&cp->rx_inuse_lock);
 514	INIT_LIST_HEAD(&cp->rx_inuse_list);
 515	spin_unlock(&cp->rx_inuse_lock);
 516
 517	spin_lock(&cp->rx_spare_lock);
 518	INIT_LIST_HEAD(&cp->rx_spare_list);
 519	cp->rx_spares_needed = RX_SPARE_COUNT;
 520	spin_unlock(&cp->rx_spare_lock);
 521}
 522
 523/* used on close. free all the spare buffers. */
 524static void cas_spare_free(struct cas *cp)
 525{
 526	struct list_head list, *elem, *tmp;
 527
 528	/* free spare buffers */
 529	INIT_LIST_HEAD(&list);
 530	spin_lock(&cp->rx_spare_lock);
 531	list_splice_init(&cp->rx_spare_list, &list);
 532	spin_unlock(&cp->rx_spare_lock);
 533	list_for_each_safe(elem, tmp, &list) {
 534		cas_page_free(cp, list_entry(elem, cas_page_t, list));
 535	}
 536
 537	INIT_LIST_HEAD(&list);
 538#if 1
 539	/*
 540	 * Looks like Adrian had protected this with a different
 541	 * lock than used everywhere else to manipulate this list.
 542	 */
 543	spin_lock(&cp->rx_inuse_lock);
 544	list_splice_init(&cp->rx_inuse_list, &list);
 545	spin_unlock(&cp->rx_inuse_lock);
 546#else
 547	spin_lock(&cp->rx_spare_lock);
 548	list_splice_init(&cp->rx_inuse_list, &list);
 549	spin_unlock(&cp->rx_spare_lock);
 550#endif
 551	list_for_each_safe(elem, tmp, &list) {
 552		cas_page_free(cp, list_entry(elem, cas_page_t, list));
 553	}
 554}
 555
 556/* replenish spares if needed */
 557static void cas_spare_recover(struct cas *cp, const gfp_t flags)
 558{
 559	struct list_head list, *elem, *tmp;
 560	int needed, i;
 561
 562	/* check inuse list. if we don't need any more free buffers,
 563	 * just free them
 564	 */
 565
 566	/* make a local copy of the list */
 567	INIT_LIST_HEAD(&list);
 568	spin_lock(&cp->rx_inuse_lock);
 569	list_splice_init(&cp->rx_inuse_list, &list);
 570	spin_unlock(&cp->rx_inuse_lock);
 571
 572	list_for_each_safe(elem, tmp, &list) {
 573		cas_page_t *page = list_entry(elem, cas_page_t, list);
 574
 575		/*
 576		 * With the lockless pagecache, cassini buffering scheme gets
 577		 * slightly less accurate: we might find that a page has an
 578		 * elevated reference count here, due to a speculative ref,
 579		 * and skip it as in-use. Ideally we would be able to reclaim
 580		 * it. However this would be such a rare case that it doesn't
 581		 * matter too much as we should pick it up the next time round.
 582		 *
 583		 * Importantly, if we find that the page has a refcount of 1
 584		 * here (our refcount), then we know it is definitely not inuse
 585		 * so we can reuse it.
 586		 */
 587		if (page_count(page->buffer) > 1)
 588			continue;
 589
 590		list_del(elem);
 591		spin_lock(&cp->rx_spare_lock);
 592		if (cp->rx_spares_needed > 0) {
 593			list_add(elem, &cp->rx_spare_list);
 594			cp->rx_spares_needed--;
 595			spin_unlock(&cp->rx_spare_lock);
 596		} else {
 597			spin_unlock(&cp->rx_spare_lock);
 598			cas_page_free(cp, page);
 599		}
 600	}
 601
 602	/* put any inuse buffers back on the list */
 603	if (!list_empty(&list)) {
 604		spin_lock(&cp->rx_inuse_lock);
 605		list_splice(&list, &cp->rx_inuse_list);
 606		spin_unlock(&cp->rx_inuse_lock);
 607	}
 608
 609	spin_lock(&cp->rx_spare_lock);
 610	needed = cp->rx_spares_needed;
 611	spin_unlock(&cp->rx_spare_lock);
 612	if (!needed)
 613		return;
 614
 615	/* we still need spares, so try to allocate some */
 616	INIT_LIST_HEAD(&list);
 617	i = 0;
 618	while (i < needed) {
 619		cas_page_t *spare = cas_page_alloc(cp, flags);
 620		if (!spare)
 621			break;
 622		list_add(&spare->list, &list);
 623		i++;
 624	}
 625
 626	spin_lock(&cp->rx_spare_lock);
 627	list_splice(&list, &cp->rx_spare_list);
 628	cp->rx_spares_needed -= i;
 629	spin_unlock(&cp->rx_spare_lock);
 630}
 631
 632/* pull a page from the list. */
 633static cas_page_t *cas_page_dequeue(struct cas *cp)
 634{
 635	struct list_head *entry;
 636	int recover;
 637
 638	spin_lock(&cp->rx_spare_lock);
 639	if (list_empty(&cp->rx_spare_list)) {
 640		/* try to do a quick recovery */
 641		spin_unlock(&cp->rx_spare_lock);
 642		cas_spare_recover(cp, GFP_ATOMIC);
 643		spin_lock(&cp->rx_spare_lock);
 644		if (list_empty(&cp->rx_spare_list)) {
 645			netif_err(cp, rx_err, cp->dev,
 646				  "no spare buffers available\n");
 647			spin_unlock(&cp->rx_spare_lock);
 648			return NULL;
 649		}
 650	}
 651
 652	entry = cp->rx_spare_list.next;
 653	list_del(entry);
 654	recover = ++cp->rx_spares_needed;
 655	spin_unlock(&cp->rx_spare_lock);
 656
 657	/* trigger the timer to do the recovery */
 658	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
 659#if 1
 660		atomic_inc(&cp->reset_task_pending);
 661		atomic_inc(&cp->reset_task_pending_spare);
 662		schedule_work(&cp->reset_task);
 663#else
 664		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
 665		schedule_work(&cp->reset_task);
 666#endif
 667	}
 668	return list_entry(entry, cas_page_t, list);
 669}
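/* Note on the trigger above (illustrative): RX_SPARE_RECOVER_VAL is
 * assumed to be a power of two, so
 *
 *	(recover & (RX_SPARE_RECOVER_VAL - 1)) == 0
 *
 * fires once every RX_SPARE_RECOVER_VAL dequeues; e.g. with a value of
 * 64, the reset task is scheduled on every 64th missing spare rather
 * than on each one.
 */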
 670
 671
 672static void cas_mif_poll(struct cas *cp, const int enable)
 673{
 674	u32 cfg;
 675
 676	cfg  = readl(cp->regs + REG_MIF_CFG);
 677	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
 678
 679	if (cp->phy_type & CAS_PHY_MII_MDIO1)
 680		cfg |= MIF_CFG_PHY_SELECT;
 681
 682	/* poll and interrupt on link status change. */
 683	if (enable) {
 684		cfg |= MIF_CFG_POLL_EN;
 685		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
 686		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
 687	}
 688	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
 689	       cp->regs + REG_MIF_MASK);
 690	writel(cfg, cp->regs + REG_MIF_CFG);
 691}
 692
 693/* Must be invoked under cp->lock */
 694static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
 695{
 696	u16 ctl;
 697#if 1
 698	int lcntl;
 699	int changed = 0;
 700	int oldstate = cp->lstate;
 701	int link_was_not_down = (oldstate != link_down);
 702#endif
 703	/* Setup link parameters */
 704	if (!ep)
 705		goto start_aneg;
 706	lcntl = cp->link_cntl;
 707	if (ep->autoneg == AUTONEG_ENABLE)
 708		cp->link_cntl = BMCR_ANENABLE;
 709	else {
 710		u32 speed = ethtool_cmd_speed(ep);
 711		cp->link_cntl = 0;
 712		if (speed == SPEED_100)
 713			cp->link_cntl |= BMCR_SPEED100;
 714		else if (speed == SPEED_1000)
 715			cp->link_cntl |= CAS_BMCR_SPEED1000;
 716		if (ep->duplex == DUPLEX_FULL)
 717			cp->link_cntl |= BMCR_FULLDPLX;
 718	}
 719#if 1
 720	changed = (lcntl != cp->link_cntl);
 721#endif
 722start_aneg:
 723	if (cp->lstate == link_up) {
 724		netdev_info(cp->dev, "PCS link down\n");
 725	} else {
 726		if (changed) {
 727			netdev_info(cp->dev, "link configuration changed\n");
 728		}
 729	}
 730	cp->lstate = link_down;
 731	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
 732	if (!cp->hw_running)
 733		return;
 734#if 1
 735	/*
 736	 * WTZ: If the old state was link_up, we turn off the carrier
 737	 * to replicate everything we do elsewhere on a link-down
 738	 * event when we were already in a link-up state..
 739		 * event when we were already in a link-up state.
 740	if (oldstate == link_up)
 741		netif_carrier_off(cp->dev);
 742	if (changed && link_was_not_down) {
 743		/*
 744		 * WTZ: This branch will simply schedule a full reset after
 745		 * we explicitly changed link modes in an ioctl. See if this
 746		 * fixes the link-problems we were having for forced mode.
 747		 */
 748		atomic_inc(&cp->reset_task_pending);
 749		atomic_inc(&cp->reset_task_pending_all);
 750		schedule_work(&cp->reset_task);
 751		cp->timer_ticks = 0;
 752		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 753		return;
 754	}
 755#endif
 756	if (cp->phy_type & CAS_PHY_SERDES) {
 757		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
 758
 759		if (cp->link_cntl & BMCR_ANENABLE) {
 760			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
 761			cp->lstate = link_aneg;
 762		} else {
 763			if (cp->link_cntl & BMCR_FULLDPLX)
 764				val |= PCS_MII_CTRL_DUPLEX;
 765			val &= ~PCS_MII_AUTONEG_EN;
 766			cp->lstate = link_force_ok;
 767		}
 768		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 769		writel(val, cp->regs + REG_PCS_MII_CTRL);
 770
 771	} else {
 772		cas_mif_poll(cp, 0);
 773		ctl = cas_phy_read(cp, MII_BMCR);
 774		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
 775			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
 776		ctl |= cp->link_cntl;
 777		if (ctl & BMCR_ANENABLE) {
 778			ctl |= BMCR_ANRESTART;
 779			cp->lstate = link_aneg;
 780		} else {
 781			cp->lstate = link_force_ok;
 782		}
 783		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 784		cas_phy_write(cp, MII_BMCR, ctl);
 785		cas_mif_poll(cp, 1);
 786	}
 787
 788	cp->timer_ticks = 0;
 789	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 790}
 791
 792/* Must be invoked under cp->lock. */
 793static int cas_reset_mii_phy(struct cas *cp)
 794{
 795	int limit = STOP_TRIES_PHY;
 796	u16 val;
 797
 798	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
 799	udelay(100);
 800	while (--limit) {
 801		val = cas_phy_read(cp, MII_BMCR);
 802		if ((val & BMCR_RESET) == 0)
 803			break;
 804		udelay(10);
 805	}
 806	return limit <= 0;
 807}
 808
 809static void cas_saturn_firmware_init(struct cas *cp)
 810{
 811	const struct firmware *fw;
 812	const char fw_name[] = "sun/cassini.bin";
 813	int err;
 814
 815	if (PHY_NS_DP83065 != cp->phy_id)
 816		return;
 817
 818	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
 819	if (err) {
 820		pr_err("Failed to load firmware \"%s\"\n",
 821		       fw_name);
 822		return;
 823	}
 824	if (fw->size < 2) {
 825		pr_err("bogus length %zu in \"%s\"\n",
 826		       fw->size, fw_name);
 827		goto out;
 828	}
 829	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
 830	cp->fw_size = fw->size - 2;
 831	cp->fw_data = vmalloc(cp->fw_size);
 832	if (!cp->fw_data)
 833		goto out;
 834	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
 835out:
 836	release_firmware(fw);
 837}
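/* Firmware image layout assumed by the parsing above: the first two
 * bytes hold the little-endian PHY load address and the remainder is
 * the raw image, e.g.
 *
 *	data[] = { 0x00, 0x8f, d0, d1, ... }
 *		=> fw_load_addr = 0x8f00, fw_size = size - 2
 */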
 838
 839static void cas_saturn_firmware_load(struct cas *cp)
 840{
 841	int i;
 842
 843	if (!cp->fw_data)
 844		return;
 845
 846	cas_phy_powerdown(cp);
 847
 848	/* expanded memory access mode */
 849	cas_phy_write(cp, DP83065_MII_MEM, 0x0);
 850
 851	/* pointer configuration for new firmware */
 852	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
 853	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
 854	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
 855	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
 856	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
 857	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
 858	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
 859	cas_phy_write(cp, DP83065_MII_REGD, 0x39);
 860
 861	/* download new firmware */
 862	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
 863	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
 864	for (i = 0; i < cp->fw_size; i++)
 865		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
 866
 867	/* enable firmware */
 868	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
 869	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
 870}
 871
 872
 873/* phy initialization */
 874static void cas_phy_init(struct cas *cp)
 875{
 876	u16 val;
 877
 878	/* if we're in MII/GMII mode, set up phy */
 879	if (CAS_PHY_MII(cp->phy_type)) {
 880		writel(PCS_DATAPATH_MODE_MII,
 881		       cp->regs + REG_PCS_DATAPATH_MODE);
 882
 883		cas_mif_poll(cp, 0);
 884		cas_reset_mii_phy(cp); /* take out of isolate mode */
 885
 886		if (PHY_LUCENT_B0 == cp->phy_id) {
 887			/* workaround link up/down issue with lucent */
 888			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
 889			cas_phy_write(cp, MII_BMCR, 0x00f1);
 890			cas_phy_write(cp, LUCENT_MII_REG, 0x0);
 891
 892		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
 893			/* workarounds for broadcom phy */
 894			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
 895			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
 896			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
 897			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
 898			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
 899			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
 900			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
 901			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
 902			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
 903			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
 904			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
 905
 906		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
 907			val = cas_phy_read(cp, BROADCOM_MII_REG4);
 908			val = cas_phy_read(cp, BROADCOM_MII_REG4);
 909			if (val & 0x0080) {
 910				/* link workaround */
 911				cas_phy_write(cp, BROADCOM_MII_REG4,
 912					      val & ~0x0080);
 913			}
 914
 915		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
 916			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
 917			       SATURN_PCFG_FSI : 0x0,
 918			       cp->regs + REG_SATURN_PCFG);
 919
 920			/* load firmware to address 10Mbps auto-negotiation
 921			 * issue. NOTE: this will need to be changed if the
 922			 * default firmware gets fixed.
 923			 */
 924			if (PHY_NS_DP83065 == cp->phy_id) {
 925				cas_saturn_firmware_load(cp);
 926			}
 927			cas_phy_powerup(cp);
 928		}
 929
 930		/* advertise capabilities */
 931		val = cas_phy_read(cp, MII_BMCR);
 932		val &= ~BMCR_ANENABLE;
 933		cas_phy_write(cp, MII_BMCR, val);
 934		udelay(10);
 935
 936		cas_phy_write(cp, MII_ADVERTISE,
 937			      cas_phy_read(cp, MII_ADVERTISE) |
 938			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
 939			       ADVERTISE_100HALF | ADVERTISE_100FULL |
 940			       CAS_ADVERTISE_PAUSE |
 941			       CAS_ADVERTISE_ASYM_PAUSE));
 942
 943		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
 944			/* make sure that we don't advertise half
 945			 * duplex to avoid a chip issue
 946			 */
 947			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
 948			val &= ~CAS_ADVERTISE_1000HALF;
 949			val |= CAS_ADVERTISE_1000FULL;
 950			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
 951		}
 952
 953	} else {
 954		/* reset pcs for serdes */
 955		u32 val;
 956		int limit;
 957
 958		writel(PCS_DATAPATH_MODE_SERDES,
 959		       cp->regs + REG_PCS_DATAPATH_MODE);
 960
 961		/* enable serdes pins on saturn */
 962		if (cp->cas_flags & CAS_FLAG_SATURN)
 963			writel(0, cp->regs + REG_SATURN_PCFG);
 964
 965		/* Reset PCS unit. */
 966		val = readl(cp->regs + REG_PCS_MII_CTRL);
 967		val |= PCS_MII_RESET;
 968		writel(val, cp->regs + REG_PCS_MII_CTRL);
 969
 970		limit = STOP_TRIES;
 971		while (--limit > 0) {
 972			udelay(10);
 973			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
 974			     PCS_MII_RESET) == 0)
 975				break;
 976		}
 977		if (limit <= 0)
 978			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
 979				    readl(cp->regs + REG_PCS_STATE_MACHINE));
 980
 981		/* Make sure PCS is disabled while changing advertisement
 982		 * configuration.
 983		 */
 984		writel(0x0, cp->regs + REG_PCS_CFG);
 985
 986		/* Advertise all capabilities except half-duplex. */
 987		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
 988		val &= ~PCS_MII_ADVERT_HD;
 989		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
 990			PCS_MII_ADVERT_ASYM_PAUSE);
 991		writel(val, cp->regs + REG_PCS_MII_ADVERT);
 992
 993		/* enable PCS */
 994		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
 995
 996		/* pcs workaround: enable sync detect */
 997		writel(PCS_SERDES_CTRL_SYNCD_EN,
 998		       cp->regs + REG_PCS_SERDES_CTRL);
 999	}
1000}
1001
1002
1003static int cas_pcs_link_check(struct cas *cp)
1004{
1005	u32 stat, state_machine;
1006	int retval = 0;
1007
1008	/* The link status bit latches on zero, so you must
1009	 * read it twice in such a case to see a transition
1010	 * to the link being up.
1011	 */
1012	stat = readl(cp->regs + REG_PCS_MII_STATUS);
1013	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
1014		stat = readl(cp->regs + REG_PCS_MII_STATUS);
1015
1016	/* The remote-fault indication is only valid
1017	 * when autoneg has completed.
1018	 */
1019	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
1020		     PCS_MII_STATUS_REMOTE_FAULT)) ==
1021	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
1022		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
1023
1024	/* work around link detection issue by querying the PCS state
1025	 * machine directly.
1026	 */
1027	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
1028	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1029		stat &= ~PCS_MII_STATUS_LINK_STATUS;
1030	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1031		stat |= PCS_MII_STATUS_LINK_STATUS;
1032	}
1033
1034	if (stat & PCS_MII_STATUS_LINK_STATUS) {
1035		if (cp->lstate != link_up) {
1036			if (cp->opened) {
1037				cp->lstate = link_up;
1038				cp->link_transition = LINK_TRANSITION_LINK_UP;
1039
1040				cas_set_link_modes(cp);
1041				netif_carrier_on(cp->dev);
1042			}
1043		}
1044	} else if (cp->lstate == link_up) {
1045		cp->lstate = link_down;
1046		if (link_transition_timeout != 0 &&
1047		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1048		    !cp->link_transition_jiffies_valid) {
1049			/*
1050			 * force a reset, as a workaround for the
1051			 * link-failure problem. May want to move this to a
1052			 * point a bit earlier in the sequence. If we had
1053			 * generated a reset a short time ago, we'll wait for
1054			 * the link timer to check the status until a
1055			 * timer expires (link_transition_jiffies_valid is
1056			 * true when the timer is running.)  Instead of using
1057			 * a system timer, we just do a check whenever the
1058			 * link timer is running - this clears the flag after
1059			 * a suitable delay.
1060			 */
1061			retval = 1;
1062			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1063			cp->link_transition_jiffies = jiffies;
1064			cp->link_transition_jiffies_valid = 1;
1065		} else {
1066			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1067		}
1068		netif_carrier_off(cp->dev);
1069		if (cp->opened)
1070			netif_info(cp, link, cp->dev, "PCS link down\n");
1071
1072		/* Cassini only: if you force a mode, there can be
1073		 * sync problems on link down. to fix that, the following
1074		 * things need to be checked:
1075		 * 1) read serialink state register
1076		 * 2) read pcs status register to verify link down.
1077		 * 3) if link down and serial link == 0x03, then you need
1078		 *    to globally reset the chip.
1079		 */
1080		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1081			/* should check to see if we're in a forced mode */
1082			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1083			if (stat == 0x03)
1084				return 1;
1085		}
1086	} else if (cp->lstate == link_down) {
1087		if (link_transition_timeout != 0 &&
1088		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1089		    !cp->link_transition_jiffies_valid) {
1090			/* force a reset, as a workaround for the
1091			 * link-failure problem.  May want to move
1092			 * this to a point a bit earlier in the
1093			 * sequence.
1094			 */
1095			retval = 1;
1096			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1097			cp->link_transition_jiffies = jiffies;
1098			cp->link_transition_jiffies_valid = 1;
1099		} else {
1100			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1101		}
1102	}
1103
1104	return retval;
1105}
1106
1107static int cas_pcs_interrupt(struct net_device *dev,
1108			     struct cas *cp, u32 status)
1109{
1110	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1111
1112	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1113		return 0;
1114	return cas_pcs_link_check(cp);
1115}
1116
1117static int cas_txmac_interrupt(struct net_device *dev,
1118			       struct cas *cp, u32 status)
1119{
1120	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1121
1122	if (!txmac_stat)
1123		return 0;
1124
1125	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1126		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
1127
1128	/* Defer timer expiration is quite normal,
1129	 * don't even log the event.
1130	 */
1131	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1132	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1133		return 0;
1134
1135	spin_lock(&cp->stat_lock[0]);
1136	if (txmac_stat & MAC_TX_UNDERRUN) {
1137		netdev_err(dev, "TX MAC xmit underrun\n");
1138		cp->net_stats[0].tx_fifo_errors++;
1139	}
1140
1141	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1142		netdev_err(dev, "TX MAC max packet size error\n");
1143		cp->net_stats[0].tx_errors++;
1144	}
1145
1146	/* The rest are all cases of one of the 16-bit TX
1147	 * counters expiring.
1148	 */
1149	if (txmac_stat & MAC_TX_COLL_NORMAL)
1150		cp->net_stats[0].collisions += 0x10000;
1151
1152	if (txmac_stat & MAC_TX_COLL_EXCESS) {
1153		cp->net_stats[0].tx_aborted_errors += 0x10000;
1154		cp->net_stats[0].collisions += 0x10000;
1155	}
1156
1157	if (txmac_stat & MAC_TX_COLL_LATE) {
1158		cp->net_stats[0].tx_aborted_errors += 0x10000;
1159		cp->net_stats[0].collisions += 0x10000;
1160	}
1161	spin_unlock(&cp->stat_lock[0]);
1162
1163	/* We do not keep track of MAC_TX_COLL_FIRST and
1164	 * MAC_TX_PEAK_ATTEMPTS events.
1165	 */
1166	return 0;
1167}
1168
1169static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1170{
1171	cas_hp_inst_t *inst;
1172	u32 val;
1173	int i;
1174
1175	i = 0;
1176	while ((inst = firmware) && inst->note) {
1177		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1178
1179		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1180		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1181		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1182
1183		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1184		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1185		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1186		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1187		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1188		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1189		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1190		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1191
1192		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1193		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1194		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1195		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1196		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1197		++firmware;
1198		++i;
1199	}
1200}
1201
1202static void cas_init_rx_dma(struct cas *cp)
1203{
1204	u64 desc_dma = cp->block_dvma;
1205	u32 val;
1206	int i, size;
1207
1208	/* rx free descriptors */
1209	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1210	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1211	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1212	if ((N_RX_DESC_RINGS > 1) &&
1213	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
1214		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1215	writel(val, cp->regs + REG_RX_CFG);
1216
1217	val = (unsigned long) cp->init_rxds[0] -
1218		(unsigned long) cp->init_block;
1219	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1220	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1221	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1222
1223	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1224		/* rx desc 2 is for IPSEC packets. however,
1225	 * we don't use it for that purpose.
1226		 */
1227		val = (unsigned long) cp->init_rxds[1] -
1228			(unsigned long) cp->init_block;
1229		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1230		writel((desc_dma + val) & 0xffffffff, cp->regs +
1231		       REG_PLUS_RX_DB1_LOW);
1232		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1233		       REG_PLUS_RX_KICK1);
1234	}
1235
1236	/* rx completion registers */
1237	val = (unsigned long) cp->init_rxcs[0] -
1238		(unsigned long) cp->init_block;
1239	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1240	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1241
1242	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1243		/* rx comp 2-4 */
1244		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1245			val = (unsigned long) cp->init_rxcs[i] -
1246				(unsigned long) cp->init_block;
1247			writel((desc_dma + val) >> 32, cp->regs +
1248			       REG_PLUS_RX_CBN_HI(i));
1249			writel((desc_dma + val) & 0xffffffff, cp->regs +
1250			       REG_PLUS_RX_CBN_LOW(i));
1251		}
1252	}
1253
1254	/* read selective clear regs to prevent spurious interrupts
1255	 * on reset because complete == kick.
1256	 * selective clear set up to prevent interrupts on resets
1257	 */
1258	readl(cp->regs + REG_INTR_STATUS_ALIAS);
1259	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1260	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1261		for (i = 1; i < N_RX_COMP_RINGS; i++)
1262			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
1263
1264		/* 2 is different from 3 and 4 */
1265		if (N_RX_COMP_RINGS > 1)
1266			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
1267			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));
1268
1269		for (i = 2; i < N_RX_COMP_RINGS; i++)
1270			writel(INTR_RX_DONE_ALT,
1271			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
1272	}
1273
1274	/* set up pause thresholds */
1275	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
1276			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1277	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1278			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1279	writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1280
1281	/* zero out dma reassembly buffers */
1282	for (i = 0; i < 64; i++) {
1283		writel(i, cp->regs + REG_RX_TABLE_ADDR);
1284		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1285		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1286		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1287	}
1288
1289	/* make sure address register is 0 for normal operation */
1290	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1291	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1292
1293	/* interrupt mitigation */
1294#ifdef USE_RX_BLANK
1295	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1296	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1297	writel(val, cp->regs + REG_RX_BLANK);
1298#else
1299	writel(0x0, cp->regs + REG_RX_BLANK);
1300#endif
1301
1302	/* interrupt generation as a function of low water marks for
1303	 * free desc and completion entries. these are used to trigger
1304	 * housekeeping for rx descs. we don't use the free interrupt
1305	 * as it's not very useful
1306	 */
1307	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
1308	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1309	writel(val, cp->regs + REG_RX_AE_THRESH);
1310	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1311		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1312		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1313	}
1314
1315	/* Random early detect registers. useful for congestion avoidance.
1316	 * this should be tunable.
1317	 */
1318	writel(0x0, cp->regs + REG_RX_RED);
1319
1320	/* receive page sizes. default == 2K (0x800) */
1321	val = 0;
1322	if (cp->page_size == 0x1000)
1323		val = 0x1;
1324	else if (cp->page_size == 0x2000)
1325		val = 0x2;
1326	else if (cp->page_size == 0x4000)
1327		val = 0x3;
1328
1329	/* round mtu + offset. constrain to page size. */
1330	size = cp->dev->mtu + 64;
1331	if (size > cp->page_size)
1332		size = cp->page_size;
1333
1334	if (size <= 0x400)
1335		i = 0x0;
1336	else if (size <= 0x800)
1337		i = 0x1;
1338	else if (size <= 0x1000)
1339		i = 0x2;
1340	else
1341		i = 0x3;
1342
1343	cp->mtu_stride = 1 << (i + 10);
1344	val  = CAS_BASE(RX_PAGE_SIZE, val);
1345	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1346	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1347	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1348	writel(val, cp->regs + REG_RX_PAGE_SIZE);
1349
1350	/* enable the header parser if desired */
1351	if (CAS_HP_FIRMWARE == cas_prog_null)
1352		return;
1353
1354	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1355	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1356	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1357	writel(val, cp->regs + REG_HP_CFG);
1358}
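/* Worked example for the page-size setup above (illustrative): with a
 * standard 1500-byte MTU and 8K pages (cp->page_size == 0x2000),
 *
 *	size = 1500 + 64 = 1564  =>  i = 0x1  (size <= 0x800)
 *	mtu_stride = 1 << (1 + 10) = 2048
 *	MTU_COUNT  = 0x2000 >> 11 = 4
 *
 * so each 8K receive page is carved into four 2K MTU strides.
 */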
1359
1360static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1361{
1362	memset(rxc, 0, sizeof(*rxc));
1363	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1364}
1365
1366/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1367 * flipping is protected by the fact that the chip will not
1368 * hand back the same page index while it's being processed.
1369 */
1370static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1371{
1372	cas_page_t *page = cp->rx_pages[1][index];
1373	cas_page_t *new;
1374
1375	if (page_count(page->buffer) == 1)
1376		return page;
1377
1378	new = cas_page_dequeue(cp);
1379	if (new) {
1380		spin_lock(&cp->rx_inuse_lock);
1381		list_add(&page->list, &cp->rx_inuse_list);
1382		spin_unlock(&cp->rx_inuse_lock);
1383	}
1384	return new;
1385}
1386
1387/* this needs to be changed if we actually use the ENC RX DESC ring */
1388static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1389				 const int index)
1390{
1391	cas_page_t **page0 = cp->rx_pages[0];
1392	cas_page_t **page1 = cp->rx_pages[1];
1393
1394	/* swap if buffer is in use */
1395	if (page_count(page0[index]->buffer) > 1) {
1396		cas_page_t *new = cas_page_spare(cp, index);
1397		if (new) {
1398			page1[index] = page0[index];
1399			page0[index] = new;
1400		}
1401	}
1402	RX_USED_SET(page0[index], 0);
1403	return page0[index];
1404}
1405
1406static void cas_clean_rxds(struct cas *cp)
1407{
1408	/* only clean ring 0 as ring 1 is used for spare buffers */
1409	struct cas_rx_desc *rxd = cp->init_rxds[0];
1410	int i, size;
1411
1412	/* release all rx flows */
1413	for (i = 0; i < N_RX_FLOWS; i++) {
1414		struct sk_buff *skb;
1415		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1416			cas_skb_release(skb);
1417		}
1418	}
1419
1420	/* initialize descriptors */
1421	size = RX_DESC_RINGN_SIZE(0);
1422	for (i = 0; i < size; i++) {
1423		cas_page_t *page = cas_page_swap(cp, 0, i);
1424		rxd[i].buffer = cpu_to_le64(page->dma_addr);
1425		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1426					    CAS_BASE(RX_INDEX_RING, 0));
1427	}
1428
1429	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
1430	cp->rx_last[0] = 0;
1431	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1432}
1433
1434static void cas_clean_rxcs(struct cas *cp)
1435{
1436	int i, j;
1437
1438	/* take ownership of rx comp descriptors */
1439	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1440	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1441	for (i = 0; i < N_RX_COMP_RINGS; i++) {
1442		struct cas_rx_comp *rxc = cp->init_rxcs[i];
1443		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1444			cas_rxc_init(rxc + j);
1445		}
1446	}
1447}
1448
1449#if 0
1450/* When we get a RX fifo overflow, the RX unit is probably hung
1451 * so we do the following.
1452 *
1453 * If any part of the reset goes wrong, we return 1 and that causes the
1454 * whole chip to be reset.
1455 */
1456static int cas_rxmac_reset(struct cas *cp)
1457{
1458	struct net_device *dev = cp->dev;
1459	int limit;
1460	u32 val;
1461
1462	/* First, reset MAC RX. */
1463	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1464	for (limit = 0; limit < STOP_TRIES; limit++) {
1465		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1466			break;
1467		udelay(10);
1468	}
1469	if (limit == STOP_TRIES) {
1470		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1471		return 1;
1472	}
1473
1474	/* Second, disable RX DMA. */
1475	writel(0, cp->regs + REG_RX_CFG);
1476	for (limit = 0; limit < STOP_TRIES; limit++) {
1477		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1478			break;
1479		udelay(10);
1480	}
1481	if (limit == STOP_TRIES) {
1482		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1483		return 1;
1484	}
1485
1486	mdelay(5);
1487
1488	/* Execute RX reset command. */
1489	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1490	for (limit = 0; limit < STOP_TRIES; limit++) {
1491		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1492			break;
1493		udelay(10);
1494	}
1495	if (limit == STOP_TRIES) {
1496		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1497		return 1;
1498	}
1499
1500	/* reset driver rx state */
1501	cas_clean_rxds(cp);
1502	cas_clean_rxcs(cp);
1503
1504	/* Now, reprogram the rest of RX unit. */
1505	cas_init_rx_dma(cp);
1506
1507	/* re-enable */
1508	val = readl(cp->regs + REG_RX_CFG);
1509	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1510	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1511	val = readl(cp->regs + REG_MAC_RX_CFG);
1512	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1513	return 0;
1514}
1515#endif
1516
1517static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1518			       u32 status)
1519{
1520	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1521
1522	if (!stat)
1523		return 0;
1524
1525	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1526
1527	/* these are all rollovers */
1528	spin_lock(&cp->stat_lock[0]);
1529	if (stat & MAC_RX_ALIGN_ERR)
1530		cp->net_stats[0].rx_frame_errors += 0x10000;
1531
1532	if (stat & MAC_RX_CRC_ERR)
1533		cp->net_stats[0].rx_crc_errors += 0x10000;
1534
1535	if (stat & MAC_RX_LEN_ERR)
1536		cp->net_stats[0].rx_length_errors += 0x10000;
1537
1538	if (stat & MAC_RX_OVERFLOW) {
1539		cp->net_stats[0].rx_over_errors++;
1540		cp->net_stats[0].rx_fifo_errors++;
1541	}
1542
1543	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
1544	 * events.
1545	 */
1546	spin_unlock(&cp->stat_lock[0]);
1547	return 0;
1548}
1549
1550static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1551			     u32 status)
1552{
1553	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1554
1555	if (!stat)
1556		return 0;
1557
1558	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1559		     "mac interrupt, stat: 0x%x\n", stat);
1560
1561	/* This interrupt is just for pause frame and pause
1562	 * tracking.  It is useful for diagnostics and debug
1563	 * but probably by default we will mask these events.
1564	 */
1565	if (stat & MAC_CTRL_PAUSE_STATE)
1566		cp->pause_entered++;
1567
1568	if (stat & MAC_CTRL_PAUSE_RECEIVED)
1569		cp->pause_last_time_recvd = (stat >> 16);
1570
1571	return 0;
1572}
1573
1574
1575/* Must be invoked under cp->lock. */
1576static inline int cas_mdio_link_not_up(struct cas *cp)
1577{
1578	u16 val;
1579
1580	switch (cp->lstate) {
1581	case link_force_ret:
1582		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1583		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1584		cp->timer_ticks = 5;
1585		cp->lstate = link_force_ok;
1586		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1587		break;
1588
1589	case link_aneg:
1590		val = cas_phy_read(cp, MII_BMCR);
1591
1592		/* Try forced modes. we try things in the following order:
1593		 * 1000 full -> 100 full/half -> 10 half
1594		 */
1595		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1596		val |= BMCR_FULLDPLX;
1597		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1598			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1599		cas_phy_write(cp, MII_BMCR, val);
1600		cp->timer_ticks = 5;
1601		cp->lstate = link_force_try;
1602		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1603		break;
1604
1605	case link_force_try:
1606		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
1607		val = cas_phy_read(cp, MII_BMCR);
1608		cp->timer_ticks = 5;
1609		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
1610			val &= ~CAS_BMCR_SPEED1000;
1611			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1612			cas_phy_write(cp, MII_BMCR, val);
1613			break;
1614		}
1615
1616		if (val & BMCR_SPEED100) {
1617			if (val & BMCR_FULLDPLX) /* fd failed */
1618				val &= ~BMCR_FULLDPLX;
1619			else { /* 100Mbps failed */
1620				val &= ~BMCR_SPEED100;
1621			}
1622			cas_phy_write(cp, MII_BMCR, val);
1623			break;
1624		}
1625	default:
1626		break;
1627	}
1628	return 0;
1629}
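/* Fallback trace (illustrative): starting from failed autoneg on a
 * 1000MB-capable chip, successive passes through the switch above walk
 * BMCR down the documented order:
 *
 *	1000 full -> 100 full -> 100 half -> 10 half
 *
 * with cp->timer_ticks reset to 5 between steps, so each forced mode
 * gets a few link-timer ticks to come up before the next downgrade.
 */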
1630
1631
1632/* must be invoked with cp->lock held */
1633static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1634{
1635	int restart;
1636
1637	if (bmsr & BMSR_LSTATUS) {
1638		/* Ok, here we got a link. If we had it due to a forced
1639		 * fallback, and we were configured for autoneg, we
1640		 * retry a short autoneg pass. If you know your hub is
1641		 * broken, use ethtool ;)
1642		 */
1643		if ((cp->lstate == link_force_try) &&
1644		    (cp->link_cntl & BMCR_ANENABLE)) {
1645			cp->lstate = link_force_ret;
1646			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1647			cas_mif_poll(cp, 0);
1648			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1649			cp->timer_ticks = 5;
1650			if (cp->opened)
1651				netif_info(cp, link, cp->dev,
1652					   "Got link after fallback, retrying autoneg once...\n");
1653			cas_phy_write(cp, MII_BMCR,
1654				      cp->link_fcntl | BMCR_ANENABLE |
1655				      BMCR_ANRESTART);
1656			cas_mif_poll(cp, 1);
1657
1658		} else if (cp->lstate != link_up) {
1659			cp->lstate = link_up;
1660			cp->link_transition = LINK_TRANSITION_LINK_UP;
1661
1662			if (cp->opened) {
1663				cas_set_link_modes(cp);
1664				netif_carrier_on(cp->dev);
1665			}
1666		}
1667		return 0;
1668	}
1669
1670	/* link not up. if the link was previously up, we restart the
1671	 * whole process
1672	 */
1673	restart = 0;
1674	if (cp->lstate == link_up) {
1675		cp->lstate = link_down;
1676		cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1677
1678		netif_carrier_off(cp->dev);
1679		if (cp->opened)
1680			netif_info(cp, link, cp->dev, "Link down\n");
1681		restart = 1;
1682
1683	} else if (++cp->timer_ticks > 10)
1684		cas_mdio_link_not_up(cp);
1685
1686	return restart;
1687}
1688
1689static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1690			     u32 status)
1691{
1692	u32 stat = readl(cp->regs + REG_MIF_STATUS);
1693	u16 bmsr;
1694
1695	/* check for a link change */
1696	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1697		return 0;
1698
1699	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1700	return cas_mii_link_check(cp, bmsr);
1701}
1702
1703static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1704			     u32 status)
1705{
1706	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1707
1708	if (!stat)
1709		return 0;
1710
1711	netdev_err(dev, "PCI error [%04x:%04x]",
1712		   stat, readl(cp->regs + REG_BIM_DIAG));
1713
1714	/* cassini+ has this reserved */
1715	if ((stat & PCI_ERR_BADACK) &&
1716	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1717		pr_cont(" <No ACK64# during ABS64 cycle>");
1718
1719	if (stat & PCI_ERR_DTRTO)
1720		pr_cont(" <Delayed transaction timeout>");
1721	if (stat & PCI_ERR_OTHER)
1722		pr_cont(" <other>");
1723	if (stat & PCI_ERR_BIM_DMA_WRITE)
1724		pr_cont(" <BIM DMA 0 write req>");
1725	if (stat & PCI_ERR_BIM_DMA_READ)
1726		pr_cont(" <BIM DMA 0 read req>");
1727	pr_cont("\n");
1728
1729	if (stat & PCI_ERR_OTHER) {
1730		u16 cfg;
1731
1732		/* Interrogate PCI config space for the
1733		 * true cause.
1734		 */
1735		pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1736		netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
1737		if (cfg & PCI_STATUS_PARITY)
1738			netdev_err(dev, "PCI parity error detected\n");
1739		if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1740			netdev_err(dev, "PCI target abort\n");
1741		if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1742			netdev_err(dev, "PCI master acks target abort\n");
1743		if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1744			netdev_err(dev, "PCI master abort\n");
1745		if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1746			netdev_err(dev, "PCI system error SERR#\n");
1747		if (cfg & PCI_STATUS_DETECTED_PARITY)
1748			netdev_err(dev, "PCI parity error\n");
1749
1750		/* Write the error bits back to clear them. */
1751		cfg &= (PCI_STATUS_PARITY |
1752			PCI_STATUS_SIG_TARGET_ABORT |
1753			PCI_STATUS_REC_TARGET_ABORT |
1754			PCI_STATUS_REC_MASTER_ABORT |
1755			PCI_STATUS_SIG_SYSTEM_ERROR |
1756			PCI_STATUS_DETECTED_PARITY);
1757		pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
1758	}
1759
1760	/* For all PCI errors, we should reset the chip. */
1761	return 1;
1762}
1763
1764/* All non-normal interrupt conditions get serviced here.
1765 * Returns non-zero if we should just exit the interrupt
1766 * handler right now (ie. if we reset the card which invalidates
1767 * all of the other original irq status bits).
1768 */
1769static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1770			    u32 status)
1771{
1772	if (status & INTR_RX_TAG_ERROR) {
1773		/* corrupt RX tag framing */
1774		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1775			     "corrupt rx tag framing\n");
1776		spin_lock(&cp->stat_lock[0]);
1777		cp->net_stats[0].rx_errors++;
1778		spin_unlock(&cp->stat_lock[0]);
1779		goto do_reset;
1780	}
1781
1782	if (status & INTR_RX_LEN_MISMATCH) {
1783		/* length mismatch. */
1784		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1785			     "length mismatch for rx frame\n");
1786		spin_lock(&cp->stat_lock[0]);
1787		cp->net_stats[0].rx_errors++;
1788		spin_unlock(&cp->stat_lock[0]);
1789		goto do_reset;
1790	}
1791
1792	if (status & INTR_PCS_STATUS) {
1793		if (cas_pcs_interrupt(dev, cp, status))
1794			goto do_reset;
1795	}
1796
1797	if (status & INTR_TX_MAC_STATUS) {
1798		if (cas_txmac_interrupt(dev, cp, status))
1799			goto do_reset;
1800	}
1801
1802	if (status & INTR_RX_MAC_STATUS) {
1803		if (cas_rxmac_interrupt(dev, cp, status))
1804			goto do_reset;
1805	}
1806
1807	if (status & INTR_MAC_CTRL_STATUS) {
1808		if (cas_mac_interrupt(dev, cp, status))
1809			goto do_reset;
1810	}
1811
1812	if (status & INTR_MIF_STATUS) {
1813		if (cas_mif_interrupt(dev, cp, status))
1814			goto do_reset;
1815	}
1816
1817	if (status & INTR_PCI_ERROR_STATUS) {
1818		if (cas_pci_interrupt(dev, cp, status))
1819			goto do_reset;
1820	}
1821	return 0;
1822
1823do_reset:
1824#if 1
1825	atomic_inc(&cp->reset_task_pending);
1826	atomic_inc(&cp->reset_task_pending_all);
1827	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1828	schedule_work(&cp->reset_task);
1829#else
1830	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1831	netdev_err(dev, "reset called in cas_abnormal_irq\n");
1832	schedule_work(&cp->reset_task);
1833#endif
1834	return 1;
1835}
1836
1837/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
1838 *       determining whether to do a netif_stop/wakeup
1839 */
1840#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1841#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1842static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1843				  const int len)
1844{
1845	unsigned long off = addr + len;
1846
1847	if (CAS_TABORT(cp) == 1)
1848		return 0;
1849	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1850		return 0;
1851	return TX_TARGET_ABORT_LEN;
1852}
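/* Worked example (illustrative): with TX_TARGET_ABORT_LEN == 0x200 and
 * CAS_FLAG_TARGET_ABORT set, a buffer ending at offset 0xff80 sits 0x80
 * bytes short of the next page, so CAS_ROUND_PAGE(off) - off == 0x80 is
 * within 0x200 and cas_calc_tabort() returns TX_TARGET_ABORT_LEN,
 * telling the caller to split the tail off into a tiny buffer.
 */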
1853
1854static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1855{
1856	struct cas_tx_desc *txds;
1857	struct sk_buff **skbs;
1858	struct net_device *dev = cp->dev;
1859	int entry, count;
1860
1861	spin_lock(&cp->tx_lock[ring]);
1862	txds = cp->init_txds[ring];
1863	skbs = cp->tx_skbs[ring];
1864	entry = cp->tx_old[ring];
1865
1866	count = TX_BUFF_COUNT(ring, entry, limit);
1867	while (entry != limit) {
1868		struct sk_buff *skb = skbs[entry];
1869		dma_addr_t daddr;
1870		u32 dlen;
1871		int frag;
1872
1873		if (!skb) {
1874			/* this should never occur */
1875			entry = TX_DESC_NEXT(ring, entry);
1876			continue;
1877		}
1878
1879		/* however, we might get only a partial skb release. */
1880		count -= skb_shinfo(skb)->nr_frags +
1881			cp->tx_tiny_use[ring][entry].nbufs + 1;
1882		if (count < 0)
1883			break;
1884
1885		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1886			     "tx[%d] done, slot %d\n", ring, entry);
1887
1888		skbs[entry] = NULL;
1889		cp->tx_tiny_use[ring][entry].nbufs = 0;
1890
1891		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1892			struct cas_tx_desc *txd = txds + entry;
1893
1894			daddr = le64_to_cpu(txd->buffer);
1895			dlen = CAS_VAL(TX_DESC_BUFLEN,
1896				       le64_to_cpu(txd->control));
1897			pci_unmap_page(cp->pdev, daddr, dlen,
1898				       PCI_DMA_TODEVICE);
1899			entry = TX_DESC_NEXT(ring, entry);
1900
1901			/* tiny buffer may follow */
1902			if (cp->tx_tiny_use[ring][entry].used) {
1903				cp->tx_tiny_use[ring][entry].used = 0;
1904				entry = TX_DESC_NEXT(ring, entry);
1905			}
1906		}
1907
1908		spin_lock(&cp->stat_lock[ring]);
1909		cp->net_stats[ring].tx_packets++;
1910		cp->net_stats[ring].tx_bytes += skb->len;
1911		spin_unlock(&cp->stat_lock[ring]);
1912		dev_kfree_skb_irq(skb);
1913	}
1914	cp->tx_old[ring] = entry;
1915
1916	/* this is wrong for multiple tx rings. the net device needs
1917	 * multiple queues for this to do the right thing.  we wait
1918	 * for 2*packets to be available when using tiny buffers
1919	 */
1920	if (netif_queue_stopped(dev) &&
1921	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1922		netif_wake_queue(dev);
1923	spin_unlock(&cp->tx_lock[ring]);
1924}
1925
1926static void cas_tx(struct net_device *dev, struct cas *cp,
1927		   u32 status)
1928{
1929	int limit, ring;
1930	/* read tx_compwb unconditionally: the debug printout below
1931	 * references compwb even when USE_TX_COMPWB is not defined */
1932	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1933	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1934		     "tx interrupt, status: 0x%x, %llx\n",
1935		     status, (unsigned long long)compwb);
1936	/* process all the rings */
1937	for (ring = 0; ring < N_TX_RINGS; ring++) {
1938#ifdef USE_TX_COMPWB
1939		/* use the completion writeback registers */
1940		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1941			CAS_VAL(TX_COMPWB_LSB, compwb);
1942		compwb = TX_COMPWB_NEXT(compwb);
1943#else
1944		limit = readl(cp->regs + REG_TX_COMPN(ring));
1945#endif
1946		if (cp->tx_old[ring] != limit)
1947			cas_tx_ringN(cp, ring, limit);
1948	}
1949}
1950
1951
1952static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1953			      int entry, const u64 *words,
1954			      struct sk_buff **skbref)
1955{
1956	int dlen, hlen, len, i, alloclen;
1957	int off, swivel = RX_SWIVEL_OFF_VAL;
1958	struct cas_page *page;
1959	struct sk_buff *skb;
1960	void *addr, *crcaddr;
1961	__sum16 csum;
1962	char *p;
1963
1964	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1965	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1966	len  = hlen + dlen;
1967
1968	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1969		alloclen = len;
1970	else
1971		alloclen = max(hlen, RX_COPY_MIN);
1972
1973	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1974	if (skb == NULL)
1975		return -1;
1976
1977	*skbref = skb;
1978	skb_reserve(skb, swivel);
1979
1980	p = skb->data;
1981	addr = crcaddr = NULL;
1982	if (hlen) { /* always copy header pages */
1983		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1984		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1985		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1986			swivel;
1987
1988		i = hlen;
1989		if (!dlen) /* attach FCS */
1990			i += cp->crc_size;
1991		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
1992				    PCI_DMA_FROMDEVICE);
1993		addr = cas_page_map(page->buffer);
1994		memcpy(p, addr + off, i);
1995		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
1996				    PCI_DMA_FROMDEVICE);
1997		cas_page_unmap(addr);
1998		RX_USED_ADD(page, 0x100);
1999		p += hlen;
2000		swivel = 0;
2001	}
2002
2003
2004	if (alloclen < (hlen + dlen)) {
2005		skb_frag_t *frag = skb_shinfo(skb)->frags;
2006
2007		/* normal or jumbo packets. we use frags */
2008		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2009		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2010		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2011
2012		hlen = min(cp->page_size - off, dlen);
2013		if (hlen < 0) {
2014			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2015				     "rx page overflow: %d\n", hlen);
2016			dev_kfree_skb_irq(skb);
2017			return -1;
2018		}
2019		i = hlen;
2020		if (i == dlen)  /* attach FCS */
2021			i += cp->crc_size;
2022		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2023				    PCI_DMA_FROMDEVICE);
2024
2025		/* make sure we always copy a header */
2026		swivel = 0;
2027		if (p == (char *) skb->data) { /* not split */
2028			addr = cas_page_map(page->buffer);
2029			memcpy(p, addr + off, RX_COPY_MIN);
2030			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2031					PCI_DMA_FROMDEVICE);
2032			cas_page_unmap(addr);
2033			off += RX_COPY_MIN;
2034			swivel = RX_COPY_MIN;
2035			RX_USED_ADD(page, cp->mtu_stride);
2036		} else {
2037			RX_USED_ADD(page, hlen);
2038		}
2039		skb_put(skb, alloclen);
2040
2041		skb_shinfo(skb)->nr_frags++;
2042		skb->data_len += hlen - swivel;
2043		skb->truesize += hlen - swivel;
2044		skb->len      += hlen - swivel;
2045
2046		__skb_frag_set_page(frag, page->buffer);
2047		__skb_frag_ref(frag);
2048		frag->page_offset = off;
2049		skb_frag_size_set(frag, hlen - swivel);
2050
2051		/* any more data? */
2052		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2053			hlen = dlen;
2054			off = 0;
2055
2056			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2057			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2058			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2059					    hlen + cp->crc_size,
2060					    PCI_DMA_FROMDEVICE);
2061			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2062					    hlen + cp->crc_size,
2063					    PCI_DMA_FROMDEVICE);
2064
2065			skb_shinfo(skb)->nr_frags++;
2066			skb->data_len += hlen;
2067			skb->len      += hlen;
2068			frag++;
2069
2070			__skb_frag_set_page(frag, page->buffer);
2071			__skb_frag_ref(frag);
2072			frag->page_offset = 0;
2073			skb_frag_size_set(frag, hlen);
2074			RX_USED_ADD(page, hlen + cp->crc_size);
2075		}
2076
2077		if (cp->crc_size) {
2078			addr = cas_page_map(page->buffer);
2079			crcaddr  = addr + off + hlen;
2080		}
2081
2082	} else {
2083		/* copying packet */
2084		if (!dlen)
2085			goto end_copy_pkt;
2086
2087		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2088		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2089		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2090		hlen = min(cp->page_size - off, dlen);
2091		if (hlen < 0) {
2092			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2093				     "rx page overflow: %d\n", hlen);
2094			dev_kfree_skb_irq(skb);
2095			return -1;
2096		}
2097		i = hlen;
2098		if (i == dlen) /* attach FCS */
2099			i += cp->crc_size;
2100		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2101				    PCI_DMA_FROMDEVICE);
2102		addr = cas_page_map(page->buffer);
2103		memcpy(p, addr + off, i);
2104		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2105				    PCI_DMA_FROMDEVICE);
2106		cas_page_unmap(addr);
2107		if (p == (char *) skb->data) /* not split */
2108			RX_USED_ADD(page, cp->mtu_stride);
2109		else
2110			RX_USED_ADD(page, i);
2111
2112		/* any more data? */
2113		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2114			p += hlen;
2115			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2116			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2117			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2118					    dlen + cp->crc_size,
2119					    PCI_DMA_FROMDEVICE);
2120			addr = cas_page_map(page->buffer);
2121			memcpy(p, addr, dlen + cp->crc_size);
2122			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2123					    dlen + cp->crc_size,
2124					    PCI_DMA_FROMDEVICE);
2125			cas_page_unmap(addr);
2126			RX_USED_ADD(page, dlen + cp->crc_size);
2127		}
2128end_copy_pkt:
2129		if (cp->crc_size) {
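			/* the FCS bytes were copied in just past the payload;
			 * point crcaddr at them for the checksum fixup below.
			 */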
2130			addr    = NULL;
2131			crcaddr = skb->data + alloclen;
2132		}
2133		skb_put(skb, alloclen);
2134	}
2135
2136	csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2137	if (cp->crc_size) {
2138		/* checksum includes FCS. strip it out. */
2139		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2140					      csum_unfold(csum)));
2141		if (addr)
2142			cas_page_unmap(addr);
2143	}
2144	skb->protocol = eth_type_trans(skb, cp->dev);
2145	if (skb->protocol == htons(ETH_P_IP)) {
2146		skb->csum = csum_unfold(~csum);
2147		skb->ip_summed = CHECKSUM_COMPLETE;
2148	} else
2149		skb_checksum_none_assert(skb);
2150	return len;
2151}
2152
2153
2154/* we can handle up to 64 rx flows at a time. we do the same thing
2155 * as nonreassm except that we batch up the buffers.
2156 * NOTE: we currently just treat each flow as a bunch of packets that
2157 *       we pass up. a better way would be to coalesce the packets
2158 *       into a jumbo packet. to do that, we need to do the following:
2159 *       1) the first packet will have a clean split between header and
2160 *          data. save both.
2161 *       2) each time the next flow packet comes in, extend the
2162 *          data length and merge the checksums.
2163 *       3) on flow release, fix up the header.
2164 *       4) make sure the higher layer doesn't care.
2165 * because packets get coalesced, we shouldn't run into fragment count
2166 * issues.
2167 */
2168static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2169				   struct sk_buff *skb)
2170{
2171	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2172	struct sk_buff_head *flow = &cp->rx_flows[flowid];
2173
2174	/* this is protected at a higher layer, so no need to
2175	 * do any additional locking here. stick the buffer
2176	 * at the end.
2177	 */
2178	__skb_queue_tail(flow, skb);
2179	if (words[0] & RX_COMP1_RELEASE_FLOW) {
2180		while ((skb = __skb_dequeue(flow))) {
2181			cas_skb_release(skb);
2182		}
2183	}
2184}
2185
2186/* put rx descriptor back on ring. if the buffer is still in use by a
2187 * higher layer, swap in a replacement page.
2188 */
2189static void cas_post_page(struct cas *cp, const int ring, const int index)
2190{
2191	cas_page_t *new;
2192	int entry;
2193
2194	entry = cp->rx_old[ring];
2195
2196	new = cas_page_swap(cp, ring, index);
2197	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2198	cp->init_rxds[ring][entry].index  =
2199		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2200			    CAS_BASE(RX_INDEX_RING, ring));
2201
2202	entry = RX_DESC_ENTRY(ring, entry + 1);
2203	cp->rx_old[ring] = entry;
2204
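	/* the kick register is only written on 4-descriptor boundaries */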
2205	if (entry % 4)
2206		return;
2207
2208	if (ring == 0)
2209		writel(entry, cp->regs + REG_RX_KICK);
2210	else if ((N_RX_DESC_RINGS > 1) &&
2211		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2212		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2213}
2214
2215
2216/* only when things are bad */
2217static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2218{
2219	unsigned int entry, last, count, released;
2220	int cluster;
2221	cas_page_t **page = cp->rx_pages[ring];
2222
2223	entry = cp->rx_old[ring];
2224
2225	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2226		     "rxd[%d] interrupt, done: %d\n", ring, entry);
2227
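	/* 'cluster' remembers the most recent 4-aligned entry so the
	 * hardware can be kicked once at the end; 'count' tracks our
	 * position within the current group of 4.
	 */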
2228	cluster = -1;
2229	count = entry & 0x3;
2230	last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
2231	released = 0;
2232	while (entry != last) {
2233		/* make a new buffer if it's still in use */
2234		if (page_count(page[entry]->buffer) > 1) {
2235			cas_page_t *new = cas_page_dequeue(cp);
2236			if (!new) {
2237				/* let the timer know that we need to
2238				 * do this again
2239				 */
2240				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2241				if (!timer_pending(&cp->link_timer))
2242					mod_timer(&cp->link_timer, jiffies +
2243						  CAS_LINK_FAST_TIMEOUT);
2244				cp->rx_old[ring]  = entry;
2245				cp->rx_last[ring] = num ? num - released : 0;
2246				return -ENOMEM;
2247			}
2248			spin_lock(&cp->rx_inuse_lock);
2249			list_add(&page[entry]->list, &cp->rx_inuse_list);
2250			spin_unlock(&cp->rx_inuse_lock);
2251			cp->init_rxds[ring][entry].buffer =
2252				cpu_to_le64(new->dma_addr);
2253			page[entry] = new;
2254
2255		}
2256
2257		if (++count == 4) {
2258			cluster = entry;
2259			count = 0;
2260		}
2261		released++;
2262		entry = RX_DESC_ENTRY(ring, entry + 1);
2263	}
2264	cp->rx_old[ring] = entry;
2265
2266	if (cluster < 0)
2267		return 0;
2268
2269	if (ring == 0)
2270		writel(cluster, cp->regs + REG_RX_KICK);
2271	else if ((N_RX_DESC_RINGS > 1) &&
2272		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2273		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2274	return 0;
2275}
2276
2277
2278/* process a completion ring. packets are set up in three basic ways:
2279 * small packets: header + data are copied into a single buffer.
2280 * large packets: header and data arrive in a single buffer.
2281 * split packets: header in a separate buffer from data.
2282 *                data may be in multiple pages. data may be > 256
2283 *                bytes but in a single page.
2284 *
2285 * NOTE: RX page posting is done in this routine as well. while there's
2286 *       the capability of using multiple RX completion rings, it isn't
2287 *       really worthwhile because the page posting will force
2288 *       serialization on the single descriptor ring.
2289 */
2290static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2291{
2292	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2293	int entry, drops;
2294	int npackets = 0;
2295
2296	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2297		     "rx[%d] interrupt, done: %d/%d\n",
2298		     ring,
2299		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2300
2301	entry = cp->rx_new[ring];
2302	drops = 0;
2303	while (1) {
2304		struct cas_rx_comp *rxc = rxcs + entry;
2305		struct sk_buff *uninitialized_var(skb);
2306		int type, len;
2307		u64 words[4];
2308		int i, dring;
2309
2310		words[0] = le64_to_cpu(rxc->word1);
2311		words[1] = le64_to_cpu(rxc->word2);
2312		words[2] = le64_to_cpu(rxc->word3);
2313		words[3] = le64_to_cpu(rxc->word4);
2314
2315		/* don't touch if still owned by hw */
2316		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2317		if (type == 0)
2318			break;
2319
2320		/* hw hasn't cleared the zero bit yet */
2321		if (words[3] & RX_COMP4_ZERO) {
2322			break;
2323		}
2324
2325		/* get info on the packet */
2326		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2327			spin_lock(&cp->stat_lock[ring]);
2328			cp->net_stats[ring].rx_errors++;
2329			if (words[3] & RX_COMP4_LEN_MISMATCH)
2330				cp->net_stats[ring].rx_length_errors++;
2331			if (words[3] & RX_COMP4_BAD)
2332				cp->net_stats[ring].rx_crc_errors++;
2333			spin_unlock(&cp->stat_lock[ring]);
2334
2335			/* We'll just return it to Cassini. */
2336		drop_it:
2337			spin_lock(&cp->stat_lock[ring]);
2338			++cp->net_stats[ring].rx_dropped;
2339			spin_unlock(&cp->stat_lock[ring]);
2340			goto next;
2341		}
2342
2343		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2344		if (len < 0) {
2345			++drops;
2346			goto drop_it;
2347		}
2348
2349		/* see if it's a flow re-assembly or not. the driver
2350		 * itself handles release back up.
2351		 */
2352		if (RX_DONT_BATCH || (type == 0x2)) {
2353			/* non-reassm: these always get released */
2354			cas_skb_release(skb);
2355		} else {
2356			cas_rx_flow_pkt(cp, words, skb);
2357		}
2358
2359		spin_lock(&cp->stat_lock[ring]);
2360		cp->net_stats[ring].rx_packets++;
2361		cp->net_stats[ring].rx_bytes += len;
2362		spin_unlock(&cp->stat_lock[ring]);
2363
2364	next:
2365		npackets++;
2366
2367		/* should it be released? */
2368		if (words[0] & RX_COMP1_RELEASE_HDR) {
2369			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2370			dring = CAS_VAL(RX_INDEX_RING, i);
2371			i = CAS_VAL(RX_INDEX_NUM, i);
2372			cas_post_page(cp, dring, i);
2373		}
2374
2375		if (words[0] & RX_COMP1_RELEASE_DATA) {
2376			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2377			dring = CAS_VAL(RX_INDEX_RING, i);
2378			i = CAS_VAL(RX_INDEX_NUM, i);
2379			cas_post_page(cp, dring, i);
2380		}
2381
2382		if (words[0] & RX_COMP1_RELEASE_NEXT) {
2383			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2384			dring = CAS_VAL(RX_INDEX_RING, i);
2385			i = CAS_VAL(RX_INDEX_NUM, i);
2386			cas_post_page(cp, dring, i);
2387		}
2388
2389		/* skip to the next entry */
2390		entry = RX_COMP_ENTRY(ring, entry + 1 +
2391				      CAS_VAL(RX_COMP1_SKIP, words[0]));
2392#ifdef USE_NAPI
2393		if (budget && (npackets >= budget))
2394			break;
2395#endif
2396	}
2397	cp->rx_new[ring] = entry;
2398
2399	if (drops)
2400		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2401	return npackets;
2402}
2403
2404
2405/* put completion entries back on the ring */
2407				struct cas *cp, int ring)
2408{
2409	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2410	int last, entry;
2411
2412	last = cp->rx_cur[ring];
2413	entry = cp->rx_new[ring];
2414	netif_printk(cp, intr, KERN_DEBUG, dev,
2415		     "rxc[%d] interrupt, done: %d/%d\n",
2416		     ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2417
2418	/* zero and re-mark descriptors */
2419	while (last != entry) {
2420		cas_rxc_init(rxc + last);
2421		last = RX_COMP_ENTRY(ring, last + 1);
2422	}
2423	cp->rx_cur[ring] = last;
2424
2425	if (ring == 0)
2426		writel(last, cp->regs + REG_RX_COMP_TAIL);
2427	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2428		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2429}
2430
2431
2432
2433/* cassini can use all four PCI interrupts for the completion rings.
2434 * rings 3 and 4 (INTC/INTD) are handled identically.
2435 */
2436#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2437static inline void cas_handle_irqN(struct net_device *dev,
2438				   struct cas *cp, const u32 status,
2439				   const int ring)
2440{
2441	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2442		cas_post_rxcs_ringN(dev, cp, ring);
2443}
2444
2445static irqreturn_t cas_interruptN(int irq, void *dev_id)
2446{
2447	struct net_device *dev = dev_id;
2448	struct cas *cp = netdev_priv(dev);
2449	unsigned long flags;
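	/* INTC services completion ring 2, INTD services ring 3 */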
2450	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2451	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2452
2453	/* check for shared irq */
2454	if (status == 0)
2455		return IRQ_NONE;
2456
2457	spin_lock_irqsave(&cp->lock, flags);
2458	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2459#ifdef USE_NAPI
2460		cas_mask_intr(cp);
2461		napi_schedule(&cp->napi);
2462#else
2463		cas_rx_ringN(cp, ring, 0);
2464#endif
2465		status &= ~INTR_RX_DONE_ALT;
2466	}
2467
2468	if (status)
2469		cas_handle_irqN(dev, cp, status, ring);
2470	spin_unlock_irqrestore(&cp->lock, flags);
2471	return IRQ_HANDLED;
2472}
2473#endif
2474
2475#ifdef USE_PCI_INTB
2476/* everything but rx packets */
2477static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2478{
2479	if (status & INTR_RX_BUF_UNAVAIL_1) {
2480		/* Frame arrived, no free RX buffers available.
2481		 * NOTE: we can get this on a link transition. */
2482		cas_post_rxds_ringN(cp, 1, 0);
2483		spin_lock(&cp->stat_lock[1]);
2484		cp->net_stats[1].rx_dropped++;
2485		spin_unlock(&cp->stat_lock[1]);
2486	}
2487
2488	if (status & INTR_RX_BUF_AE_1)
2489		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2490				    RX_AE_FREEN_VAL(1));
2491
2492	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2493		cas_post_rxcs_ringN(cp, 1);
2494}
2495
2496/* ring 2 handles a few more events than 3 and 4 */
2497static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2498{
2499	struct net_device *dev = dev_id;
2500	struct cas *cp = netdev_priv(dev);
2501	unsigned long flags;
2502	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2503
2504	/* check for shared interrupt */
2505	if (status == 0)
2506		return IRQ_NONE;
2507
2508	spin_lock_irqsave(&cp->lock, flags);
2509	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2510#ifdef USE_NAPI
2511		cas_mask_intr(cp);
2512		napi_schedule(&cp->napi);
2513#else
2514		cas_rx_ringN(cp, 1, 0);
2515#endif
2516		status &= ~INTR_RX_DONE_ALT;
2517	}
2518	if (status)
2519		cas_handle_irq1(cp, status);
2520	spin_unlock_irqrestore(&cp->lock, flags);
2521	return IRQ_HANDLED;
2522}
2523#endif
2524
2525static inline void cas_handle_irq(struct net_device *dev,
2526				  struct cas *cp, const u32 status)
2527{
2528	/* housekeeping interrupts */
2529	if (status & INTR_ERROR_MASK)
2530		cas_abnormal_irq(dev, cp, status);
2531
2532	if (status & INTR_RX_BUF_UNAVAIL) {
2533		/* Frame arrived, no free RX buffers available.
2534		 * NOTE: we can get this on a link transition.
2535		 */
2536		cas_post_rxds_ringN(cp, 0, 0);
2537		spin_lock(&cp->stat_lock[0]);
2538		cp->net_stats[0].rx_dropped++;
2539		spin_unlock(&cp->stat_lock[0]);
2540	} else if (status & INTR_RX_BUF_AE) {
2541		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2542				    RX_AE_FREEN_VAL(0));
2543	}
2544
2545	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2546		cas_post_rxcs_ringN(dev, cp, 0);
2547}
2548
2549static irqreturn_t cas_interrupt(int irq, void *dev_id)
2550{
2551	struct net_device *dev = dev_id;
2552	struct cas *cp = netdev_priv(dev);
2553	unsigned long flags;
2554	u32 status = readl(cp->regs + REG_INTR_STATUS);
2555
2556	if (status == 0)
2557		return IRQ_NONE;
2558
2559	spin_lock_irqsave(&cp->lock, flags);
2560	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2561		cas_tx(dev, cp, status);
2562		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2563	}
2564
2565	if (status & INTR_RX_DONE) {
2566#ifdef USE_NAPI
2567		cas_mask_intr(cp);
2568		napi_schedule(&cp->napi);
2569#else
2570		cas_rx_ringN(cp, 0, 0);
2571#endif
2572		status &= ~INTR_RX_DONE;
2573	}
2574
2575	if (status)
2576		cas_handle_irq(dev, cp, status);
2577	spin_unlock_irqrestore(&cp->lock, flags);
2578	return IRQ_HANDLED;
2579}
2580
2581
2582#ifdef USE_NAPI
2583static int cas_poll(struct napi_struct *napi, int budget)
2584{
2585	struct cas *cp = container_of(napi, struct cas, napi);
2586	struct net_device *dev = cp->dev;
2587	int i, enable_intr, credits;
2588	u32 status = readl(cp->regs + REG_INTR_STATUS);
2589	unsigned long flags;
2590
2591	spin_lock_irqsave(&cp->lock, flags);
2592	cas_tx(dev, cp, status);
2593	spin_unlock_irqrestore(&cp->lock, flags);
2594
2595	/* NAPI rx packets. we spread the credits across all of the
2596	 * rxc rings
2597	 *
2598	 * to make sure we're fair with the work, we loop through each
2599	 * ring N_RX_COMP_RINGS times with a request of
2600	 * budget / N_RX_COMP_RINGS
2601	 */
2602	enable_intr = 1;
2603	credits = 0;
2604	for (i = 0; i < N_RX_COMP_RINGS; i++) {
2605		int j;
2606		for (j = 0; j < N_RX_COMP_RINGS; j++) {
2607			credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2608			if (credits >= budget) {
2609				enable_intr = 0;
2610				goto rx_comp;
2611			}
2612		}
2613	}
2614
2615rx_comp:
2616	/* final rx completion */
2617	spin_lock_irqsave(&cp->lock, flags);
2618	if (status)
2619		cas_handle_irq(dev, cp, status);
2620
2621#ifdef USE_PCI_INTB
2622	if (N_RX_COMP_RINGS > 1) {
2623		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2624		if (status)
2625			cas_handle_irq1(dev, cp, status);
2626	}
2627#endif
2628
2629#ifdef USE_PCI_INTC
2630	if (N_RX_COMP_RINGS > 2) {
2631		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2632		if (status)
2633			cas_handle_irqN(dev, cp, status, 2);
2634	}
2635#endif
2636
2637#ifdef USE_PCI_INTD
2638	if (N_RX_COMP_RINGS > 3) {
2639		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2640		if (status)
2641			cas_handle_irqN(dev, cp, status, 3);
2642	}
2643#endif
2644	spin_unlock_irqrestore(&cp->lock, flags);
2645	if (enable_intr) {
2646		napi_complete(napi);
2647		cas_unmask_intr(cp);
2648	}
2649	return credits;
2650}
2651#endif
2652
2653#ifdef CONFIG_NET_POLL_CONTROLLER
2654static void cas_netpoll(struct net_device *dev)
2655{
2656	struct cas *cp = netdev_priv(dev);
2657
2658	cas_disable_irq(cp, 0);
2659	cas_interrupt(cp->pdev->irq, dev);
2660	cas_enable_irq(cp, 0);
2661
2662#ifdef USE_PCI_INTB
2663	if (N_RX_COMP_RINGS > 1) {
2664		/* cas_interrupt1(); */
2665	}
2666#endif
2667#ifdef USE_PCI_INTC
2668	if (N_RX_COMP_RINGS > 2) {
2669		/* cas_interruptN(); */
2670	}
2671#endif
2672#ifdef USE_PCI_INTD
2673	if (N_RX_COMP_RINGS > 3) {
2674		/* cas_interruptN(); */
2675	}
2676#endif
2677}
2678#endif
2679
2680static void cas_tx_timeout(struct net_device *dev)
2681{
2682	struct cas *cp = netdev_priv(dev);
2683
2684	netdev_err(dev, "transmit timed out, resetting\n");
2685	if (!cp->hw_running) {
2686		netdev_err(dev, "hrm.. hw not running!\n");
2687		return;
2688	}
2689
2690	netdev_err(dev, "MIF_STATE[%08x]\n",
2691		   readl(cp->regs + REG_MIF_STATE_MACHINE));
2692
2693	netdev_err(dev, "MAC_STATE[%08x]\n",
2694		   readl(cp->regs + REG_MAC_STATE_MACHINE));
2695
2696	netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2697		   readl(cp->regs + REG_TX_CFG),
2698		   readl(cp->regs + REG_MAC_TX_STATUS),
2699		   readl(cp->regs + REG_MAC_TX_CFG),
2700		   readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2701		   readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2702		   readl(cp->regs + REG_TX_FIFO_READ_PTR),
2703		   readl(cp->regs + REG_TX_SM_1),
2704		   readl(cp->regs + REG_TX_SM_2));
2705
2706	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2707		   readl(cp->regs + REG_RX_CFG),
2708		   readl(cp->regs + REG_MAC_RX_STATUS),
2709		   readl(cp->regs + REG_MAC_RX_CFG));
2710
2711	netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2712		   readl(cp->regs + REG_HP_STATE_MACHINE),
2713		   readl(cp->regs + REG_HP_STATUS0),
2714		   readl(cp->regs + REG_HP_STATUS1),
2715		   readl(cp->regs + REG_HP_STATUS2));
2716
2717#if 1
2718	atomic_inc(&cp->reset_task_pending);
2719	atomic_inc(&cp->reset_task_pending_all);
2720	schedule_work(&cp->reset_task);
2721#else
2722	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2723	schedule_work(&cp->reset_task);
2724#endif
2725}
2726
2727static inline int cas_intme(int ring, int entry)
2728{
2729	/* Algorithm: IRQ every 1/2 of descriptors. */
2730	if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2731		return 1;
2732	return 0;
2733}
2734
2735
2736static void cas_write_txd(struct cas *cp, int ring, int entry,
2737			  dma_addr_t mapping, int len, u64 ctrl, int last)
2738{
2739	struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2740
2741	ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2742	if (cas_intme(ring, entry))
2743		ctrl |= TX_DESC_INTME;
2744	if (last)
2745		ctrl |= TX_DESC_EOF;
2746	txd->control = cpu_to_le64(ctrl);
2747	txd->buffer = cpu_to_le64(mapping);
2748}
2749
2750static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2751				const int entry)
2752{
2753	return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2754}
2755
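/* account for a tiny-buffer use: the count is kept on the packet's first
 * entry (tentry), and the slot itself is flagged so cleanup can skip it.
 */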
2756static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2757				     const int entry, const int tentry)
2758{
2759	cp->tx_tiny_use[ring][tentry].nbufs++;
2760	cp->tx_tiny_use[ring][entry].used = 1;
2761	return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2762}
2763
2764static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2765				    struct sk_buff *skb)
2766{
2767	struct net_device *dev = cp->dev;
2768	int entry, nr_frags, frag, tabort, tentry;
2769	dma_addr_t mapping;
2770	unsigned long flags;
2771	u64 ctrl;
2772	u32 len;
2773
2774	spin_lock_irqsave(&cp->tx_lock[ring], flags);
2775
2776	/* This is a hard error, log it. */
2777	if (TX_BUFFS_AVAIL(cp, ring) <=
2778	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2779		netif_stop_queue(dev);
2780		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2781		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2782		return 1;
2783	}
2784
2785	ctrl = 0;
2786	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2787		const u64 csum_start_off = skb_checksum_start_offset(skb);
2788		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2789
2790		ctrl =  TX_DESC_CSUM_EN |
2791			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2792			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2793	}
2794
2795	entry = cp->tx_new[ring];
2796	cp->tx_skbs[ring][entry] = skb;
2797
2798	nr_frags = skb_shinfo(skb)->nr_frags;
2799	len = skb_headlen(skb);
2800	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2801			       offset_in_page(skb->data), len,
2802			       PCI_DMA_TODEVICE);
2803
2804	tentry = entry;
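	/* when the buffer ends too close to a page boundary, chips with
	 * the target-abort bug need the tail bounced through a tiny buffer.
	 */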
2805	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2806	if (unlikely(tabort)) {
2807		/* NOTE: len is always > tabort */
2808		cas_write_txd(cp, ring, entry, mapping, len - tabort,
2809			      ctrl | TX_DESC_SOF, 0);
2810		entry = TX_DESC_NEXT(ring, entry);
2811
2812		skb_copy_from_linear_data_offset(skb, len - tabort,
2813			      tx_tiny_buf(cp, ring, entry), tabort);
2814		mapping = tx_tiny_map(cp, ring, entry, tentry);
2815		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2816			      (nr_frags == 0));
2817	} else {
2818		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2819			      TX_DESC_SOF, (nr_frags == 0));
2820	}
2821	entry = TX_DESC_NEXT(ring, entry);
2822
2823	for (frag = 0; frag < nr_frags; frag++) {
2824		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2825
2826		len = skb_frag_size(fragp);
2827		mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2828					   DMA_TO_DEVICE);
2829
2830		tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2831		if (unlikely(tabort)) {
2832			void *addr;
2833
2834			/* NOTE: len is always > tabort */
2835			cas_write_txd(cp, ring, entry, mapping, len - tabort,
2836				      ctrl, 0);
2837			entry = TX_DESC_NEXT(ring, entry);
2838
2839			addr = cas_page_map(skb_frag_page(fragp));
2840			memcpy(tx_tiny_buf(cp, ring, entry),
2841			       addr + fragp->page_offset + len - tabort,
2842			       tabort);
2843			cas_page_unmap(addr);
2844			mapping = tx_tiny_map(cp, ring, entry, tentry);
2845			len     = tabort;
2846		}
2847
2848		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2849			      (frag + 1 == nr_frags));
2850		entry = TX_DESC_NEXT(ring, entry);
2851	}
2852
2853	cp->tx_new[ring] = entry;
2854	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2855		netif_stop_queue(dev);
2856
2857	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2858		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2859		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2860	writel(entry, cp->regs + REG_TX_KICKN(ring));
2861	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2862	return 0;
2863}
2864
2865static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2866{
2867	struct cas *cp = netdev_priv(dev);
2868
2869	/* this is only used as a load-balancing hint, so it doesn't
2870	 * need to be SMP safe
2871	 */
2872	static int ring;
2873
2874	if (skb_padto(skb, cp->min_frame_size))
2875		return NETDEV_TX_OK;
2876
2877	/* XXX: we need some higher-level QoS hooks to steer packets to
2878	 *      individual queues.
2879	 */
2880	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2881		return NETDEV_TX_BUSY;
2882	return NETDEV_TX_OK;
2883}
2884
2885static void cas_init_tx_dma(struct cas *cp)
2886{
2887	u64 desc_dma = cp->block_dvma;
2888	unsigned long off;
2889	u32 val;
2890	int i;
2891
2892	/* set up tx completion writeback registers. must be 8-byte aligned */
2893#ifdef USE_TX_COMPWB
2894	off = offsetof(struct cas_init_block, tx_compwb);
2895	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2896	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2897#endif
2898
2899	/* enable completion writebacks, enable paced mode,
2900	 * disable read pipe, and disable pre-interrupt compwbs
2901	 */
2902	val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2903		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2904		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2905		TX_CFG_INTR_COMPWB_DIS;
2906
2907	/* write out tx ring info and tx desc bases */
2908	for (i = 0; i < MAX_TX_RINGS; i++) {
2909		off = (unsigned long) cp->init_txds[i] -
2910			(unsigned long) cp->init_block;
2911
2912		val |= CAS_TX_RINGN_BASE(i);
2913		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2914		writel((desc_dma + off) & 0xffffffff, cp->regs +
2915		       REG_TX_DBN_LOW(i));
2916		/* don't zero out the kick register here as the system
2917		 * will wedge
2918		 */
2919	}
2920	writel(val, cp->regs + REG_TX_CFG);
2921
2922	/* program max burst sizes. these numbers should be different
2923	 * if doing QoS.
2924	 */
2925#ifdef USE_QOS
2926	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2927	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2928	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2929	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2930#else
2931	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2932	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2933	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2934	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2935#endif
2936}
2937
2938/* Must be invoked under cp->lock. */
2939static inline void cas_init_dma(struct cas *cp)
2940{
2941	cas_init_tx_dma(cp);
2942	cas_init_rx_dma(cp);
2943}
2944
2945static void cas_process_mc_list(struct cas *cp)
2946{
2947	u16 hash_table[16];
2948	u32 crc;
2949	struct netdev_hw_addr *ha;
2950	int i = 1;
2951
2952	memset(hash_table, 0, sizeof(hash_table));
2953	netdev_for_each_mc_addr(ha, cp->dev) {
2954		if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2955			/* use the alternate mac address registers for the
2956			 * first 15 multicast addresses
2957			 */
2958			writel((ha->addr[4] << 8) | ha->addr[5],
2959			       cp->regs + REG_MAC_ADDRN(i*3 + 0));
2960			writel((ha->addr[2] << 8) | ha->addr[3],
2961			       cp->regs + REG_MAC_ADDRN(i*3 + 1));
2962			writel((ha->addr[0] << 8) | ha->addr[1],
2963			       cp->regs + REG_MAC_ADDRN(i*3 + 2));
2964			i++;
2965		}
2966		else {
2967			/* use hw hash table for the next series of
2968			 * multicast addresses
2969			 */
2970			crc = ether_crc_le(ETH_ALEN, ha->addr);
2971			crc >>= 24;
2972			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2973		}
2974	}
2975	for (i = 0; i < 16; i++)
2976		writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2977}
2978
2979/* Must be invoked under cp->lock. */
2980static u32 cas_setup_multicast(struct cas *cp)
2981{
2982	u32 rxcfg = 0;
2983	int i;
2984
2985	if (cp->dev->flags & IFF_PROMISC) {
2986		rxcfg |= MAC_RX_CFG_PROMISC_EN;
2987
2988	} else if (cp->dev->flags & IFF_ALLMULTI) {
2989		for (i = 0; i < 16; i++)
2990			writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2991		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2992
2993	} else {
2994		cas_process_mc_list(cp);
2995		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2996	}
2997
2998	return rxcfg;
2999}
3000
3001/* must be invoked under cp->stat_lock[N_TX_RINGS] */
3002static void cas_clear_mac_err(struct cas *cp)
3003{
3004	writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3005	writel(0, cp->regs + REG_MAC_COLL_FIRST);
3006	writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3007	writel(0, cp->regs + REG_MAC_COLL_LATE);
3008	writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3009	writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3010	writel(0, cp->regs + REG_MAC_RECV_FRAME);
3011	writel(0, cp->regs + REG_MAC_LEN_ERR);
3012	writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3013	writel(0, cp->regs + REG_MAC_FCS_ERR);
3014	writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3015}
3016
3017
3018static void cas_mac_reset(struct cas *cp)
3019{
3020	int i;
3021
3022	/* do both TX and RX reset */
3023	writel(0x1, cp->regs + REG_MAC_TX_RESET);
3024	writel(0x1, cp->regs + REG_MAC_RX_RESET);
3025
3026	/* wait for TX */
3027	i = STOP_TRIES;
3028	while (i-- > 0) {
3029		if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3030			break;
3031		udelay(10);
3032	}
3033
3034	/* wait for RX */
3035	i = STOP_TRIES;
3036	while (i-- > 0) {
3037		if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3038			break;
3039		udelay(10);
3040	}
3041
3042	if (readl(cp->regs + REG_MAC_TX_RESET) |
3043	    readl(cp->regs + REG_MAC_RX_RESET))
3044		netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3045			   readl(cp->regs + REG_MAC_TX_RESET),
3046			   readl(cp->regs + REG_MAC_RX_RESET),
3047			   readl(cp->regs + REG_MAC_STATE_MACHINE));
3048}
3049
3050
3051/* Must be invoked under cp->lock. */
3052static void cas_init_mac(struct cas *cp)
3053{
3054	unsigned char *e = &cp->dev->dev_addr[0];
3055	int i;
3056	cas_mac_reset(cp);
3057
3058	/* setup core arbitration weight register */
3059	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3060
3061#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3062	/* set the infinite burst register for chips that don't have
3063	 * pci issues.
3064	 */
3065	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3066		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3067#endif
3068
3069	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3070
3071	writel(0x00, cp->regs + REG_MAC_IPG0);
3072	writel(0x08, cp->regs + REG_MAC_IPG1);
3073	writel(0x04, cp->regs + REG_MAC_IPG2);
3074
3075	/* change later for 802.3z */
3076	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3077
3078	/* min frame + FCS */
3079	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3080
3081	/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
3082	 * specify the maximum frame size to prevent RX tag errors on
3083	 * oversized frames.
3084	 */
3085	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3086	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3087			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3088	       cp->regs + REG_MAC_FRAMESIZE_MAX);
3089
3090	/* NOTE: crc_size is used as a surrogate for half-duplex.
3091	 * workaround saturn half-duplex issue by increasing preamble
3092	 * size to 65 bytes.
3093	 */
3094	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3095		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3096	else
3097		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3098	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3099	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3100	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3101
3102	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3103
3104	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3105	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3106	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3107	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3108	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3109
3110	/* setup mac address in perfect filter array */
3111	for (i = 0; i < 45; i++)
3112		writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3113
3114	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3115	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3116	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3117
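	/* entries 42-44 hold the 802.3x flow-control multicast
	 * address 01:80:c2:00:00:01.
	 */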
3118	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3119	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3120	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3121
3122	cp->mac_rx_cfg = cas_setup_multicast(cp);
3123
3124	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3125	cas_clear_mac_err(cp);
3126	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3127
3128	/* Setup MAC interrupts.  We want to get all of the interesting
3129	 * counter expiration events, but we do not want to hear about
3130	 * normal rx/tx as the DMA engine tells us that.
3131	 */
3132	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3133	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3134
3135	/* Don't enable even the PAUSE interrupts for now, we
3136	 * make no use of those events other than to record them.
3137	 */
3138	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3139}
3140
3141/* Must be invoked under cp->lock. */
3142static void cas_init_pause_thresholds(struct cas *cp)
3143{
3144	/* Calculate pause thresholds.  Setting the OFF threshold to the
3145	 * full RX fifo size effectively disables PAUSE generation
3146	 */
3147	if (cp->rx_fifo_size <= (2 * 1024)) {
3148		cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3149	} else {
3150		int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3151		if (max_frame * 3 > cp->rx_fifo_size) {
3152			cp->rx_pause_off = 7104;
3153			cp->rx_pause_on  = 960;
3154		} else {
3155			int off = (cp->rx_fifo_size - (max_frame * 2));
3156			int on = off - max_frame;
3157			cp->rx_pause_off = off;
3158			cp->rx_pause_on = on;
3159		}
3160	}
3161}
3162
3163static int cas_vpd_match(const void __iomem *p, const char *str)
3164{
3165	int len = strlen(str) + 1;
3166	int i;
3167
3168	for (i = 0; i < len; i++) {
3169		if (readb(p + i) != str[i])
3170			return 0;
3171	}
3172	return 1;
3173}
3174
3175
3176/* get the mac address by reading the vpd information in the rom.
3177 * also get the phy type and determine if there's an entropy generator.
3178 * NOTE: this is a bit convoluted for the following reasons:
3179 *  1) vpd info has order-dependent mac addresses for multinic cards
3180 *  2) the only way to determine the nic order is to use the slot
3181 *     number.
3182 *  3) fiber cards don't have bridges, so their slot numbers don't
3183 *     mean anything.
3184 *  4) we don't actually know we have a fiber card until after
3185 *     the mac addresses are parsed.
3186 */
3187static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3188			    const int offset)
3189{
3190	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3191	void __iomem *base, *kstart;
3192	int i, len;
3193	int found = 0;
3194#define VPD_FOUND_MAC        0x01
3195#define VPD_FOUND_PHY        0x02
3196
3197	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3198	int mac_off  = 0;
3199
3200#if defined(CONFIG_SPARC)
3201	const unsigned char *addr;
3202#endif
3203
3204	/* give us access to the PROM */
3205	writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3206	       cp->regs + REG_BIM_LOCAL_DEV_EN);
3207
3208	/* check for an expansion rom */
3209	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3210		goto use_random_mac_addr;
3211
3212	/* search for beginning of vpd */
3213	base = NULL;
3214	for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3215		/* check for PCIR */
3216		if ((readb(p + i + 0) == 0x50) &&
3217		    (readb(p + i + 1) == 0x43) &&
3218		    (readb(p + i + 2) == 0x49) &&
3219		    (readb(p + i + 3) == 0x52)) {
3220			base = p + (readb(p + i + 8) |
3221				    (readb(p + i + 9) << 8));
3222			break;
3223		}
3224	}
3225
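	/* 0x82 is the large-resource "identifier string" tag; the VPD
	 * keywords live in the 0x90 (VPD-R) sections that follow it.
	 */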
3226	if (!base || (readb(base) != 0x82))
3227		goto use_random_mac_addr;
3228
3229	i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3230	while (i < EXPANSION_ROM_SIZE) {
3231		if (readb(base + i) != 0x90) /* no vpd found */
3232			goto use_random_mac_addr;
3233
3234		/* found a vpd field */
3235		len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3236
3237		/* extract keywords */
3238		kstart = base + i + 3;
3239		p = kstart;
3240		while ((p - kstart) < len) {
3241			int klen = readb(p + 2);
3242			int j;
3243			char type;
3244
3245			p += 3;
3246
3247			/* look for the following things:
3248			 * -- correct length == 29
3249			 * 3 (type) + 2 (size) +
3250			 * 18 (strlen("local-mac-address") + 1) +
3251			 * 6 (mac addr)
3252			 * -- VPD Instance 'I'
3253			 * -- VPD Type Bytes 'B'
3254			 * -- VPD data length == 6
3255			 * -- property string == local-mac-address
3256			 *
3257			 * -- correct length == 24
3258			 * 3 (type) + 2 (size) +
3259			 * 12 (strlen("entropy-dev") + 1) +
3260			 * 7 (strlen("vms110") + 1)
3261			 * -- VPD Instance 'I'
3262			 * -- VPD Type String 'S'
3263			 * -- VPD data length == 7
3264			 * -- property string == entropy-dev
3265			 *
3266			 * -- correct length == 18
3267			 * 3 (type) + 2 (size) +
3268			 * 9 (strlen("phy-type") + 1) +
3269			 * 4 (strlen("pcs") + 1)
3270			 * -- VPD Instance 'I'
3271			 * -- VPD Type String 'S'
3272			 * -- VPD data length == 4
3273			 * -- property string == phy-type
3274			 *
3275			 * -- correct length == 23
3276			 * 3 (type) + 2 (size) +
3277			 * 14 (strlen("phy-interface") + 1) +
3278			 * 4 (strlen("pcs") + 1)
3279			 * -- VPD Instance 'I'
3280			 * -- VPD Type String 'S'
3281			 * -- VPD data length == 4
3282			 * -- property string == phy-interface
3283			 */
3284			if (readb(p) != 'I')
3285				goto next;
3286
3287			/* finally, check string and length */
3288			type = readb(p + 3);
3289			if (type == 'B') {
3290				if ((klen == 29) && readb(p + 4) == 6 &&
3291				    cas_vpd_match(p + 5,
3292						  "local-mac-address")) {
3293					if (mac_off++ > offset)
3294						goto next;
3295
3296					/* set mac address */
3297					for (j = 0; j < 6; j++)
3298						dev_addr[j] =
3299							readb(p + 23 + j);
3300					goto found_mac;
3301				}
3302			}
3303
3304			if (type != 'S')
3305				goto next;
3306
3307#ifdef USE_ENTROPY_DEV
3308			if ((klen == 24) &&
3309			    cas_vpd_match(p + 5, "entropy-dev") &&
3310			    cas_vpd_match(p + 17, "vms110")) {
3311				cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3312				goto next;
3313			}
3314#endif
3315
3316			if (found & VPD_FOUND_PHY)
3317				goto next;
3318
3319			if ((klen == 18) && readb(p + 4) == 4 &&
3320			    cas_vpd_match(p + 5, "phy-type")) {
3321				if (cas_vpd_match(p + 14, "pcs")) {
3322					phy_type = CAS_PHY_SERDES;
3323					goto found_phy;
3324				}
3325			}
3326
3327			if ((klen == 23) && readb(p + 4) == 4 &&
3328			    cas_vpd_match(p + 5, "phy-interface")) {
3329				if (cas_vpd_match(p + 19, "pcs")) {
3330					phy_type = CAS_PHY_SERDES;
3331					goto found_phy;
3332				}
3333			}
3334found_mac:
3335			found |= VPD_FOUND_MAC;
3336			goto next;
3337
3338found_phy:
3339			found |= VPD_FOUND_PHY;
3340
3341next:
3342			p += klen;
3343		}
3344		i += len + 3;
3345	}
3346
3347use_random_mac_addr:
3348	if (found & VPD_FOUND_MAC)
3349		goto done;
3350
3351#if defined(CONFIG_SPARC)
3352	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3353	if (addr != NULL) {
3354		memcpy(dev_addr, addr, ETH_ALEN);
3355		goto done;
3356	}
3357#endif
3358
3359	/* Sun MAC prefix then 3 random bytes. */
3360	pr_info("MAC address not found in ROM VPD\n");
3361	dev_addr[0] = 0x08;
3362	dev_addr[1] = 0x00;
3363	dev_addr[2] = 0x20;
3364	get_random_bytes(dev_addr + 3, 3);
3365
3366done:
3367	writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3368	return phy_type;
3369}
3370
3371/* check pci invariants */
3372static void cas_check_pci_invariants(struct cas *cp)
3373{
3374	struct pci_dev *pdev = cp->pdev;
3375
3376	cp->cas_flags = 0;
3377	if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3378	    (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3379		if (pdev->revision >= CAS_ID_REVPLUS)
3380			cp->cas_flags |= CAS_FLAG_REG_PLUS;
3381		if (pdev->revision < CAS_ID_REVPLUS02u)
3382			cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3383
3384		/* Original Cassini supports HW CSUM, but it's not
3385		 * enabled by default as it can trigger TX hangs.
3386		 */
3387		if (pdev->revision < CAS_ID_REV2)
3388			cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3389	} else {
3390		/* Only sun has original cassini chips.  */
3391		cp->cas_flags |= CAS_FLAG_REG_PLUS;
3392
3393		/* We use a flag because the same phy might be externally
3394		 * connected.
3395		 */
3396		if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3397		    (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3398			cp->cas_flags |= CAS_FLAG_SATURN;
3399	}
3400}
3401
3402
3403static int cas_check_invariants(struct cas *cp)
3404{
3405	struct pci_dev *pdev = cp->pdev;
 
3406	u32 cfg;
3407	int i;
3408
3409	/* get page size for rx buffers. */
3410	cp->page_order = 0;
3411#ifdef USE_PAGE_ORDER
3412	if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3413		/* see if we can allocate larger pages */
3414		struct page *page = alloc_pages(GFP_ATOMIC,
3415						CAS_JUMBO_PAGE_SHIFT -
3416						PAGE_SHIFT);
3417		if (page) {
3418			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3419			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3420		} else {
3421			printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
3422		}
3423	}
3424#endif
3425	cp->page_size = (PAGE_SIZE << cp->page_order);
3426
3427	/* Fetch the FIFO configurations. */
3428	cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3429	cp->rx_fifo_size = RX_FIFO_SIZE;
3430
3431	/* finish phy determination. MDIO1 takes precedence over MDIO0 if
3432	 * they're both connected.
3433	 */
3434	cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3435					PCI_SLOT(pdev->devfn));
3436	if (cp->phy_type & CAS_PHY_SERDES) {
3437		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3438		return 0; /* no more checking needed */
3439	}
3440
3441	/* MII */
3442	cfg = readl(cp->regs + REG_MIF_CFG);
3443	if (cfg & MIF_CFG_MDIO_1) {
3444		cp->phy_type = CAS_PHY_MII_MDIO1;
3445	} else if (cfg & MIF_CFG_MDIO_0) {
3446		cp->phy_type = CAS_PHY_MII_MDIO0;
3447	}
3448
3449	cas_mif_poll(cp, 0);
3450	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3451
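	/* probe all 32 MII addresses, retrying each a few times, until
	 * a PHY answers with a valid ID.
	 */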
3452	for (i = 0; i < 32; i++) {
3453		u32 phy_id;
3454		int j;
3455
3456		for (j = 0; j < 3; j++) {
3457			cp->phy_addr = i;
3458			phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3459			phy_id |= cas_phy_read(cp, MII_PHYSID2);
3460			if (phy_id && (phy_id != 0xFFFFFFFF)) {
3461				cp->phy_id = phy_id;
3462				goto done;
3463			}
3464		}
3465	}
3466	pr_err("MII phy did not respond [%08x]\n",
3467	       readl(cp->regs + REG_MIF_STATE_MACHINE));
3468	return -1;
3469
3470done:
3471	/* see if we can do gigabit */
3472	cfg = cas_phy_read(cp, MII_BMSR);
3473	if ((cfg & CAS_BMSR_1000_EXTEND) &&
3474	    cas_phy_read(cp, CAS_MII_1000_EXTEND))
3475		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3476	return 0;
3477}
3478
3479/* Must be invoked under cp->lock. */
3480static inline void cas_start_dma(struct cas *cp)
3481{
3482	int i;
3483	u32 val;
3484	int txfailed = 0;
3485
3486	/* enable dma */
3487	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3488	writel(val, cp->regs + REG_TX_CFG);
3489	val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3490	writel(val, cp->regs + REG_RX_CFG);
3491
3492	/* enable the mac */
3493	val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3494	writel(val, cp->regs + REG_MAC_TX_CFG);
3495	val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3496	writel(val, cp->regs + REG_MAC_RX_CFG);
3497
3498	i = STOP_TRIES;
3499	while (i-- > 0) {
3500		val = readl(cp->regs + REG_MAC_TX_CFG);
3501		if ((val & MAC_TX_CFG_EN))
3502			break;
3503		udelay(10);
3504	}
3505	if (i < 0) txfailed = 1;
3506	i = STOP_TRIES;
3507	while (i-- > 0) {
3508		val = readl(cp->regs + REG_MAC_RX_CFG);
3509		if ((val & MAC_RX_CFG_EN)) {
3510			if (txfailed) {
3511				netdev_err(cp->dev,
3512					   "enabling mac failed [tx:%08x:%08x]\n",
3513					   readl(cp->regs + REG_MIF_STATE_MACHINE),
3514					   readl(cp->regs + REG_MAC_STATE_MACHINE));
3515			}
3516			goto enable_rx_done;
3517		}
3518		udelay(10);
3519	}
3520	netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3521		   (txfailed ? "tx,rx" : "rx"),
3522		   readl(cp->regs + REG_MIF_STATE_MACHINE),
3523		   readl(cp->regs + REG_MAC_STATE_MACHINE));
3524
3525enable_rx_done:
3526	cas_unmask_intr(cp); /* enable interrupts */
3527	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3528	writel(0, cp->regs + REG_RX_COMP_TAIL);
3529
3530	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3531		if (N_RX_DESC_RINGS > 1)
3532			writel(RX_DESC_RINGN_SIZE(1) - 4,
3533			       cp->regs + REG_PLUS_RX_KICK1);
3534
3535		for (i = 1; i < N_RX_COMP_RINGS; i++)
3536			writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3537	}
3538}
3539
3540/* Must be invoked under cp->lock. */
3541static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3542				   int *pause)
3543{
3544	u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3545	*fd     = (val & PCS_MII_LPA_FD) ? 1 : 0;
3546	*pause  = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3547	if (val & PCS_MII_LPA_ASYM_PAUSE)
3548		*pause |= 0x10;
3549	*spd = 1000;
3550}
3551
3552/* Must be invoked under cp->lock. */
3553static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3554				   int *pause)
3555{
3556	u32 val;
3557
3558	*fd = 0;
3559	*spd = 10;
3560	*pause = 0;
3561
3562	/* use GMII registers */
3563	val = cas_phy_read(cp, MII_LPA);
3564	if (val & CAS_LPA_PAUSE)
3565		*pause = 0x01;
3566
3567	if (val & CAS_LPA_ASYM_PAUSE)
3568		*pause |= 0x10;
3569
3570	if (val & LPA_DUPLEX)
3571		*fd = 1;
3572	if (val & LPA_100)
3573		*spd = 100;
3574
3575	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3576		val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3577		if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3578			*spd = 1000;
3579		if (val & CAS_LPA_1000FULL)
3580			*fd = 1;
3581	}
3582}
3583
3584/* A link-up condition has occurred, initialize and enable the
3585 * rest of the chip.
3586 *
3587 * Must be invoked under cp->lock.
3588 */
3589static void cas_set_link_modes(struct cas *cp)
3590{
3591	u32 val;
3592	int full_duplex, speed, pause;
3593
3594	full_duplex = 0;
3595	speed = 10;
3596	pause = 0;
3597
3598	if (CAS_PHY_MII(cp->phy_type)) {
3599		cas_mif_poll(cp, 0);
3600		val = cas_phy_read(cp, MII_BMCR);
3601		if (val & BMCR_ANENABLE) {
3602			cas_read_mii_link_mode(cp, &full_duplex, &speed,
3603					       &pause);
3604		} else {
3605			if (val & BMCR_FULLDPLX)
3606				full_duplex = 1;
3607
3608			if (val & BMCR_SPEED100)
3609				speed = 100;
3610			else if (val & CAS_BMCR_SPEED1000)
3611				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3612					1000 : 100;
3613		}
3614		cas_mif_poll(cp, 1);
3615
3616	} else {
3617		val = readl(cp->regs + REG_PCS_MII_CTRL);
3618		cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3619		if ((val & PCS_MII_AUTONEG_EN) == 0) {
3620			if (val & PCS_MII_CTRL_DUPLEX)
3621				full_duplex = 1;
3622		}
3623	}
3624
3625	netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3626		   speed, full_duplex ? "full" : "half");
3627
3628	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3629	if (CAS_PHY_MII(cp->phy_type)) {
3630		val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3631		if (!full_duplex)
3632			val |= MAC_XIF_DISABLE_ECHO;
3633	}
3634	if (full_duplex)
3635		val |= MAC_XIF_FDPLX_LED;
3636	if (speed == 1000)
3637		val |= MAC_XIF_GMII_MODE;
3638	writel(val, cp->regs + REG_MAC_XIF_CFG);
3639
3640	/* deal with carrier and collision detect. */
3641	val = MAC_TX_CFG_IPG_EN;
3642	if (full_duplex) {
3643		val |= MAC_TX_CFG_IGNORE_CARRIER;
3644		val |= MAC_TX_CFG_IGNORE_COLL;
3645	} else {
3646#ifndef USE_CSMA_CD_PROTO
3647		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3648		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3649#endif
3650	}
3651	/* val now set up for REG_MAC_TX_CFG */
3652
3653	/* If gigabit and half-duplex, enable carrier extension
3654	 * mode.  increase slot time to 512 bytes as well.
3655	 * else, disable it and make sure slot time is 64 bytes.
3656	 * also activate checksum bug workaround
3657	 */
3658	if ((speed == 1000) && !full_duplex) {
3659		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3660		       cp->regs + REG_MAC_TX_CFG);
3661
3662		val = readl(cp->regs + REG_MAC_RX_CFG);
3663		val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
3664		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3665		       cp->regs + REG_MAC_RX_CFG);
3666
3667		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3668
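		/* the FCS stays on the frame; the rx path strips it and
		 * fixes up the hardware checksum accordingly.
		 */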
3669		cp->crc_size = 4;
3670		/* minimum size gigabit frame at half duplex */
3671		cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3672
3673	} else {
3674		writel(val, cp->regs + REG_MAC_TX_CFG);
3675
3676		/* checksum bug workaround. don't strip FCS when in
3677		 * half-duplex mode
3678		 */
3679		val = readl(cp->regs + REG_MAC_RX_CFG);
3680		if (full_duplex) {
3681			val |= MAC_RX_CFG_STRIP_FCS;
3682			cp->crc_size = 0;
3683			cp->min_frame_size = CAS_MIN_MTU;
3684		} else {
3685			val &= ~MAC_RX_CFG_STRIP_FCS;
3686			cp->crc_size = 4;
3687			cp->min_frame_size = CAS_MIN_FRAME;
3688		}
3689		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3690		       cp->regs + REG_MAC_RX_CFG);
3691		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3692	}
3693
3694	if (netif_msg_link(cp)) {
3695		if (pause & 0x01) {
3696			netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3697				    cp->rx_fifo_size,
3698				    cp->rx_pause_off,
3699				    cp->rx_pause_on);
3700		} else if (pause & 0x10) {
3701			netdev_info(cp->dev, "TX pause enabled\n");
3702		} else {
3703			netdev_info(cp->dev, "Pause is disabled\n");
3704		}
3705	}
3706
3707	val = readl(cp->regs + REG_MAC_CTRL_CFG);
3708	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3709	if (pause) { /* symmetric or asymmetric pause */
3710		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3711		if (pause & 0x01) { /* symmetric pause */
3712			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3713		}
3714	}
3715	writel(val, cp->regs + REG_MAC_CTRL_CFG);
3716	cas_start_dma(cp);
3717}
3718
3719/* Must be invoked under cp->lock. */
3720static void cas_init_hw(struct cas *cp, int restart_link)
3721{
3722	if (restart_link)
3723		cas_phy_init(cp);
3724
3725	cas_init_pause_thresholds(cp);
3726	cas_init_mac(cp);
3727	cas_init_dma(cp);
3728
3729	if (restart_link) {
3730		/* Default aneg parameters */
3731		cp->timer_ticks = 0;
3732		cas_begin_auto_negotiation(cp, NULL);
3733	} else if (cp->lstate == link_up) {
3734		cas_set_link_modes(cp);
3735		netif_carrier_on(cp->dev);
3736	}
3737}
3738
3739/* Must be invoked under cp->lock. on earlier cassini boards,
3740 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
3741 * let it settle out, and then restore pci state.
3742 */
3743static void cas_hard_reset(struct cas *cp)
3744{
3745	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3746	udelay(20);
3747	pci_restore_state(cp->pdev);
3748}
3749
3750
3751static void cas_global_reset(struct cas *cp, int blkflag)
3752{
3753	int limit;
3754
3755	/* issue a global reset. don't use RSTOUT. */
3756	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3757		/* For PCS, when the blkflag is set, we should set the
3758		 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
3759		 * the last autonegotiation from being cleared.  We'll
3760		 * need some special handling if the chip is set into a
3761		 * loopback mode.
3762		 */
3763		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3764		       cp->regs + REG_SW_RESET);
3765	} else {
3766		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3767	}
3768
3769	/* need to wait at least 3ms before polling register */
3770	mdelay(3);
3771
3772	limit = STOP_TRIES;
3773	while (limit-- > 0) {
3774		u32 val = readl(cp->regs + REG_SW_RESET);
3775		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3776			goto done;
3777		udelay(10);
3778	}
3779	netdev_err(cp->dev, "sw reset failed\n");
3780
3781done:
3782	/* enable various BIM interrupts */
3783	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3784	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3785
3786	/* clear out pci error status mask for handled errors.
3787	 * we don't deal with DMA counter overflows as they happen
3788	 * all the time.
3789	 */
3790	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3791			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3792			       PCI_ERR_BIM_DMA_READ), cp->regs +
3793	       REG_PCI_ERR_STATUS_MASK);
3794
3795	/* set up for MII by default to address mac rx reset timeout
3796	 * issue
3797	 */
3798	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3799}
3800
3801static void cas_reset(struct cas *cp, int blkflag)
3802{
3803	u32 val;
3804
3805	cas_mask_intr(cp);
3806	cas_global_reset(cp, blkflag);
3807	cas_mac_reset(cp);
3808	cas_entropy_reset(cp);
3809
3810	/* disable dma engines. */
3811	val = readl(cp->regs + REG_TX_CFG);
3812	val &= ~TX_CFG_DMA_EN;
3813	writel(val, cp->regs + REG_TX_CFG);
3814
3815	val = readl(cp->regs + REG_RX_CFG);
3816	val &= ~RX_CFG_DMA_EN;
3817	writel(val, cp->regs + REG_RX_CFG);
3818
3819	/* program header parser */
3820	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3821	    (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3822		cas_load_firmware(cp, CAS_HP_FIRMWARE);
3823	} else {
3824		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3825	}
3826
3827	/* clear out error registers */
3828	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3829	cas_clear_mac_err(cp);
3830	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3831}
3832
3833/* Shut down the chip, must be called with pm_mutex held.  */
3834static void cas_shutdown(struct cas *cp)
3835{
3836	unsigned long flags;
3837
3838	/* Make us not-running to avoid timers respawning */
3839	cp->hw_running = 0;
3840
3841	del_timer_sync(&cp->link_timer);
3842
3843	/* Stop the reset task */
3844#if 0
3845	while (atomic_read(&cp->reset_task_pending_mtu) ||
3846	       atomic_read(&cp->reset_task_pending_spare) ||
3847	       atomic_read(&cp->reset_task_pending_all))
3848		schedule();
3849
3850#else
3851	while (atomic_read(&cp->reset_task_pending))
3852		schedule();
3853#endif
3854	/* Actually stop the chip */
3855	cas_lock_all_save(cp, flags);
3856	cas_reset(cp, 0);
3857	if (cp->cas_flags & CAS_FLAG_SATURN)
3858		cas_phy_powerdown(cp);
3859	cas_unlock_all_restore(cp, flags);
3860}
3861
3862static int cas_change_mtu(struct net_device *dev, int new_mtu)
3863{
3864	struct cas *cp = netdev_priv(dev);
3865
3866	if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
3867		return -EINVAL;
3868
3869	dev->mtu = new_mtu;
3870	if (!netif_running(dev) || !netif_device_present(dev))
3871		return 0;
3872
3873	/* let the reset task handle it */
3874#if 1
3875	atomic_inc(&cp->reset_task_pending);
3876	if ((cp->phy_type & CAS_PHY_SERDES)) {
3877		atomic_inc(&cp->reset_task_pending_all);
3878	} else {
3879		atomic_inc(&cp->reset_task_pending_mtu);
3880	}
3881	schedule_work(&cp->reset_task);
3882#else
3883	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3884		   CAS_RESET_ALL : CAS_RESET_MTU);
3885	pr_err("reset called in cas_change_mtu\n");
3886	schedule_work(&cp->reset_task);
3887#endif
3888
3889	flush_work(&cp->reset_task);
3890	return 0;
3891}
3892
3893static void cas_clean_txd(struct cas *cp, int ring)
3894{
3895	struct cas_tx_desc *txd = cp->init_txds[ring];
3896	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3897	u64 daddr, dlen;
3898	int i, size;
3899
3900	size = TX_DESC_RINGN_SIZE(ring);
3901	for (i = 0; i < size; i++) {
3902		int frag;
3903
3904		if (skbs[i] == NULL)
3905			continue;
3906
3907		skb = skbs[i];
3908		skbs[i] = NULL;
3909
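		/* the linear head counts as one buffer, hence <= nr_frags */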
3910		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags;  frag++) {
3911			int ent = i & (size - 1);
3912
3913			/* first buffer is never a tiny buffer and so
3914			 * needs to be unmapped.
3915			 */
3916			daddr = le64_to_cpu(txd[ent].buffer);
3917			dlen  =  CAS_VAL(TX_DESC_BUFLEN,
3918					 le64_to_cpu(txd[ent].control));
3919			pci_unmap_page(cp->pdev, daddr, dlen,
3920				       PCI_DMA_TODEVICE);
3921
3922			if (frag != skb_shinfo(skb)->nr_frags) {
3923				i++;
3924
3925				/* next buffer might be a tiny buffer.
3926				 * skip past it.
3927				 */
3928				ent = i & (size - 1);
3929				if (cp->tx_tiny_use[ring][ent].used)
3930					i++;
3931			}
3932		}
3933		dev_kfree_skb_any(skb);
3934	}
3935
3936	/* zero out tiny buf usage */
3937	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3938}
3939
3940/* freed on close */
3941static inline void cas_free_rx_desc(struct cas *cp, int ring)
3942{
3943	cas_page_t **page = cp->rx_pages[ring];
3944	int i, size;
3945
3946	size = RX_DESC_RINGN_SIZE(ring);
3947	for (i = 0; i < size; i++) {
3948		if (page[i]) {
3949			cas_page_free(cp, page[i]);
3950			page[i] = NULL;
3951		}
3952	}
3953}
3954
3955static void cas_free_rxds(struct cas *cp)
3956{
3957	int i;
3958
3959	for (i = 0; i < N_RX_DESC_RINGS; i++)
3960		cas_free_rx_desc(cp, i);
3961}
3962
3963/* Must be invoked under cp->lock. */
3964static void cas_clean_rings(struct cas *cp)
3965{
3966	int i;
3967
3968	/* need to clean all tx rings */
3969	memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3970	memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3971	for (i = 0; i < N_TX_RINGS; i++)
3972		cas_clean_txd(cp, i);
3973
3974	/* zero out init block */
3975	memset(cp->init_block, 0, sizeof(struct cas_init_block));
3976	cas_clean_rxds(cp);
3977	cas_clean_rxcs(cp);
3978}
3979
3980/* allocated on open */
3981static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3982{
3983	cas_page_t **page = cp->rx_pages[ring];
3984	int size, i = 0;
3985
3986	size = RX_DESC_RINGN_SIZE(ring);
3987	for (i = 0; i < size; i++) {
3988		if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3989			return -1;
3990	}
3991	return 0;
3992}
3993
3994static int cas_alloc_rxds(struct cas *cp)
3995{
3996	int i;
3997
3998	for (i = 0; i < N_RX_DESC_RINGS; i++) {
3999		if (cas_alloc_rx_desc(cp, i) < 0) {
4000			cas_free_rxds(cp);
4001			return -1;
4002		}
4003	}
4004	return 0;
4005}
4006
4007static void cas_reset_task(struct work_struct *work)
4008{
4009	struct cas *cp = container_of(work, struct cas, reset_task);
4010#if 0
4011	int pending = atomic_read(&cp->reset_task_pending);
4012#else
4013	int pending_all = atomic_read(&cp->reset_task_pending_all);
4014	int pending_spare = atomic_read(&cp->reset_task_pending_spare);
4015	int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
4016
4017	if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
4018		/* We can have more tasks scheduled than actually
4019		 * needed.
4020		 */
4021		atomic_dec(&cp->reset_task_pending);
4022		return;
4023	}
4024#endif
4025	/* The link went down; we reset the ring but keep
4026	 * DMA stopped.  This function is also used for
4027	 * reset on error.
4028	 */
4029	if (cp->hw_running) {
4030		unsigned long flags;
4031
4032		/* Make sure we don't get interrupts or tx packets */
4033		netif_device_detach(cp->dev);
4034		cas_lock_all_save(cp, flags);
4035
4036		if (cp->opened) {
4037			/* We call cas_spare_recover when we call cas_open,
4038			 * but we do not initialize the lists cas_spare_recover
4039			 * uses until cas_open is called.
4040			 */
4041			cas_spare_recover(cp, GFP_ATOMIC);
4042		}
4043#if 1
4044		/* test => only pending_spare set */
4045		if (!pending_all && !pending_mtu)
4046			goto done;
4047#else
4048		if (pending == CAS_RESET_SPARE)
4049			goto done;
4050#endif
4051		/* When a full reset (CAS_RESET_ALL) is pending, the
4052		 * cas_init_hw() call below restarts autonegotiation.
4053		 * cas_reset()'s second argument is therefore
4054		 * !(pending == CAS_RESET_ALL): it becomes 1 when
4055		 * autonegotiation is not being restarted, which avoids
4056		 * reinitializing the PHY for the normal PCS case.
4057		 */
4058#if 1
4059		cas_reset(cp, !(pending_all > 0));
4060		if (cp->opened)
4061			cas_clean_rings(cp);
4062		cas_init_hw(cp, (pending_all > 0));
4063#else
4064		cas_reset(cp, !(pending == CAS_RESET_ALL));
4065		if (cp->opened)
4066			cas_clean_rings(cp);
4067		cas_init_hw(cp, pending == CAS_RESET_ALL);
4068#endif
4069
4070done:
4071		cas_unlock_all_restore(cp, flags);
4072		netif_device_attach(cp->dev);
4073	}
4074#if 1
4075	atomic_sub(pending_all, &cp->reset_task_pending_all);
4076	atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4077	atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4078	atomic_dec(&cp->reset_task_pending);
4079#else
4080	atomic_set(&cp->reset_task_pending, 0);
4081#endif
4082}
4083
4084static void cas_link_timer(unsigned long data)
4085{
4086	struct cas *cp = (struct cas *) data;
4087	int mask, pending = 0, reset = 0;
4088	unsigned long flags;
4089
4090	if (link_transition_timeout != 0 &&
4091	    cp->link_transition_jiffies_valid &&
4092	    ((jiffies - cp->link_transition_jiffies) >
4093	      (link_transition_timeout))) {
4094		/* One-second counter so link-down workaround doesn't
4095		 * cause resets to occur so fast as to fool the switch
4096		 * into thinking the link is down.
4097		 */
4098		cp->link_transition_jiffies_valid = 0;
4099	}
4100
4101	if (!cp->hw_running)
4102		return;
4103
4104	spin_lock_irqsave(&cp->lock, flags);
4105	cas_lock_tx(cp);
4106	cas_entropy_gather(cp);
4107
4108	/* If a reset task is still pending, we just
4109	 * reschedule the link timer.
4110	 */
4111#if 1
4112	if (atomic_read(&cp->reset_task_pending_all) ||
4113	    atomic_read(&cp->reset_task_pending_spare) ||
4114	    atomic_read(&cp->reset_task_pending_mtu))
4115		goto done;
4116#else
4117	if (atomic_read(&cp->reset_task_pending))
4118		goto done;
4119#endif
4120
4121	/* check for rx cleaning */
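	/* A ring whose CAS_FLAG_RXD_POST bit is set still owes posted
	 * descriptors to the hardware; retry the post here and clear
	 * the flag only once cas_post_rxds_ringN() succeeds.
	 */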
4122	if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4123		int i, rmask;
4124
4125		for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4126			rmask = CAS_FLAG_RXD_POST(i);
4127			if ((mask & rmask) == 0)
4128				continue;
4129
4130			/* post_rxds will do a mod_timer */
4131			if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4132				pending = 1;
4133				continue;
4134			}
4135			cp->cas_flags &= ~rmask;
4136		}
4137	}
4138
4139	if (CAS_PHY_MII(cp->phy_type)) {
4140		u16 bmsr;
4141		cas_mif_poll(cp, 0);
4142		bmsr = cas_phy_read(cp, MII_BMSR);
4143		/* WTZ: Solaris driver reads this twice, but that
4144		 * may be due to the PCS case and the use of a
4145		 * common implementation. Read it twice here to be
4146		 * safe.
4147		 */
4148		bmsr = cas_phy_read(cp, MII_BMSR);
4149		cas_mif_poll(cp, 1);
4150		readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
4151		reset = cas_mii_link_check(cp, bmsr);
4152	} else {
4153		reset = cas_pcs_link_check(cp);
4154	}
4155
4156	if (reset)
4157		goto done;
4158
4159	/* check for tx state machine confusion */
4160	if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4161		u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4162		u32 wptr, rptr;
4163		int tlm  = CAS_VAL(MAC_SM_TLM, val);
4164
4165		if (((tlm == 0x5) || (tlm == 0x3)) &&
4166		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4167			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4168				     "tx err: MAC_STATE[%08x]\n", val);
4169			reset = 1;
4170			goto done;
4171		}
4172
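		/* Zero packets counted in the TX FIFO while the write and
		 * read pointers disagree means data is stuck in the FIFO,
		 * so a hard reset is forced below.
		 */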
4173		val  = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4174		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4175		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4176		if ((val == 0) && (wptr != rptr)) {
4177			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4178				     "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4179				     val, wptr, rptr);
4180			reset = 1;
4181		}
4182
4183		if (reset)
4184			cas_hard_reset(cp);
4185	}
4186
4187done:
4188	if (reset) {
4189#if 1
4190		atomic_inc(&cp->reset_task_pending);
4191		atomic_inc(&cp->reset_task_pending_all);
4192		schedule_work(&cp->reset_task);
4193#else
4194		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4195		pr_err("reset called in cas_link_timer\n");
4196		schedule_work(&cp->reset_task);
4197#endif
4198	}
4199
4200	if (!pending)
4201		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4202	cas_unlock_tx(cp);
4203	spin_unlock_irqrestore(&cp->lock, flags);
4204}
4205
4206/* tiny buffers are used to avoid target abort issues with
4207	 * older Cassini chips.
4208 */
4209static void cas_tx_tiny_free(struct cas *cp)
4210{
4211	struct pci_dev *pdev = cp->pdev;
4212	int i;
4213
4214	for (i = 0; i < N_TX_RINGS; i++) {
4215		if (!cp->tx_tiny_bufs[i])
4216			continue;
4217
4218		pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4219				    cp->tx_tiny_bufs[i],
4220				    cp->tx_tiny_dvma[i]);
4221		cp->tx_tiny_bufs[i] = NULL;
4222	}
4223}
4224
4225static int cas_tx_tiny_alloc(struct cas *cp)
4226{
4227	struct pci_dev *pdev = cp->pdev;
4228	int i;
4229
4230	for (i = 0; i < N_TX_RINGS; i++) {
4231		cp->tx_tiny_bufs[i] =
4232			pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4233					     &cp->tx_tiny_dvma[i]);
4234		if (!cp->tx_tiny_bufs[i]) {
4235			cas_tx_tiny_free(cp);
4236			return -1;
4237		}
4238	}
4239	return 0;
4240}
4241
4242
4243static int cas_open(struct net_device *dev)
4244{
4245	struct cas *cp = netdev_priv(dev);
4246	int hw_was_up, err;
4247	unsigned long flags;
4248
4249	mutex_lock(&cp->pm_mutex);
4250
4251	hw_was_up = cp->hw_running;
4252
4253	/* The power-management mutex protects the hw_running
4254	 * etc. state so it is safe to do this bit without cp->lock
4255	 */
4256	if (!cp->hw_running) {
4257		/* Reset the chip */
4258		cas_lock_all_save(cp, flags);
4259		/* We set the second arg to cas_reset to zero
4260		 * because cas_init_hw below will have its second
4261		 * argument set to non-zero, which will force
4262		 * autonegotiation to start.
4263		 */
4264		cas_reset(cp, 0);
4265		cp->hw_running = 1;
4266		cas_unlock_all_restore(cp, flags);
4267	}
4268
4269	err = -ENOMEM;
4270	if (cas_tx_tiny_alloc(cp) < 0)
4271		goto err_unlock;
4272
4273	/* alloc rx descriptors */
4274	if (cas_alloc_rxds(cp) < 0)
4275		goto err_tx_tiny;
4276
4277	/* allocate spares */
4278	cas_spare_init(cp);
4279	cas_spare_recover(cp, GFP_KERNEL);
4280
4281	/* We can now request the interrupt as we know it's masked
4282	 * on the controller. cassini+ has up to 4 interrupts
4283	 * that can be used, but you need to do explicit pci interrupt
4284	 * mapping to expose them
4285	 */
4286	if (request_irq(cp->pdev->irq, cas_interrupt,
4287			IRQF_SHARED, dev->name, (void *) dev)) {
4288		netdev_err(cp->dev, "failed to request irq !\n");
4289		err = -EAGAIN;
4290		goto err_spare;
4291	}
4292
4293#ifdef USE_NAPI
4294	napi_enable(&cp->napi);
4295#endif
4296	/* init hw */
4297	cas_lock_all_save(cp, flags);
4298	cas_clean_rings(cp);
4299	cas_init_hw(cp, !hw_was_up);
4300	cp->opened = 1;
4301	cas_unlock_all_restore(cp, flags);
4302
4303	netif_start_queue(dev);
4304	mutex_unlock(&cp->pm_mutex);
4305	return 0;
4306
4307err_spare:
4308	cas_spare_free(cp);
4309	cas_free_rxds(cp);
4310err_tx_tiny:
4311	cas_tx_tiny_free(cp);
4312err_unlock:
4313	mutex_unlock(&cp->pm_mutex);
4314	return err;
4315}
4316
4317static int cas_close(struct net_device *dev)
4318{
4319	unsigned long flags;
4320	struct cas *cp = netdev_priv(dev);
4321
4322#ifdef USE_NAPI
4323	napi_disable(&cp->napi);
4324#endif
4325	/* Make sure we don't get distracted by suspend/resume */
4326	mutex_lock(&cp->pm_mutex);
4327
4328	netif_stop_queue(dev);
4329
4330	/* Stop traffic, mark us closed */
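	/* Clearing cp->opened under the locks also keeps a concurrent
	 * cas_reset_task() from touching the rings or spare lists
	 * during teardown.
	 */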
4331	cas_lock_all_save(cp, flags);
4332	cp->opened = 0;
4333	cas_reset(cp, 0);
4334	cas_phy_init(cp);
4335	cas_begin_auto_negotiation(cp, NULL);
4336	cas_clean_rings(cp);
4337	cas_unlock_all_restore(cp, flags);
4338
4339	free_irq(cp->pdev->irq, (void *) dev);
4340	cas_spare_free(cp);
4341	cas_free_rxds(cp);
4342	cas_tx_tiny_free(cp);
4343	mutex_unlock(&cp->pm_mutex);
4344	return 0;
4345}
4346
4347static struct {
4348	const char name[ETH_GSTRING_LEN];
4349} ethtool_cassini_statnames[] = {
4350	{"collisions"},
4351	{"rx_bytes"},
4352	{"rx_crc_errors"},
4353	{"rx_dropped"},
4354	{"rx_errors"},
4355	{"rx_fifo_errors"},
4356	{"rx_frame_errors"},
4357	{"rx_length_errors"},
4358	{"rx_over_errors"},
4359	{"rx_packets"},
4360	{"tx_aborted_errors"},
4361	{"tx_bytes"},
4362	{"tx_dropped"},
4363	{"tx_errors"},
4364	{"tx_fifo_errors"},
4365	{"tx_packets"}
4366};
4367#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4368
4369static struct {
4370	const int offsets;	/* neg. values for 2nd arg to cas_read_phy */
4371} ethtool_register_table[] = {
4372	{-MII_BMSR},
4373	{-MII_BMCR},
4374	{REG_CAWR},
4375	{REG_INF_BURST},
4376	{REG_BIM_CFG},
4377	{REG_RX_CFG},
4378	{REG_HP_CFG},
4379	{REG_MAC_TX_CFG},
4380	{REG_MAC_RX_CFG},
4381	{REG_MAC_CTRL_CFG},
4382	{REG_MAC_XIF_CFG},
4383	{REG_MIF_CFG},
4384	{REG_PCS_CFG},
4385	{REG_SATURN_PCFG},
4386	{REG_PCS_MII_STATUS},
4387	{REG_PCS_STATE_MACHINE},
4388	{REG_MAC_COLL_EXCESS},
4389	{REG_MAC_COLL_LATE}
4390};
4391#define CAS_REG_LEN 	ARRAY_SIZE(ethtool_register_table)
4392#define CAS_MAX_REGS 	(sizeof (u32)*CAS_REG_LEN)
4393
4394static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4395{
4396	u8 *p;
4397	int i;
4398	unsigned long flags;
4399
4400	spin_lock_irqsave(&cp->lock, flags);
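	/* Callers pass a length capped by cas_get_regs_len() at
	 * CAS_MAX_REGS (one u32 per entry), so i stays within
	 * ethtool_register_table.
	 */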
4401	for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
4402		u16 hval;
4403		u32 val;
4404		if (ethtool_register_table[i].offsets < 0) {
4405			hval = cas_phy_read(cp,
4406				    -ethtool_register_table[i].offsets);
4407			val = hval;
4408		} else {
4409			val = readl(cp->regs + ethtool_register_table[i].offsets);
4410		}
4411		memcpy(p, (u8 *)&val, sizeof(u32));
4412	}
4413	spin_unlock_irqrestore(&cp->lock, flags);
4414}
4415
4416static struct net_device_stats *cas_get_stats(struct net_device *dev)
4417{
4418	struct cas *cp = netdev_priv(dev);
4419	struct net_device_stats *stats = cp->net_stats;
4420	unsigned long flags;
4421	int i;
4422	unsigned long tmp;
4423
4424	/* we collate all of the stats into net_stats[N_TX_RINGS] */
4425	if (!cp->hw_running)
4426		return stats + N_TX_RINGS;
4427
4428	/* collect outstanding stats */
4429	/* WTZ: the Cassini spec gives these as 16 bit counters but
4430	 * stored in 32-bit words.  Added a mask of 0xffff to be safe,
4431	 * in case the chip somehow puts any garbage in the other bits.
4432	 * Also, counter usage didn't seem to match what Adrian did
4433	 * in the parts of the code that set these quantities. Made
4434	 * that consistent.
4435	 */
4436	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4437	stats[N_TX_RINGS].rx_crc_errors +=
4438	  readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4439	stats[N_TX_RINGS].rx_frame_errors +=
4440		readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
4441	stats[N_TX_RINGS].rx_length_errors +=
4442		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4443#if 1
4444	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4445		(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4446	stats[N_TX_RINGS].tx_aborted_errors += tmp;
4447	stats[N_TX_RINGS].collisions +=
4448	  tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4449#else
4450	stats[N_TX_RINGS].tx_aborted_errors +=
4451		readl(cp->regs + REG_MAC_COLL_EXCESS);
4452	stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4453		readl(cp->regs + REG_MAC_COLL_LATE);
4454#endif
4455	cas_clear_mac_err(cp);
4456
4457	/* saved bits that are unique to ring 0 */
4458	spin_lock(&cp->stat_lock[0]);
4459	stats[N_TX_RINGS].collisions        += stats[0].collisions;
4460	stats[N_TX_RINGS].rx_over_errors    += stats[0].rx_over_errors;
4461	stats[N_TX_RINGS].rx_frame_errors   += stats[0].rx_frame_errors;
4462	stats[N_TX_RINGS].rx_fifo_errors    += stats[0].rx_fifo_errors;
4463	stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4464	stats[N_TX_RINGS].tx_fifo_errors    += stats[0].tx_fifo_errors;
4465	spin_unlock(&cp->stat_lock[0]);
4466
4467	for (i = 0; i < N_TX_RINGS; i++) {
4468		spin_lock(&cp->stat_lock[i]);
4469		stats[N_TX_RINGS].rx_length_errors +=
4470			stats[i].rx_length_errors;
4471		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4472		stats[N_TX_RINGS].rx_packets    += stats[i].rx_packets;
4473		stats[N_TX_RINGS].tx_packets    += stats[i].tx_packets;
4474		stats[N_TX_RINGS].rx_bytes      += stats[i].rx_bytes;
4475		stats[N_TX_RINGS].tx_bytes      += stats[i].tx_bytes;
4476		stats[N_TX_RINGS].rx_errors     += stats[i].rx_errors;
4477		stats[N_TX_RINGS].tx_errors     += stats[i].tx_errors;
4478		stats[N_TX_RINGS].rx_dropped    += stats[i].rx_dropped;
4479		stats[N_TX_RINGS].tx_dropped    += stats[i].tx_dropped;
4480		memset(stats + i, 0, sizeof(struct net_device_stats));
4481		spin_unlock(&cp->stat_lock[i]);
4482	}
4483	spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4484	return stats + N_TX_RINGS;
4485}
4486
4487
4488static void cas_set_multicast(struct net_device *dev)
4489{
4490	struct cas *cp = netdev_priv(dev);
4491	u32 rxcfg, rxcfg_new;
4492	unsigned long flags;
4493	int limit = STOP_TRIES;
4494
4495	if (!cp->hw_running)
4496		return;
4497
4498	spin_lock_irqsave(&cp->lock, flags);
4499	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4500
4501	/* disable RX MAC and wait for completion */
4502	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4503	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4504		if (!limit--)
4505			break;
4506		udelay(10);
4507	}
4508
4509	/* disable hash filter and wait for completion */
4510	limit = STOP_TRIES;
4511	rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4512	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4513	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4514		if (!limit--)
4515			break;
4516		udelay(10);
4517	}
4518
4519	/* program hash filters */
4520	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4521	rxcfg |= rxcfg_new;
4522	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4523	spin_unlock_irqrestore(&cp->lock, flags);
4524}
4525
4526static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4527{
4528	struct cas *cp = netdev_priv(dev);
4529	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4530	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4531	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4532}
4533
4534static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4535{
4536	struct cas *cp = netdev_priv(dev);
4537	u16 bmcr;
4538	int full_duplex, speed, pause;
4539	unsigned long flags;
4540	enum link_state linkstate = link_up;
4541
4542	cmd->advertising = 0;
4543	cmd->supported = SUPPORTED_Autoneg;
4544	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4545		cmd->supported |= SUPPORTED_1000baseT_Full;
4546		cmd->advertising |= ADVERTISED_1000baseT_Full;
4547	}
4548
4549	/* Record PHY settings if HW is on. */
4550	spin_lock_irqsave(&cp->lock, flags);
4551	bmcr = 0;
4552	linkstate = cp->lstate;
4553	if (CAS_PHY_MII(cp->phy_type)) {
4554		cmd->port = PORT_MII;
4555		cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
4556			XCVR_INTERNAL : XCVR_EXTERNAL;
4557		cmd->phy_address = cp->phy_addr;
4558		cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4559			ADVERTISED_10baseT_Half |
4560			ADVERTISED_10baseT_Full |
4561			ADVERTISED_100baseT_Half |
4562			ADVERTISED_100baseT_Full;
4563
4564		cmd->supported |=
4565			(SUPPORTED_10baseT_Half |
4566			 SUPPORTED_10baseT_Full |
4567			 SUPPORTED_100baseT_Half |
4568			 SUPPORTED_100baseT_Full |
4569			 SUPPORTED_TP | SUPPORTED_MII);
4570
4571		if (cp->hw_running) {
4572			cas_mif_poll(cp, 0);
4573			bmcr = cas_phy_read(cp, MII_BMCR);
4574			cas_read_mii_link_mode(cp, &full_duplex,
4575					       &speed, &pause);
4576			cas_mif_poll(cp, 1);
4577		}
4578
4579	} else {
4580		cmd->port = PORT_FIBRE;
4581		cmd->transceiver = XCVR_INTERNAL;
4582		cmd->phy_address = 0;
4583		cmd->supported   |= SUPPORTED_FIBRE;
4584		cmd->advertising |= ADVERTISED_FIBRE;
4585
4586		if (cp->hw_running) {
4587			/* pcs uses the same bits as mii */
4588			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4589			cas_read_pcs_link_mode(cp, &full_duplex,
4590					       &speed, &pause);
4591		}
4592	}
4593	spin_unlock_irqrestore(&cp->lock, flags);
4594
4595	if (bmcr & BMCR_ANENABLE) {
4596		cmd->advertising |= ADVERTISED_Autoneg;
4597		cmd->autoneg = AUTONEG_ENABLE;
4598		ethtool_cmd_speed_set(cmd, ((speed == 10) ?
4599					    SPEED_10 :
4600					    ((speed == 1000) ?
4601					     SPEED_1000 : SPEED_100)));
4602		cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4603	} else {
4604		cmd->autoneg = AUTONEG_DISABLE;
4605		ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
4606					    SPEED_1000 :
4607					    ((bmcr & BMCR_SPEED100) ?
4608					     SPEED_100 : SPEED_10)));
4609		cmd->duplex =
4610			(bmcr & BMCR_FULLDPLX) ?
4611			DUPLEX_FULL : DUPLEX_HALF;
4612	}
4613	if (linkstate != link_up) {
4614		/* Force these to "unknown" if the link is not up and
4615		 * autonegotiation is enabled. We can set the link
4616		 * speed to 0, but not cmd->duplex,
4617		 * because its legal values are 0 and 1.  Ethtool will
4618		 * print the value reported in parentheses after the
4619		 * word "Unknown" for unrecognized values.
4620		 *
4621		 * If in forced mode, we report the speed and duplex
4622		 * settings that we configured.
4623		 */
4624		if (cp->link_cntl & BMCR_ANENABLE) {
4625			ethtool_cmd_speed_set(cmd, 0);
4626			cmd->duplex = 0xff;
4627		} else {
4628			ethtool_cmd_speed_set(cmd, SPEED_10);
4629			if (cp->link_cntl & BMCR_SPEED100) {
4630				ethtool_cmd_speed_set(cmd, SPEED_100);
4631			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4632				ethtool_cmd_speed_set(cmd, SPEED_1000);
4633			}
4634			cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
4635				DUPLEX_FULL : DUPLEX_HALF;
4636		}
4637	}
4638	return 0;
4639}
4640
4641static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4642{
4643	struct cas *cp = netdev_priv(dev);
4644	unsigned long flags;
4645	u32 speed = ethtool_cmd_speed(cmd);
4646
4647	/* Verify the settings we care about. */
4648	if (cmd->autoneg != AUTONEG_ENABLE &&
4649	    cmd->autoneg != AUTONEG_DISABLE)
4650		return -EINVAL;
4651
4652	if (cmd->autoneg == AUTONEG_DISABLE &&
4653	    ((speed != SPEED_1000 &&
4654	      speed != SPEED_100 &&
4655	      speed != SPEED_10) ||
4656	     (cmd->duplex != DUPLEX_HALF &&
4657	      cmd->duplex != DUPLEX_FULL)))
4658		return -EINVAL;
4659
4660	/* Apply settings and restart link process. */
4661	spin_lock_irqsave(&cp->lock, flags);
4662	cas_begin_auto_negotiation(cp, cmd);
4663	spin_unlock_irqrestore(&cp->lock, flags);
4664	return 0;
4665}
4666
4667static int cas_nway_reset(struct net_device *dev)
4668{
4669	struct cas *cp = netdev_priv(dev);
4670	unsigned long flags;
4671
4672	if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4673		return -EINVAL;
4674
4675	/* Restart link process. */
4676	spin_lock_irqsave(&cp->lock, flags);
4677	cas_begin_auto_negotiation(cp, NULL);
4678	spin_unlock_irqrestore(&cp->lock, flags);
4679
4680	return 0;
4681}
4682
4683static u32 cas_get_link(struct net_device *dev)
4684{
4685	struct cas *cp = netdev_priv(dev);
4686	return cp->lstate == link_up;
4687}
4688
4689static u32 cas_get_msglevel(struct net_device *dev)
4690{
4691	struct cas *cp = netdev_priv(dev);
4692	return cp->msg_enable;
4693}
4694
4695static void cas_set_msglevel(struct net_device *dev, u32 value)
4696{
4697	struct cas *cp = netdev_priv(dev);
4698	cp->msg_enable = value;
4699}
4700
4701static int cas_get_regs_len(struct net_device *dev)
4702{
4703	struct cas *cp = netdev_priv(dev);
4704	return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
4705}
4706
4707static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4708			     void *p)
4709{
4710	struct cas *cp = netdev_priv(dev);
4711	regs->version = 0;
4712	/* cas_read_regs handles locks (cp->lock).  */
4713	cas_read_regs(cp, p, regs->len / sizeof(u32));
4714}
4715
4716static int cas_get_sset_count(struct net_device *dev, int sset)
4717{
4718	switch (sset) {
4719	case ETH_SS_STATS:
4720		return CAS_NUM_STAT_KEYS;
4721	default:
4722		return -EOPNOTSUPP;
4723	}
4724}
4725
4726static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4727{
4728	memcpy(data, &ethtool_cassini_statnames,
4729	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4730}
4731
4732static void cas_get_ethtool_stats(struct net_device *dev,
4733				      struct ethtool_stats *estats, u64 *data)
4734{
4735	struct cas *cp = netdev_priv(dev);
4736	struct net_device_stats *stats = cas_get_stats(cp->dev);
4737	int i = 0;
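	/* Order here must match ethtool_cassini_statnames[] above;
	 * the BUG_ON at the end checks only the count.
	 */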
4738	data[i++] = stats->collisions;
4739	data[i++] = stats->rx_bytes;
4740	data[i++] = stats->rx_crc_errors;
4741	data[i++] = stats->rx_dropped;
4742	data[i++] = stats->rx_errors;
4743	data[i++] = stats->rx_fifo_errors;
4744	data[i++] = stats->rx_frame_errors;
4745	data[i++] = stats->rx_length_errors;
4746	data[i++] = stats->rx_over_errors;
4747	data[i++] = stats->rx_packets;
4748	data[i++] = stats->tx_aborted_errors;
4749	data[i++] = stats->tx_bytes;
4750	data[i++] = stats->tx_dropped;
4751	data[i++] = stats->tx_errors;
4752	data[i++] = stats->tx_fifo_errors;
4753	data[i++] = stats->tx_packets;
4754	BUG_ON(i != CAS_NUM_STAT_KEYS);
4755}
4756
4757static const struct ethtool_ops cas_ethtool_ops = {
4758	.get_drvinfo		= cas_get_drvinfo,
4759	.get_settings		= cas_get_settings,
4760	.set_settings		= cas_set_settings,
4761	.nway_reset		= cas_nway_reset,
4762	.get_link		= cas_get_link,
4763	.get_msglevel		= cas_get_msglevel,
4764	.set_msglevel		= cas_set_msglevel,
4765	.get_regs_len		= cas_get_regs_len,
4766	.get_regs		= cas_get_regs,
4767	.get_sset_count		= cas_get_sset_count,
4768	.get_strings		= cas_get_strings,
4769	.get_ethtool_stats	= cas_get_ethtool_stats,
4770};
4771
4772static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4773{
4774	struct cas *cp = netdev_priv(dev);
4775	struct mii_ioctl_data *data = if_mii(ifr);
4776	unsigned long flags;
4777	int rc = -EOPNOTSUPP;
4778
4779	/* Hold the PM mutex while doing ioctls or we may collide
4780	 * with open/close and power management and oops.
4781	 */
4782	mutex_lock(&cp->pm_mutex);
4783	switch (cmd) {
4784	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
4785		data->phy_id = cp->phy_addr;
4786		/* Fallthrough... */
4787
4788	case SIOCGMIIREG:		/* Read MII PHY register. */
4789		spin_lock_irqsave(&cp->lock, flags);
4790		cas_mif_poll(cp, 0);
4791		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4792		cas_mif_poll(cp, 1);
4793		spin_unlock_irqrestore(&cp->lock, flags);
4794		rc = 0;
4795		break;
4796
4797	case SIOCSMIIREG:		/* Write MII PHY register. */
4798		spin_lock_irqsave(&cp->lock, flags);
4799		cas_mif_poll(cp, 0);
4800		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4801		cas_mif_poll(cp, 1);
4802		spin_unlock_irqrestore(&cp->lock, flags);
4803		break;
4804	default:
4805		break;
4806	}
4807
4808	mutex_unlock(&cp->pm_mutex);
4809	return rc;
4810}
4811
4812/* When this chip sits underneath an Intel 31154 bridge, it is the
4813 * only subordinate device and we can tweak the bridge settings to
4814 * reflect that fact.
4815 */
4816static void cas_program_bridge(struct pci_dev *cas_pdev)
4817{
4818	struct pci_dev *pdev = cas_pdev->bus->self;
4819	u32 val;
4820
4821	if (!pdev)
4822		return;
4823
4824	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4825		return;
4826
4827	/* Clear bit 10 (Bus Parking Control) in the Secondary
4828	 * Arbiter Control/Status Register which lives at offset
4829	 * 0x41.  Using a 32-bit word read/modify/write at 0x40
4830	 * is much simpler so that's how we do this.
4831	 */
4832	pci_read_config_dword(pdev, 0x40, &val);
4833	val &= ~0x00040000;
4834	pci_write_config_dword(pdev, 0x40, val);
4835
4836	/* Max out the Multi-Transaction Timer settings since
4837	 * Cassini is the only device present.
4838	 *
4839	 * The register is 16-bit and lives at 0x50.  When the
4840	 * settings are enabled, it extends the GRANT# signal
4841	 * for a requestor after a transaction is complete.  This
4842	 * allows the next request to run without first needing
4843	 * to negotiate the GRANT# signal back.
4844	 *
4845	 * Bits 12:10 define the grant duration:
4846	 *
4847	 *	1	--	16 clocks
4848	 *	2	--	32 clocks
4849	 *	3	--	64 clocks
4850	 *	4	--	128 clocks
4851	 *	5	--	256 clocks
4852	 *
4853	 * All other values are illegal.
4854	 *
4855	 * Bits 09:00 define which REQ/GNT signal pairs get the
4856	 * GRANT# signal treatment.  We set them all.
4857	 */
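	/* (5 << 10) selects the 256-clock grant duration from the table
	 * above; 0x3ff enables the treatment on all ten REQ/GNT pairs.
	 */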
4858	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
4859
4860	/* The Read Prefetch Policy register is 16-bit and sits at
4861	 * offset 0x52.  It enables a "smart" pre-fetch policy.  We
4862	 * enable it and max out all of the settings since only one
4863	 * device is sitting underneath and thus bandwidth sharing is
4864	 * not an issue.
4865	 *
4866	 * The register has several 3-bit fields, which indicate a
4867	 * multiplier applied to the base amount of prefetching the
4868	 * chip would do.  These fields are at:
4869	 *
4870	 *	15:13	---	ReRead Primary Bus
4871	 *	12:10	---	FirstRead Primary Bus
4872	 *	09:07	---	ReRead Secondary Bus
4873	 *	06:04	---	FirstRead Secondary Bus
4874	 *
4875	 * Bits 03:00 control which REQ/GNT pairs the prefetch settings
4876	 * get enabled on.  Bit 3 is a grouped enabler which controls
4877	 * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
4878	 * the individual REQ/GNT pairs [2:0].
4879	 */
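	/* 0x7 in each multiplier field maxes out prefetching; 0xf in
	 * bits 03:00 turns it on for every REQ/GNT pair.
	 */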
4880	pci_write_config_word(pdev, 0x52,
4881			      (0x7 << 13) |
4882			      (0x7 << 10) |
4883			      (0x7 <<  7) |
4884			      (0x7 <<  4) |
4885			      (0xf <<  0));
4886
4887	/* Force cacheline size to 0x8 */
4888	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4889
4890	/* Force latency timer to maximum setting so Cassini can
4891	 * sit on the bus as long as it likes.
4892	 */
4893	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4894}
4895
4896static const struct net_device_ops cas_netdev_ops = {
4897	.ndo_open		= cas_open,
4898	.ndo_stop		= cas_close,
4899	.ndo_start_xmit		= cas_start_xmit,
4900	.ndo_get_stats 		= cas_get_stats,
4901	.ndo_set_rx_mode	= cas_set_multicast,
4902	.ndo_do_ioctl		= cas_ioctl,
4903	.ndo_tx_timeout		= cas_tx_timeout,
4904	.ndo_change_mtu		= cas_change_mtu,
4905	.ndo_set_mac_address	= eth_mac_addr,
4906	.ndo_validate_addr	= eth_validate_addr,
4907#ifdef CONFIG_NET_POLL_CONTROLLER
4908	.ndo_poll_controller	= cas_netpoll,
4909#endif
4910};
4911
4912static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4913{
4914	static int cas_version_printed = 0;
4915	unsigned long casreg_len;
4916	struct net_device *dev;
4917	struct cas *cp;
4918	int i, err, pci_using_dac;
4919	u16 pci_cmd;
4920	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4921
4922	if (cas_version_printed++ == 0)
4923		pr_info("%s", version);
4924
4925	err = pci_enable_device(pdev);
4926	if (err) {
4927		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4928		return err;
4929	}
4930
4931	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4932		dev_err(&pdev->dev, "Cannot find proper PCI device "
4933		       "base address, aborting\n");
4934		err = -ENODEV;
4935		goto err_out_disable_pdev;
4936	}
4937
4938	dev = alloc_etherdev(sizeof(*cp));
4939	if (!dev) {
4940		err = -ENOMEM;
4941		goto err_out_disable_pdev;
4942	}
4943	SET_NETDEV_DEV(dev, &pdev->dev);
4944
4945	err = pci_request_regions(pdev, dev->name);
4946	if (err) {
4947		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4948		goto err_out_free_netdev;
4949	}
4950	pci_set_master(pdev);
4951
4952	/* we must always turn on parity response or else parity
4953	 * doesn't get generated properly. disable SERR/PERR as well.
4954	 * in addition, we want to turn MWI on.
4955	 */
4956	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4957	pci_cmd &= ~PCI_COMMAND_SERR;
4958	pci_cmd |= PCI_COMMAND_PARITY;
4959	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4960	if (pci_try_set_mwi(pdev))
4961		pr_warn("Could not enable MWI for %s\n", pci_name(pdev));
4962
4963	cas_program_bridge(pdev);
4964
4965	/*
4966	 * On some architectures, the default cache line size set
4967	 * by pci_try_set_mwi reduces performance.  We have to increase
4968	 * it for this case: read the current value and, if it is below
4969	 * CAS_PREF_CACHELINE_SIZE, raise it (clamped to SMP_CACHE_BYTES).
4970	 */
4971#if 1
4972	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4973			     &orig_cacheline_size);
4974	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4975		cas_cacheline_size =
4976			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4977			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4978		if (pci_write_config_byte(pdev,
4979					  PCI_CACHE_LINE_SIZE,
4980					  cas_cacheline_size)) {
4981			dev_err(&pdev->dev, "Could not set PCI cache "
4982			       "line size\n");
4983			goto err_write_cacheline;
4984		}
4985	}
4986#endif
4987
4988
4989	/* Configure DMA attributes. */
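	/* Prefer a 64-bit DMA mask so NETIF_F_HIGHDMA can be set
	 * below; fall back to 32-bit addressing if the platform
	 * cannot provide it.
	 */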
4990	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4991		pci_using_dac = 1;
4992		err = pci_set_consistent_dma_mask(pdev,
4993						  DMA_BIT_MASK(64));
4994		if (err < 0) {
4995			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
4996			       "for consistent allocations\n");
4997			goto err_out_free_res;
4998		}
4999
5000	} else {
5001		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5002		if (err) {
5003			dev_err(&pdev->dev, "No usable DMA configuration, "
5004			       "aborting\n");
5005			goto err_out_free_res;
5006		}
5007		pci_using_dac = 0;
5008	}
5009
5010	casreg_len = pci_resource_len(pdev, 0);
5011
5012	cp = netdev_priv(dev);
5013	cp->pdev = pdev;
5014#if 1
5015	/* A value of 0 indicates we never explicitly set it */
5016	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
5017#endif
5018	cp->dev = dev;
5019	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5020	  cassini_debug;
5021
5022#if defined(CONFIG_SPARC)
5023	cp->of_node = pci_device_to_OF_node(pdev);
5024#endif
5025
5026	cp->link_transition = LINK_TRANSITION_UNKNOWN;
5027	cp->link_transition_jiffies_valid = 0;
5028
5029	spin_lock_init(&cp->lock);
5030	spin_lock_init(&cp->rx_inuse_lock);
5031	spin_lock_init(&cp->rx_spare_lock);
5032	for (i = 0; i < N_TX_RINGS; i++) {
5033		spin_lock_init(&cp->stat_lock[i]);
5034		spin_lock_init(&cp->tx_lock[i]);
5035	}
5036	spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
5037	mutex_init(&cp->pm_mutex);
5038
5039	init_timer(&cp->link_timer);
5040	cp->link_timer.function = cas_link_timer;
5041	cp->link_timer.data = (unsigned long) cp;
5042
5043#if 1
5044	/* Just in case the implementation of atomic operations
5045	 * changes so that an explicit initialization is necessary.
5046	 */
5047	atomic_set(&cp->reset_task_pending, 0);
5048	atomic_set(&cp->reset_task_pending_all, 0);
5049	atomic_set(&cp->reset_task_pending_spare, 0);
5050	atomic_set(&cp->reset_task_pending_mtu, 0);
5051#endif
5052	INIT_WORK(&cp->reset_task, cas_reset_task);
5053
5054	/* Default link parameters */
5055	if (link_mode >= 0 && link_mode < 6)
5056		cp->link_cntl = link_modes[link_mode];
5057	else
5058		cp->link_cntl = BMCR_ANENABLE;
5059	cp->lstate = link_down;
5060	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
5061	netif_carrier_off(cp->dev);
5062	cp->timer_ticks = 0;
5063
5064	/* give us access to cassini registers */
5065	cp->regs = pci_iomap(pdev, 0, casreg_len);
5066	if (!cp->regs) {
5067		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5068		goto err_out_free_res;
5069	}
5070	cp->casreg_len = casreg_len;
5071
5072	pci_save_state(pdev);
5073	cas_check_pci_invariants(cp);
5074	cas_hard_reset(cp);
5075	cas_reset(cp, 0);
5076	if (cas_check_invariants(cp))
5077		goto err_out_iounmap;
5078	if (cp->cas_flags & CAS_FLAG_SATURN)
5079		cas_saturn_firmware_init(cp);
5080
5081	cp->init_block = (struct cas_init_block *)
5082		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5083				     &cp->block_dvma);
5084	if (!cp->init_block) {
5085		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5086		goto err_out_iounmap;
5087	}
5088
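	/* Wire the per-ring descriptor and completion pointers to
	 * their slots inside the single DMA-coherent init block.
	 */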
5089	for (i = 0; i < N_TX_RINGS; i++)
5090		cp->init_txds[i] = cp->init_block->txds[i];
5091
5092	for (i = 0; i < N_RX_DESC_RINGS; i++)
5093		cp->init_rxds[i] = cp->init_block->rxds[i];
5094
5095	for (i = 0; i < N_RX_COMP_RINGS; i++)
5096		cp->init_rxcs[i] = cp->init_block->rxcs[i];
5097
5098	for (i = 0; i < N_RX_FLOWS; i++)
5099		skb_queue_head_init(&cp->rx_flows[i]);
5100
5101	dev->netdev_ops = &cas_netdev_ops;
5102	dev->ethtool_ops = &cas_ethtool_ops;
5103	dev->watchdog_timeo = CAS_TX_TIMEOUT;
5104
5105#ifdef USE_NAPI
5106	netif_napi_add(dev, &cp->napi, cas_poll, 64);
5107#endif
5108	dev->irq = pdev->irq;
5109	dev->dma = 0;
5110
5111	/* Cassini features. */
5112	if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5113		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5114
5115	if (pci_using_dac)
5116		dev->features |= NETIF_F_HIGHDMA;
5117
5118	if (register_netdev(dev)) {
5119		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5120		goto err_out_free_consistent;
5121	}
5122
5123	i = readl(cp->regs + REG_BIM_CFG);
5124	netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5125		    (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5126		    (i & BIM_CFG_32BIT) ? "32" : "64",
5127		    (i & BIM_CFG_66MHZ) ? "66" : "33",
5128		    (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5129		    dev->dev_addr);
5130
5131	pci_set_drvdata(pdev, dev);
5132	cp->hw_running = 1;
5133	cas_entropy_reset(cp);
5134	cas_phy_init(cp);
5135	cas_begin_auto_negotiation(cp, NULL);
5136	return 0;
5137
5138err_out_free_consistent:
5139	pci_free_consistent(pdev, sizeof(struct cas_init_block),
5140			    cp->init_block, cp->block_dvma);
5141
5142err_out_iounmap:
5143	mutex_lock(&cp->pm_mutex);
5144	if (cp->hw_running)
5145		cas_shutdown(cp);
5146	mutex_unlock(&cp->pm_mutex);
5147
5148	pci_iounmap(pdev, cp->regs);
5149
5150
5151err_out_free_res:
5152	pci_release_regions(pdev);
5153
5154err_write_cacheline:
5155	/* Try to restore it in case the error occurred after we
5156	 * set it.
5157	 */
5158	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5159
5160err_out_free_netdev:
5161	free_netdev(dev);
5162
5163err_out_disable_pdev:
5164	pci_disable_device(pdev);
5165	return -ENODEV;
5166}
5167
5168static void cas_remove_one(struct pci_dev *pdev)
5169{
5170	struct net_device *dev = pci_get_drvdata(pdev);
5171	struct cas *cp;
5172	if (!dev)
5173		return;
5174
5175	cp = netdev_priv(dev);
5176	unregister_netdev(dev);
5177
5178	vfree(cp->fw_data);
5179
5180	mutex_lock(&cp->pm_mutex);
5181	cancel_work_sync(&cp->reset_task);
5182	if (cp->hw_running)
5183		cas_shutdown(cp);
5184	mutex_unlock(&cp->pm_mutex);
5185
5186#if 1
5187	if (cp->orig_cacheline_size) {
5188		/* Restore the cache line size if we had modified
5189		 * it.
5190		 */
5191		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5192				      cp->orig_cacheline_size);
5193	}
5194#endif
5195	pci_free_consistent(pdev, sizeof(struct cas_init_block),
5196			    cp->init_block, cp->block_dvma);
5197	pci_iounmap(pdev, cp->regs);
5198	free_netdev(dev);
5199	pci_release_regions(pdev);
5200	pci_disable_device(pdev);
5201}
5202
5203#ifdef CONFIG_PM
5204static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5205{
5206	struct net_device *dev = pci_get_drvdata(pdev);
5207	struct cas *cp = netdev_priv(dev);
5208	unsigned long flags;
5209
5210	mutex_lock(&cp->pm_mutex);
5211
5212	/* If the driver is open, we stop the DMA */
5213	if (cp->opened) {
5214		netif_device_detach(dev);
5215
5216		cas_lock_all_save(cp, flags);
5217
5218		/* We can set the second arg of cas_reset to 0
5219		 * because on resume, we'll call cas_init_hw with
5220		 * its second arg set so that autonegotiation is
5221		 * restarted.
5222		 */
5223		cas_reset(cp, 0);
5224		cas_clean_rings(cp);
5225		cas_unlock_all_restore(cp, flags);
5226	}
5227
5228	if (cp->hw_running)
5229		cas_shutdown(cp);
5230	mutex_unlock(&cp->pm_mutex);
5231
5232	return 0;
5233}
5234
5235static int cas_resume(struct pci_dev *pdev)
5236{
5237	struct net_device *dev = pci_get_drvdata(pdev);
5238	struct cas *cp = netdev_priv(dev);
5239
5240	netdev_info(dev, "resuming\n");
5241
5242	mutex_lock(&cp->pm_mutex);
5243	cas_hard_reset(cp);
5244	if (cp->opened) {
5245		unsigned long flags;
5246		cas_lock_all_save(cp, flags);
5247		cas_reset(cp, 0);
5248		cp->hw_running = 1;
5249		cas_clean_rings(cp);
5250		cas_init_hw(cp, 1);
5251		cas_unlock_all_restore(cp, flags);
5252
5253		netif_device_attach(dev);
5254	}
5255	mutex_unlock(&cp->pm_mutex);
5256	return 0;
5257}
5258#endif /* CONFIG_PM */
5259
5260static struct pci_driver cas_driver = {
5261	.name		= DRV_MODULE_NAME,
5262	.id_table	= cas_pci_tbl,
5263	.probe		= cas_init_one,
5264	.remove		= cas_remove_one,
5265#ifdef CONFIG_PM
5266	.suspend	= cas_suspend,
5267	.resume		= cas_resume
5268#endif
5269};
5270
5271static int __init cas_init(void)
5272{
5273	if (linkdown_timeout > 0)
5274		link_transition_timeout = linkdown_timeout * HZ;
5275	else
5276		link_transition_timeout = 0;
5277
5278	return pci_register_driver(&cas_driver);
5279}
5280
5281static void __exit cas_cleanup(void)
5282{
5283	pci_unregister_driver(&cas_driver);
5284}
5285
5286module_init(cas_init);
5287module_exit(cas_cleanup);