// SPDX-License-Identifier: GPL-2.0+
/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 *
 * This driver uses the sungem driver (c) David Miller
 * (davem@redhat.com) as its basis.
 *
 * The cassini chip has a number of features that distinguish it from
 * the gem chip:
 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 *      load balancing (non-VLAN mode)
 *  batching of multiple packets
 *  multiple CPU dispatching
 *  page-based RX descriptor engine with separate completion rings
 *  Gigabit support (GMII and PCS interface)
 *  MIF link up/down detection works
 *
 * RX is handled by page sized buffers that are attached as fragments to
 * the skb. here's what's done:
 *  -- driver allocates pages at a time and keeps reference counts
 *     on them.
 *  -- the upper protocol layers assume that the header is in the skb
 *     itself. as a result, cassini will copy a small amount (64 bytes)
 *     to make them happy.
 *  -- driver appends the rest of the data pages as frags to skbuffs
 *     and increments the reference count
 *  -- on page reclamation, the driver swaps the page with a spare page.
 *     if that page is still in use, it frees its reference to that page,
 *     and allocates a new page for use. otherwise, it just recycles the
 *     page.
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 *       as long as the network stack requires a header copy.
 *
 * TX has 4 queues. currently these queues are used in a round-robin
 * fashion for load balancing. They can also be used for QoS. for that
 * to work, however, QoS information needs to be exposed down to the driver
 * level so that subqueues get targeted to particular transmit rings.
 * alternatively, the queues can be configured via use of the all-purpose
 * ioctl.
 *
 * RX DATA: the rx completion ring has all the info, but the rx desc
 * ring has all of the data. RX can conceivably come in under multiple
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>

#define CAS_NCPUS            num_online_cpus()

#define cas_skb_release(x)  netif_rx(x)

/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */

#include "cassini.h"

#define USE_TX_COMPWB      /* use completion writeback registers */
#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
#define USE_RX_BLANK       /* hw interrupt mitigation */
#undef USE_ENTROPY_DEV     /* don't test for entropy device */

/* NOTE: these aren't useable unless PCI interrupts can be assigned.
 * also, we need to make cp->lock finer-grained.
 */
#undef  USE_PCI_INTB
#undef  USE_PCI_INTC
#undef  USE_PCI_INTD
#undef  USE_QOS

#undef  USE_VPD_DEBUG       /* debug vpd information if defined */

/* rx processing options */
#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
#define RX_COPY_ALWAYS 0    /* if 0, use frags */
#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */

#define DRV_MODULE_NAME		"cassini"
#define DRV_MODULE_VERSION	"1.6"
#define DRV_MODULE_RELDATE	"21 May 2008"

#define CAS_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define CAS_TX_TIMEOUT			(HZ)
#define CAS_LINK_TIMEOUT                (22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT           (1)

/* timeout values for state changing. these specify the number
 * of 10us delays to be used before giving up.
 */
#define STOP_TRIES_PHY 1000
#define STOP_TRIES     5000

/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 *            2 * page_size - 0x50
 */
#define CAS_MIN_FRAME			97
#define CAS_1000MB_MIN_FRAME            255
#define CAS_MIN_MTU                     60
#define CAS_MAX_MTU                     min(((cp->page_size << 1) - 0x50), 9000)
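/* Worked example (illustrative): with an 8K rx page (cp->page_size ==
 * 0x2000), CAS_MAX_MTU evaluates to min((0x2000 << 1) - 0x50, 9000) ==
 * min(16304, 9000) == 9000, i.e. the usual jumbo-frame cap.
 */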

#if 1
/*
 * Eliminate these and use separate atomic counters for each, to
 * avoid a race condition.
 */
#else
#define CAS_RESET_MTU                   1
#define CAS_RESET_ALL                   2
#define CAS_RESET_SPARE                 3
#endif

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

/*
 * Work around for a PCS bug in which the link goes down due to the chip
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5
/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
static int link_transition_timeout;
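/* A minimal sketch (not the verbatim init path): module init would
 * convert the user-visible seconds into jiffies roughly as
 *
 *	if (linkdown_timeout > 0)
 *		link_transition_timeout = linkdown_timeout * HZ;
 */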

static u16 link_modes[] = {
	BMCR_ANENABLE,			 /* 0 : autoneg */
	0,				 /* 1 : 10bt half duplex */
	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
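/* Example (illustrative): loading the module with link_mode=4 selects
 * link_modes[4] == BMCR_SPEED100 | BMCR_FULLDPLX, i.e. forced 100 Mbps
 * full duplex with autonegotiation left disabled.
 */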

static const struct pci_device_id cas_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);

static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
	int i;

	for (i = 0; i < N_TX_RINGS; i++)
		spin_lock_nested(&cp->tx_lock[i], i);
}

/* WTZ: QA was finding deadlock problems with the previous
 * versions after long test runs with multiple cards per machine.
 * See if replacing cas_lock_all with safer versions helps. The
 * symptoms QA is reporting match those we'd expect if interrupts
 * aren't being properly restored, and we fixed a previous deadlock
 * with similar symptoms by using save/restore versions in other
 * places.
 */
#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)

static inline void cas_unlock_tx(struct cas *cp)
{
	int i;

	for (i = N_TX_RINGS; i > 0; i--)
		spin_unlock(&cp->tx_lock[i - 1]);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
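/* Usage sketch (illustrative only): the two macros pair up around any
 * section that must quiesce the whole device, e.g.
 *
 *	unsigned long flags;
 *
 *	cas_lock_all_save(cp, flags);
 *	... touch state guarded by cp->lock and every tx_lock ...
 *	cas_unlock_all_restore(cp, flags);
 *
 * cas_lock_tx() takes the tx locks in ascending ring order and
 * cas_unlock_tx() drops them in reverse, which keeps lock ordering
 * consistent across callers.
 */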

static void cas_disable_irq(struct cas *cp, const int ring)
{
	/* Make sure we won't get any more interrupts */
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	/* disable completion interrupts and selectively mask */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}

static inline void cas_mask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_disable_irq(cp, i);
}

static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) { /* all but TX_DONE */
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			break;
		}
	}
}

static inline void cas_unmask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_enable_irq(cp, i);
}

static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}

static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	/* if we read back 0x0, we don't have an entropy device */
	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}

/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{
	u32 cmd;
	int limit = STOP_TRIES_PHY;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return cmd & MIF_FRAME_DATA_MASK;
	}
	return 0xFFFF; /* -1 */
}

static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	cmd |= val & MIF_FRAME_DATA_MASK;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}
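/* The frames built above follow the IEEE 802.3 clause-22 MDIO layout
 * (start, opcode, 5-bit PHY address, 5-bit register, turnaround, then
 * 16 data bits); CAS_BASE() shifts each value into its field.  A
 * typical caller does a read-modify-write, e.g. (sketch):
 *
 *	u16 ctl = cas_phy_read(cp, MII_BMCR);
 *	ctl |= BMCR_ANENABLE;
 *	cas_phy_write(cp, MII_BMCR, ctl);
 */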

static void cas_phy_powerup(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if ((ctl & BMCR_PDOWN) == 0)
		return;
	ctl &= ~BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if (ctl & BMCR_PDOWN)
		return;
	ctl |= BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
		       DMA_FROM_DEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)       ((x)->used += (y))
#define RX_USED_SET(x, y)       ((x)->used  = (y))
#else
#define RX_USED_ADD(x, y) do { } while(0)
#define RX_USED_SET(x, y) do { } while(0)
#endif

/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(cas_page_t), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0);
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;
	page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
				      cp->page_size, DMA_FROM_DEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}
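/* Note (sketch, not current driver behaviour): dma_map_page() can fail,
 * so a defensive version of the above would verify the mapping before
 * returning, e.g.
 *
 *	if (dma_mapping_error(&cp->pdev->dev, page->dma_addr)) {
 *		__free_pages(page->buffer, cp->page_order);
 *		goto page_err;
 *	}
 */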

/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}

/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_spare_list, &list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	/*
	 * Looks like Adrian had protected this with a different
	 * lock than used everywhere else to manipulate this list.
	 */
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}

/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* check inuse list. if we don't need any more free buffers,
	 * just free it
	 */

	/* make a local copy of the list */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		/*
		 * With the lockless pagecache, cassini buffering scheme gets
		 * slightly less accurate: we might find that a page has an
		 * elevated reference count here, due to a speculative ref,
		 * and skip it as in-use. Ideally we would be able to reclaim
		 * it. However this would be such a rare case, it doesn't
		 * matter too much as we should pick it up the next time round.
		 *
		 * Importantly, if we find that the page has a refcount of 1
		 * here (our refcount), then we know it is definitely not inuse
		 * so we can reuse it.
		 */
		if (page_count(page->buffer) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any inuse buffers back on the list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* we still need spares, so try to allocate some */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}

/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try to do a quick recovery */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			netif_err(cp, rx_err, cp->dev,
				  "no spare buffers available\n");
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* trigger the timer to do the recovery */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}
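/* The (recover & (RX_SPARE_RECOVER_VAL - 1)) == 0 test above relies on
 * RX_SPARE_RECOVER_VAL being a power of two: the recovery work is only
 * scheduled when rx_spares_needed crosses a multiple of it, e.g.
 * (illustratively, for a value of 64) at 64, 128, 192, ... outstanding
 * spares, rather than on every dequeue.
 */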

static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg  = readl(cp->regs + REG_MIF_CFG);
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	/* poll and interrupt on link status change. */
	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}

/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp,
				       const struct ethtool_link_ksettings *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif
	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->base.autoneg == AUTONEG_ENABLE) {
		cp->link_cntl = BMCR_ANENABLE;
	} else {
		u32 speed = ep->base.speed;
		cp->link_cntl = 0;
		if (speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->base.duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		netdev_info(cp->dev, "PCS link down\n");
	} else {
		if (changed) {
			netdev_info(cp->dev, "link configuration changed\n");
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	/*
	 * WTZ: If the old state was link_up, we turn off the carrier
	 * to replicate everything we do elsewhere on a link-down
	 * event when we were already in a link-up state..
	 */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed  && link_was_not_down) {
		/*
		 * WTZ: This branch will simply schedule a full reset after
		 * we explicitly changed link modes in an ioctl. See if this
		 * fixes the link-problems we were having for forced mode.
		 */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}

/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{
	int limit = STOP_TRIES_PHY;
	u16 val;

	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
	udelay(100);
	while (--limit) {
		val = cas_phy_read(cp, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	return limit <= 0;
}

static void cas_saturn_firmware_init(struct cas *cp)
{
	const struct firmware *fw;
	const char fw_name[] = "sun/cassini.bin";
	int err;

	if (PHY_NS_DP83065 != cp->phy_id)
		return;

	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
	if (err) {
		pr_err("Failed to load firmware \"%s\"\n",
		       fw_name);
		return;
	}
	if (fw->size < 2) {
		pr_err("bogus length %zu in \"%s\"\n",
		       fw->size, fw_name);
		goto out;
	}
	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
	cp->fw_size = fw->size - 2;
	cp->fw_data = vmalloc(cp->fw_size);
	if (!cp->fw_data)
		goto out;
	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
	release_firmware(fw);
}
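/* Image layout, as parsed above: the first two bytes of
 * sun/cassini.bin hold the little-endian load address and the rest is
 * the payload.  E.g. (illustrative) a file starting 0x00 0x4e would
 * yield fw_load_addr == 0x4e00 and fw_size == file size - 2.
 */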

static void cas_saturn_firmware_load(struct cas *cp)
{
	int i;

	if (!cp->fw_data)
		return;

	cas_phy_powerdown(cp);

	/* expanded memory access mode */
	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	/* pointer configuration for new firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	/* download new firmware */
	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
	for (i = 0; i < cp->fw_size; i++)
		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);

	/* enable firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}


/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	/* if we're in MII/GMII mode, set up phy */
	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp); /* take out of isolate mode */

		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* workaround link up/down issue with lucent */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* workarounds for broadcom phy */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				/* link workaround */
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* load firmware to address 10Mbps auto-negotiation
			 * issue. NOTE: this will need to be changed if the
			 * default firmware gets fixed.
			 */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		/* advertise capabilities */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* make sure that we don't advertise half
			 * duplex to avoid a chip issue
			 */
			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		/* reset pcs for serdes */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* enable serdes pins on saturn */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* Reset PCS unit. */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (--limit > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
				    readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* Make sure PCS is disabled while changing advertisement
		 * configuration.
		 */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* Advertise all capabilities except half-duplex. */
		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* enable PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* pcs workaround: enable sync detect */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}


static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* The remote-fault indication is only valid
	 * when autoneg has completed.
	 */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");

	/* work around link detection issue by querying the PCS state
	 * machine directly.
	 */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/*
			 * force a reset, as a workaround for the
			 * link-failure problem. May want to move this to a
			 * point a bit earlier in the sequence. If we had
			 * generated a reset a short time ago, we'll wait for
			 * the link timer to check the status until a
			 * timer expires (link_transition_jiffies_valid is
			 * true when the timer is running.)  Instead of using
			 * a system timer, we just do a check whenever the
			 * link timer is running - this clears the flag after
			 * a suitable delay.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "PCS link down\n");

		/* Cassini only: if you force a mode, there can be
		 * sync problems on link down. to fix that, the following
		 * things need to be checked:
		 * 1) read serialink state register
		 * 2) read pcs status register to verify link down.
		 * 3) if link down and serial link == 0x03, then you need
		 *    to global reset the chip.
		 */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			/* should check to see if we're in a forced mode */
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* force a reset, as a workaround for the
			 * link-failure problem.  May want to move
			 * this to a point a bit earlier in the
			 * sequence.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}

static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
		return 0;
	return cas_pcs_link_check(cp);
}

static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		netdev_err(dev, "TX MAC max packet size error\n");
		cp->net_stats[0].tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* We do not keep track of MAC_TX_COLL_FIRST and
	 * MAC_TX_PEAK_ATTEMPTS events.
	 */
	return 0;
}

static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
		++firmware;
		++i;
	}
}

static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	/* rx free descriptors */
	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx desc 2 is for IPSEC packets. however,
		 * we don't use it for that purpose.
		 */
		val = (unsigned long) cp->init_rxds[1] -
			(unsigned long) cp->init_block;
		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
		writel((desc_dma + val) & 0xffffffff, cp->regs +
		       REG_PLUS_RX_DB1_LOW);
		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
		       REG_PLUS_RX_KICK1);
	}

	/* rx completion registers */
	val = (unsigned long) cp->init_rxcs[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx comp 2-4 */
		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
			val = (unsigned long) cp->init_rxcs[i] -
				(unsigned long) cp->init_block;
			writel((desc_dma + val) >> 32, cp->regs +
			       REG_PLUS_RX_CBN_HI(i));
			writel((desc_dma + val) & 0xffffffff, cp->regs +
			       REG_PLUS_RX_CBN_LOW(i));
		}
	}

	/* read selective clear regs to prevent spurious interrupts
	 * on reset because complete == kick.
	 * selective clear set up to prevent interrupts on resets
	 */
	readl(cp->regs + REG_INTR_STATUS_ALIAS);
	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);

	/* set up pause thresholds */
	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
	writel(val, cp->regs + REG_RX_PAUSE_THRESH);

	/* zero out dma reassembly buffers */
	for (i = 0; i < 64; i++) {
		writel(i, cp->regs + REG_RX_TABLE_ADDR);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
	}

	/* make sure address register is 0 for normal operation */
	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

	/* interrupt mitigation */
#ifdef USE_RX_BLANK
	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
	writel(val, cp->regs + REG_RX_BLANK);
#else
	writel(0x0, cp->regs + REG_RX_BLANK);
#endif

	/* interrupt generation as a function of low water marks for
	 * free desc and completion entries. these are used to trigger
	 * housekeeping for rx descs. we don't use the free interrupt
	 * as it's not very useful
	 */
	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
	writel(val, cp->regs + REG_RX_AE_THRESH);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
	}

	/* Random early detect registers. useful for congestion avoidance.
	 * this should be tunable.
	 */
	writel(0x0, cp->regs + REG_RX_RED);

	/* receive page sizes. default == 2K (0x800) */
	val = 0;
	if (cp->page_size == 0x1000)
		val = 0x1;
	else if (cp->page_size == 0x2000)
		val = 0x2;
	else if (cp->page_size == 0x4000)
		val = 0x3;

	/* round mtu + offset. constrain to page size. */
	size = cp->dev->mtu + 64;
	if (size > cp->page_size)
		size = cp->page_size;

	if (size <= 0x400)
		i = 0x0;
	else if (size <= 0x800)
		i = 0x1;
	else if (size <= 0x1000)
		i = 0x2;
	else
		i = 0x3;

	cp->mtu_stride = 1 << (i + 10);
	val  = CAS_BASE(RX_PAGE_SIZE, val);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
	writel(val, cp->regs + REG_RX_PAGE_SIZE);
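	/* Worked example (illustrative): with page_size == 0x2000 and a
	 * 1500 byte MTU, size == 1564 <= 0x800, so i == 0x1, mtu_stride
	 * == 1 << 11 == 0x800, and the MTU_COUNT field becomes
	 * 0x2000 >> 11 == 4, i.e. four MTU strides per 8K page.
	 */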
1312
1313	/* enable the header parser if desired */
1314	if (&CAS_HP_FIRMWARE[0] == &cas_prog_null[0])
1315		return;
1316
1317	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1318	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1319	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1320	writel(val, cp->regs + REG_HP_CFG);
1321}
1322
1323static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1324{
1325	memset(rxc, 0, sizeof(*rxc));
1326	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1327}
1328
1329/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1330 * flipping is protected by the fact that the chip will not
1331 * hand back the same page index while it's being processed.
1332 */
1333static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1334{
1335	cas_page_t *page = cp->rx_pages[1][index];
1336	cas_page_t *new;
1337
1338	if (page_count(page->buffer) == 1)
1339		return page;
1340
1341	new = cas_page_dequeue(cp);
1342	if (new) {
1343		spin_lock(&cp->rx_inuse_lock);
1344		list_add(&page->list, &cp->rx_inuse_list);
1345		spin_unlock(&cp->rx_inuse_lock);
1346	}
1347	return new;
1348}
1349
1350/* this needs to be changed if we actually use the ENC RX DESC ring */
1351static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1352				 const int index)
1353{
1354	cas_page_t **page0 = cp->rx_pages[0];
1355	cas_page_t **page1 = cp->rx_pages[1];
1356
1357	/* swap if buffer is in use */
1358	if (page_count(page0[index]->buffer) > 1) {
1359		cas_page_t *new = cas_page_spare(cp, index);
1360		if (new) {
1361			page1[index] = page0[index];
1362			page0[index] = new;
1363		}
1364	}
1365	RX_USED_SET(page0[index], 0);
1366	return page0[index];
1367}
1368
1369static void cas_clean_rxds(struct cas *cp)
1370{
1371	/* only clean ring 0 as ring 1 is used for spare buffers */
1372        struct cas_rx_desc *rxd = cp->init_rxds[0];
1373	int i, size;
1374
1375	/* release all rx flows */
1376	for (i = 0; i < N_RX_FLOWS; i++) {
1377		struct sk_buff *skb;
1378		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1379			cas_skb_release(skb);
1380		}
1381	}
1382
1383	/* initialize descriptors */
1384	size = RX_DESC_RINGN_SIZE(0);
1385	for (i = 0; i < size; i++) {
1386		cas_page_t *page = cas_page_swap(cp, 0, i);
1387		rxd[i].buffer = cpu_to_le64(page->dma_addr);
1388		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1389					    CAS_BASE(RX_INDEX_RING, 0));
1390	}
1391
1392	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
1393	cp->rx_last[0] = 0;
1394	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1395}
1396
1397static void cas_clean_rxcs(struct cas *cp)
1398{
1399	int i, j;
1400
1401	/* take ownership of rx comp descriptors */
1402	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1403	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1404	for (i = 0; i < N_RX_COMP_RINGS; i++) {
1405		struct cas_rx_comp *rxc = cp->init_rxcs[i];
1406		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1407			cas_rxc_init(rxc + j);
1408		}
1409	}
1410}
1411
1412#if 0
1413/* When we get a RX fifo overflow, the RX unit is probably hung
1414 * so we do the following.
1415 *
1416 * If any part of the reset goes wrong, we return 1 and that causes the
1417 * whole chip to be reset.
1418 */
1419static int cas_rxmac_reset(struct cas *cp)
1420{
1421	struct net_device *dev = cp->dev;
1422	int limit;
1423	u32 val;
1424
1425	/* First, reset MAC RX. */
1426	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1427	for (limit = 0; limit < STOP_TRIES; limit++) {
1428		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1429			break;
1430		udelay(10);
1431	}
1432	if (limit == STOP_TRIES) {
1433		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1434		return 1;
1435	}
1436
1437	/* Second, disable RX DMA. */
1438	writel(0, cp->regs + REG_RX_CFG);
1439	for (limit = 0; limit < STOP_TRIES; limit++) {
1440		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1441			break;
1442		udelay(10);
1443	}
1444	if (limit == STOP_TRIES) {
1445		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1446		return 1;
1447	}
1448
1449	mdelay(5);
1450
1451	/* Execute RX reset command. */
1452	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1453	for (limit = 0; limit < STOP_TRIES; limit++) {
1454		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1455			break;
1456		udelay(10);
1457	}
1458	if (limit == STOP_TRIES) {
1459		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1460		return 1;
1461	}
1462
1463	/* reset driver rx state */
1464	cas_clean_rxds(cp);
1465	cas_clean_rxcs(cp);
1466
1467	/* Now, reprogram the rest of RX unit. */
1468	cas_init_rx_dma(cp);
1469
1470	/* re-enable */
1471	val = readl(cp->regs + REG_RX_CFG);
1472	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1473	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1474	val = readl(cp->regs + REG_MAC_RX_CFG);
1475	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1476	return 0;
1477}
1478#endif
1479
1480static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1481			       u32 status)
1482{
1483	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1484
1485	if (!stat)
1486		return 0;
1487
1488	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1489
1490	/* these are all rollovers */
1491	spin_lock(&cp->stat_lock[0]);
1492	if (stat & MAC_RX_ALIGN_ERR)
1493		cp->net_stats[0].rx_frame_errors += 0x10000;
1494
1495	if (stat & MAC_RX_CRC_ERR)
1496		cp->net_stats[0].rx_crc_errors += 0x10000;
1497
1498	if (stat & MAC_RX_LEN_ERR)
1499		cp->net_stats[0].rx_length_errors += 0x10000;
1500
1501	if (stat & MAC_RX_OVERFLOW) {
1502		cp->net_stats[0].rx_over_errors++;
1503		cp->net_stats[0].rx_fifo_errors++;
1504	}
1505
1506	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
1507	 * events.
1508	 */
1509	spin_unlock(&cp->stat_lock[0]);
1510	return 0;
1511}
1512
1513static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1514			     u32 status)
1515{
1516	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1517
1518	if (!stat)
1519		return 0;
1520
1521	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1522		     "mac interrupt, stat: 0x%x\n", stat);
1523
1524	/* This interrupt is just for pause frame and pause
1525	 * tracking.  It is useful for diagnostics and debug
1526	 * but probably by default we will mask these events.
1527	 */
1528	if (stat & MAC_CTRL_PAUSE_STATE)
1529		cp->pause_entered++;
1530
1531	if (stat & MAC_CTRL_PAUSE_RECEIVED)
1532		cp->pause_last_time_recvd = (stat >> 16);
1533
1534	return 0;
1535}
1536
1537
1538/* Must be invoked under cp->lock. */
1539static inline int cas_mdio_link_not_up(struct cas *cp)
1540{
1541	u16 val;
1542
1543	switch (cp->lstate) {
1544	case link_force_ret:
1545		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1546		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1547		cp->timer_ticks = 5;
1548		cp->lstate = link_force_ok;
1549		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1550		break;
1551
1552	case link_aneg:
1553		val = cas_phy_read(cp, MII_BMCR);
1554
1555		/* Try forced modes. we try things in the following order:
1556		 * 1000 full -> 100 full/half -> 10 half
1557		 */
1558		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1559		val |= BMCR_FULLDPLX;
1560		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1561			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1562		cas_phy_write(cp, MII_BMCR, val);
1563		cp->timer_ticks = 5;
1564		cp->lstate = link_force_try;
1565		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1566		break;
1567
1568	case link_force_try:
1569		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
1570		val = cas_phy_read(cp, MII_BMCR);
1571		cp->timer_ticks = 5;
1572		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
1573			val &= ~CAS_BMCR_SPEED1000;
1574			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1575			cas_phy_write(cp, MII_BMCR, val);
1576			break;
1577		}
1578
1579		if (val & BMCR_SPEED100) {
1580			if (val & BMCR_FULLDPLX) /* fd failed */
1581				val &= ~BMCR_FULLDPLX;
1582			else { /* 100Mbps failed */
1583				val &= ~BMCR_SPEED100;
1584			}
1585			cas_phy_write(cp, MII_BMCR, val);
1586			break;
1587		}
1588		break;
1589	default:
1590		break;
1591	}
1592	return 0;
1593}
1594
1595
1596/* must be invoked with cp->lock held */
1597static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1598{
1599	int restart;
1600
1601	if (bmsr & BMSR_LSTATUS) {
1602		/* Ok, here we got a link. If we had it due to a forced
1603		 * fallback, and we were configured for autoneg, we
1604		 * retry a short autoneg pass. If you know your hub is
1605		 * broken, use ethtool ;)
1606		 */
1607		if ((cp->lstate == link_force_try) &&
1608		    (cp->link_cntl & BMCR_ANENABLE)) {
1609			cp->lstate = link_force_ret;
1610			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1611			cas_mif_poll(cp, 0);
1612			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1613			cp->timer_ticks = 5;
1614			if (cp->opened)
1615				netif_info(cp, link, cp->dev,
1616					   "Got link after fallback, retrying autoneg once...\n");
1617			cas_phy_write(cp, MII_BMCR,
1618				      cp->link_fcntl | BMCR_ANENABLE |
1619				      BMCR_ANRESTART);
1620			cas_mif_poll(cp, 1);
1621
1622		} else if (cp->lstate != link_up) {
1623			cp->lstate = link_up;
1624			cp->link_transition = LINK_TRANSITION_LINK_UP;
1625
1626			if (cp->opened) {
1627				cas_set_link_modes(cp);
1628				netif_carrier_on(cp->dev);
1629			}
1630		}
1631		return 0;
1632	}
1633
1634	/* link not up. if the link was previously up, we restart the
1635	 * whole process
1636	 */
1637	restart = 0;
1638	if (cp->lstate == link_up) {
1639		cp->lstate = link_down;
1640		cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1641
1642		netif_carrier_off(cp->dev);
1643		if (cp->opened)
1644			netif_info(cp, link, cp->dev, "Link down\n");
1645		restart = 1;
1646
1647	} else if (++cp->timer_ticks > 10)
1648		cas_mdio_link_not_up(cp);
1649
1650	return restart;
1651}
1652
1653static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1654			     u32 status)
1655{
1656	u32 stat = readl(cp->regs + REG_MIF_STATUS);
1657	u16 bmsr;
1658
1659	/* check for a link change */
1660	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1661		return 0;
1662
1663	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1664	return cas_mii_link_check(cp, bmsr);
1665}
1666
1667static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1668			     u32 status)
1669{
1670	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1671
1672	if (!stat)
1673		return 0;
1674
1675	netdev_err(dev, "PCI error [%04x:%04x]",
1676		   stat, readl(cp->regs + REG_BIM_DIAG));
1677
1678	/* cassini+ has this reserved */
1679	if ((stat & PCI_ERR_BADACK) &&
1680	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1681		pr_cont(" <No ACK64# during ABS64 cycle>");
1682
1683	if (stat & PCI_ERR_DTRTO)
1684		pr_cont(" <Delayed transaction timeout>");
1685	if (stat & PCI_ERR_OTHER)
1686		pr_cont(" <other>");
1687	if (stat & PCI_ERR_BIM_DMA_WRITE)
1688		pr_cont(" <BIM DMA 0 write req>");
1689	if (stat & PCI_ERR_BIM_DMA_READ)
1690		pr_cont(" <BIM DMA 0 read req>");
1691	pr_cont("\n");
1692
1693	if (stat & PCI_ERR_OTHER) {
1694		int pci_errs;
1695
1696		/* Interrogate PCI config space for the
1697		 * true cause.
1698		 */
1699		pci_errs = pci_status_get_and_clear_errors(cp->pdev);
1700
1701		netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
1702		if (pci_errs & PCI_STATUS_PARITY)
1703			netdev_err(dev, "PCI parity error detected\n");
1704		if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
1705			netdev_err(dev, "PCI target abort\n");
1706		if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
1707			netdev_err(dev, "PCI master acks target abort\n");
1708		if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
1709			netdev_err(dev, "PCI master abort\n");
1710		if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
1711			netdev_err(dev, "PCI system error SERR#\n");
1712		if (pci_errs & PCI_STATUS_DETECTED_PARITY)
1713			netdev_err(dev, "PCI parity error\n");
 
 
 
 
 
 
 
 
 
1714	}
1715
1716	/* For all PCI errors, we should reset the chip. */
1717	return 1;
1718}
1719
1720/* All non-normal interrupt conditions get serviced here.
1721 * Returns non-zero if we should just exit the interrupt
1722 * handler right now (ie. if we reset the card which invalidates
1723 * all of the other original irq status bits).
1724 */
1725static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1726			    u32 status)
1727{
1728	if (status & INTR_RX_TAG_ERROR) {
1729		/* corrupt RX tag framing */
1730		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1731			     "corrupt rx tag framing\n");
1732		spin_lock(&cp->stat_lock[0]);
1733		cp->net_stats[0].rx_errors++;
1734		spin_unlock(&cp->stat_lock[0]);
1735		goto do_reset;
1736	}
1737
1738	if (status & INTR_RX_LEN_MISMATCH) {
1739		/* length mismatch. */
1740		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1741			     "length mismatch for rx frame\n");
1742		spin_lock(&cp->stat_lock[0]);
1743		cp->net_stats[0].rx_errors++;
1744		spin_unlock(&cp->stat_lock[0]);
1745		goto do_reset;
1746	}
1747
1748	if (status & INTR_PCS_STATUS) {
1749		if (cas_pcs_interrupt(dev, cp, status))
1750			goto do_reset;
1751	}
1752
1753	if (status & INTR_TX_MAC_STATUS) {
1754		if (cas_txmac_interrupt(dev, cp, status))
1755			goto do_reset;
1756	}
1757
1758	if (status & INTR_RX_MAC_STATUS) {
1759		if (cas_rxmac_interrupt(dev, cp, status))
1760			goto do_reset;
1761	}
1762
1763	if (status & INTR_MAC_CTRL_STATUS) {
1764		if (cas_mac_interrupt(dev, cp, status))
1765			goto do_reset;
1766	}
1767
1768	if (status & INTR_MIF_STATUS) {
1769		if (cas_mif_interrupt(dev, cp, status))
1770			goto do_reset;
1771	}
1772
1773	if (status & INTR_PCI_ERROR_STATUS) {
1774		if (cas_pci_interrupt(dev, cp, status))
1775			goto do_reset;
1776	}
1777	return 0;
1778
1779do_reset:
1780#if 1
1781	atomic_inc(&cp->reset_task_pending);
1782	atomic_inc(&cp->reset_task_pending_all);
1783	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1784	schedule_work(&cp->reset_task);
1785#else
1786	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1787	netdev_err(dev, "reset called in cas_abnormal_irq\n");
1788	schedule_work(&cp->reset_task);
1789#endif
1790	return 1;
1791}
1792
1793/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
1794 *       determining whether to do a netif_stop/wakeup
1795 */
1796#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1797#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1798static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1799				  const int len)
1800{
1801	unsigned long off = addr + len;
1802
1803	if (CAS_TABORT(cp) == 1)
1804		return 0;
1805	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1806		return 0;
1807	return TX_TARGET_ABORT_LEN;
1808}
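
/* Illustrative example, not part of the driver: assuming
 * TX_TARGET_ABORT_LEN is 0x200 (512) and a 4K page, a buffer that
 * ends at page offset 0xf80 is only 128 bytes short of the boundary,
 * so cas_calc_tabort() returns 512 and the tail of the buffer gets
 * bounced through a tiny buffer; a buffer ending at offset 0x800 is
 * 2048 bytes short of the boundary and needs no split.
 */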
1809
1810static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1811{
1812	struct cas_tx_desc *txds;
1813	struct sk_buff **skbs;
1814	struct net_device *dev = cp->dev;
1815	int entry, count;
1816
1817	spin_lock(&cp->tx_lock[ring]);
1818	txds = cp->init_txds[ring];
1819	skbs = cp->tx_skbs[ring];
1820	entry = cp->tx_old[ring];
1821
1822	count = TX_BUFF_COUNT(ring, entry, limit);
1823	while (entry != limit) {
1824		struct sk_buff *skb = skbs[entry];
1825		dma_addr_t daddr;
1826		u32 dlen;
1827		int frag;
1828
1829		if (!skb) {
1830			/* this should never occur */
1831			entry = TX_DESC_NEXT(ring, entry);
1832			continue;
1833		}
1834
1835		/* however, we might get only a partial skb release. */
1836		count -= skb_shinfo(skb)->nr_frags +
1837			cp->tx_tiny_use[ring][entry].nbufs + 1;
1838		if (count < 0)
1839			break;
1840
1841		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1842			     "tx[%d] done, slot %d\n", ring, entry);
1843
1844		skbs[entry] = NULL;
1845		cp->tx_tiny_use[ring][entry].nbufs = 0;
1846
1847		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1848			struct cas_tx_desc *txd = txds + entry;
1849
1850			daddr = le64_to_cpu(txd->buffer);
1851			dlen = CAS_VAL(TX_DESC_BUFLEN,
1852				       le64_to_cpu(txd->control));
1853			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
1854				       DMA_TO_DEVICE);
1855			entry = TX_DESC_NEXT(ring, entry);
1856
1857			/* tiny buffer may follow */
1858			if (cp->tx_tiny_use[ring][entry].used) {
1859				cp->tx_tiny_use[ring][entry].used = 0;
1860				entry = TX_DESC_NEXT(ring, entry);
1861			}
1862		}
1863
1864		spin_lock(&cp->stat_lock[ring]);
1865		cp->net_stats[ring].tx_packets++;
1866		cp->net_stats[ring].tx_bytes += skb->len;
1867		spin_unlock(&cp->stat_lock[ring]);
1868		dev_consume_skb_irq(skb);
1869	}
1870	cp->tx_old[ring] = entry;
1871
1872	/* this is wrong for multiple tx rings. the net device needs
1873	 * multiple queues for this to do the right thing.  we wait
1874	 * for 2*packets to be available when using tiny buffers
1875	 */
1876	if (netif_queue_stopped(dev) &&
1877	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1878		netif_wake_queue(dev);
1879	spin_unlock(&cp->tx_lock[ring]);
1880}
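
/* Illustrative example, not part of the driver: on a chip with the
 * target-abort erratum (CAS_TABORT() == 2), and assuming
 * MAX_SKB_FRAGS == 17, the wakeup above fires only once
 * 2 * (17 + 1) = 36 descriptors are free -- enough for a worst-case
 * skb in which every buffer needs a tiny-buffer split.
 */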
1881
1882static void cas_tx(struct net_device *dev, struct cas *cp,
1883		   u32 status)
1884{
1885	int limit, ring;
1886#ifdef USE_TX_COMPWB
1887	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#else
	u64 compwb = 0;	/* keeps the compwb printout below buildable */
1888#endif
1889	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1890		     "tx interrupt, status: 0x%x, %llx\n",
1891		     status, (unsigned long long)compwb);
1892	/* process all the rings */
1893	for (ring = 0; ring < N_TX_RINGS; ring++) {
1894#ifdef USE_TX_COMPWB
1895		/* use the completion writeback registers */
1896		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1897			CAS_VAL(TX_COMPWB_LSB, compwb);
1898		compwb = TX_COMPWB_NEXT(compwb);
1899#else
1900		limit = readl(cp->regs + REG_TX_COMPN(ring));
1901#endif
1902		if (cp->tx_old[ring] != limit)
1903			cas_tx_ringN(cp, ring, limit);
1904	}
1905}
1906
1907
1908static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1909			      int entry, const u64 *words,
1910			      struct sk_buff **skbref)
1911{
1912	int dlen, hlen, len, i, alloclen;
1913	int off, swivel = RX_SWIVEL_OFF_VAL;
1914	struct cas_page *page;
1915	struct sk_buff *skb;
1916	void *crcaddr;
1917	__sum16 csum;
1918	char *p;
1919
1920	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1921	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1922	len  = hlen + dlen;
1923
1924	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1925		alloclen = len;
1926	else
1927		alloclen = max(hlen, RX_COPY_MIN);
1928
1929	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1930	if (skb == NULL)
1931		return -1;
1932
1933	*skbref = skb;
1934	skb_reserve(skb, swivel);
1935
1936	p = skb->data;
1937	crcaddr = NULL;
1938	if (hlen) { /* always copy header pages */
1939		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1940		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1941		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1942			swivel;
1943
1944		i = hlen;
1945		if (!dlen) /* attach FCS */
1946			i += cp->crc_size;
1947		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1948					i, DMA_FROM_DEVICE);
1949		memcpy(p, page_address(page->buffer) + off, i);
1950		dma_sync_single_for_device(&cp->pdev->dev,
1951					   page->dma_addr + off, i,
1952					   DMA_FROM_DEVICE);
1953		RX_USED_ADD(page, 0x100);
1954		p += hlen;
1955		swivel = 0;
1956	}
1957
1958
1959	if (alloclen < (hlen + dlen)) {
1960		skb_frag_t *frag = skb_shinfo(skb)->frags;
1961
1962		/* normal or jumbo packets. we use frags */
1963		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
1964		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1965		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
1966
1967		hlen = min(cp->page_size - off, dlen);
1968		if (hlen < 0) {
1969			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1970				     "rx page overflow: %d\n", hlen);
1971			dev_kfree_skb_irq(skb);
1972			return -1;
1973		}
1974		i = hlen;
1975		if (i == dlen)  /* attach FCS */
1976			i += cp->crc_size;
1977		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1978					i, DMA_FROM_DEVICE);
1979
1980		/* make sure we always copy a header */
1981		swivel = 0;
1982		if (p == (char *) skb->data) { /* not split */
1983			memcpy(p, page_address(page->buffer) + off,
1984			       RX_COPY_MIN);
1985			dma_sync_single_for_device(&cp->pdev->dev,
1986						   page->dma_addr + off, i,
1987						   DMA_FROM_DEVICE);
1988			off += RX_COPY_MIN;
1989			swivel = RX_COPY_MIN;
1990			RX_USED_ADD(page, cp->mtu_stride);
1991		} else {
1992			RX_USED_ADD(page, hlen);
1993		}
1994		skb_put(skb, alloclen);
1995
1996		skb_shinfo(skb)->nr_frags++;
1997		skb->data_len += hlen - swivel;
1998		skb->truesize += hlen - swivel;
1999		skb->len      += hlen - swivel;
2000
2001		skb_frag_fill_page_desc(frag, page->buffer, off, hlen - swivel);
2002		__skb_frag_ref(frag);
2003
2004		/* any more data? */
2005		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2006			hlen = dlen;
2007			off = 0;
2008
2009			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2010			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2011			dma_sync_single_for_cpu(&cp->pdev->dev,
2012						page->dma_addr,
2013						hlen + cp->crc_size,
2014						DMA_FROM_DEVICE);
2015			dma_sync_single_for_device(&cp->pdev->dev,
2016						   page->dma_addr,
2017						   hlen + cp->crc_size,
2018						   DMA_FROM_DEVICE);
2019
2020			skb_shinfo(skb)->nr_frags++;
2021			skb->data_len += hlen;
2022			skb->len      += hlen;
2023			frag++;
2024
2025			skb_frag_fill_page_desc(frag, page->buffer, 0, hlen);
2026			__skb_frag_ref(frag);
2027			RX_USED_ADD(page, hlen + cp->crc_size);
2028		}
2029
2030		if (cp->crc_size)
2031			crcaddr = page_address(page->buffer) + off + hlen;
2032
2033	} else {
2034		/* copying packet */
2035		if (!dlen)
2036			goto end_copy_pkt;
2037
2038		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2039		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2040		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2041		hlen = min(cp->page_size - off, dlen);
2042		if (hlen < 0) {
2043			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2044				     "rx page overflow: %d\n", hlen);
2045			dev_kfree_skb_irq(skb);
2046			return -1;
2047		}
2048		i = hlen;
2049		if (i == dlen) /* attach FCS */
2050			i += cp->crc_size;
2051		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
2052					i, DMA_FROM_DEVICE);
2053		memcpy(p, page_address(page->buffer) + off, i);
2054		dma_sync_single_for_device(&cp->pdev->dev,
2055					   page->dma_addr + off, i,
2056					   DMA_FROM_DEVICE);
2057		if (p == (char *) skb->data) /* not split */
2058			RX_USED_ADD(page, cp->mtu_stride);
2059		else
2060			RX_USED_ADD(page, i);
2061
2062		/* any more data? */
2063		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2064			p += hlen;
2065			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2066			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2067			dma_sync_single_for_cpu(&cp->pdev->dev,
2068						page->dma_addr,
2069						dlen + cp->crc_size,
2070						DMA_FROM_DEVICE);
2071			memcpy(p, page_address(page->buffer), dlen + cp->crc_size);
2072			dma_sync_single_for_device(&cp->pdev->dev,
2073						   page->dma_addr,
2074						   dlen + cp->crc_size,
2075						   DMA_FROM_DEVICE);
2076			RX_USED_ADD(page, dlen + cp->crc_size);
2077		}
2078end_copy_pkt:
2079		if (cp->crc_size)
2080			crcaddr = skb->data + alloclen;
2081
2082		skb_put(skb, alloclen);
2083	}
2084
2085	csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2086	if (cp->crc_size) {
2087		/* checksum includes FCS. strip it out. */
2088		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2089					      csum_unfold(csum)));
2090	}
2091	skb->protocol = eth_type_trans(skb, cp->dev);
2092	if (skb->protocol == htons(ETH_P_IP)) {
2093		skb->csum = csum_unfold(~csum);
2094		skb->ip_summed = CHECKSUM_COMPLETE;
2095	} else
2096		skb_checksum_none_assert(skb);
2097	return len;
2098}
2099
2100
2101/* we can handle up to 64 rx flows at a time. we do the same thing
2102 * as nonreassm except that we batch up the buffers.
2103 * NOTE: we currently just treat each flow as a bunch of packets that
2104 *       we pass up. a better way would be to coalesce the packets
2105 *       into a jumbo packet. to do that, we need to do the following:
2106 *       1) the first packet will have a clean split between header and
2107 *          data. save both.
2108 *       2) each time the next flow packet comes in, extend the
2109 *          data length and merge the checksums.
2110 *       3) on flow release, fix up the header.
2111 *       4) make sure the higher layer doesn't care.
2112 * because packets get coalesced, we shouldn't run into fragment count
2113 * issues.
2114 */
2115static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2116				   struct sk_buff *skb)
2117{
2118	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2119	struct sk_buff_head *flow = &cp->rx_flows[flowid];
2120
2121	/* this is protected at a higher layer, so no need to
2122	 * do any additional locking here. stick the buffer
2123	 * at the end.
2124	 */
2125	__skb_queue_tail(flow, skb);
2126	if (words[0] & RX_COMP1_RELEASE_FLOW) {
2127		while ((skb = __skb_dequeue(flow))) {
2128			cas_skb_release(skb);
2129		}
2130	}
2131}
2132
2133/* put the rx descriptor back on the ring. if the buffer is still in
2134 * use by a higher layer, a replacement page is posted in its place.
2135 */
2136static void cas_post_page(struct cas *cp, const int ring, const int index)
2137{
2138	cas_page_t *new;
2139	int entry;
2140
2141	entry = cp->rx_old[ring];
2142
2143	new = cas_page_swap(cp, ring, index);
2144	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2145	cp->init_rxds[ring][entry].index  =
2146		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2147			    CAS_BASE(RX_INDEX_RING, ring));
2148
2149	entry = RX_DESC_ENTRY(ring, entry + 1);
2150	cp->rx_old[ring] = entry;
2151
2152	if (entry % 4)
2153		return;
2154
2155	if (ring == 0)
2156		writel(entry, cp->regs + REG_RX_KICK);
2157	else if ((N_RX_DESC_RINGS > 1) &&
2158		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2159		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2160}
2161
2162
2163/* replenish rx descriptor pages. only called when things are bad */
2164static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2165{
2166	unsigned int entry, last, count, released;
2167	int cluster;
2168	cas_page_t **page = cp->rx_pages[ring];
2169
2170	entry = cp->rx_old[ring];
2171
2172	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2173		     "rxd[%d] interrupt, done: %d\n", ring, entry);
2174
2175	cluster = -1;
2176	count = entry & 0x3;
2177	last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
2178	released = 0;
2179	while (entry != last) {
2180		/* make a new buffer if it's still in use */
2181		if (page_count(page[entry]->buffer) > 1) {
2182			cas_page_t *new = cas_page_dequeue(cp);
2183			if (!new) {
2184				/* let the timer know that we need to
2185				 * do this again
2186				 */
2187				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2188				if (!timer_pending(&cp->link_timer))
2189					mod_timer(&cp->link_timer, jiffies +
2190						  CAS_LINK_FAST_TIMEOUT);
2191				cp->rx_old[ring]  = entry;
2192				cp->rx_last[ring] = num ? num - released : 0;
2193				return -ENOMEM;
2194			}
2195			spin_lock(&cp->rx_inuse_lock);
2196			list_add(&page[entry]->list, &cp->rx_inuse_list);
2197			spin_unlock(&cp->rx_inuse_lock);
2198			cp->init_rxds[ring][entry].buffer =
2199				cpu_to_le64(new->dma_addr);
2200			page[entry] = new;
2201
2202		}
2203
2204		if (++count == 4) {
2205			cluster = entry;
2206			count = 0;
2207		}
2208		released++;
2209		entry = RX_DESC_ENTRY(ring, entry + 1);
2210	}
2211	cp->rx_old[ring] = entry;
2212
2213	if (cluster < 0)
2214		return 0;
2215
2216	if (ring == 0)
2217		writel(cluster, cp->regs + REG_RX_KICK);
2218	else if ((N_RX_DESC_RINGS > 1) &&
2219		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2220		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2221	return 0;
2222}
2223
2224
2225/* process a completion ring. packets are set up in three basic ways:
2226 * small packets: header + data copied into a single buffer.
2227 * large packets: header and data in a single buffer.
2228 * split packets: header in a separate buffer from the data.
2229 *                data may span multiple pages, or may be > 256
2230 *                bytes yet still fit in a single page.
2231 *
2232 * NOTE: RX page posting is done in this routine as well. while there's
2233 *       the capability of using multiple RX completion rings, it isn't
2234 *       really worthwhile due to the fact that the page posting will
2235 *       force serialization on the single descriptor ring.
2236 */
2237static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2238{
2239	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2240	int entry, drops;
2241	int npackets = 0;
2242
2243	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2244		     "rx[%d] interrupt, done: %d/%d\n",
2245		     ring,
2246		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2247
2248	entry = cp->rx_new[ring];
2249	drops = 0;
2250	while (1) {
2251		struct cas_rx_comp *rxc = rxcs + entry;
2252		struct sk_buff *skb;
2253		int type, len;
2254		u64 words[4];
2255		int i, dring;
2256
2257		words[0] = le64_to_cpu(rxc->word1);
2258		words[1] = le64_to_cpu(rxc->word2);
2259		words[2] = le64_to_cpu(rxc->word3);
2260		words[3] = le64_to_cpu(rxc->word4);
2261
2262		/* don't touch if still owned by hw */
2263		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2264		if (type == 0)
2265			break;
2266
2267		/* hw hasn't cleared the zero bit yet */
2268		if (words[3] & RX_COMP4_ZERO) {
2269			break;
2270		}
2271
2272		/* get info on the packet */
2273		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2274			spin_lock(&cp->stat_lock[ring]);
2275			cp->net_stats[ring].rx_errors++;
2276			if (words[3] & RX_COMP4_LEN_MISMATCH)
2277				cp->net_stats[ring].rx_length_errors++;
2278			if (words[3] & RX_COMP4_BAD)
2279				cp->net_stats[ring].rx_crc_errors++;
2280			spin_unlock(&cp->stat_lock[ring]);
2281
2282			/* We'll just return it to Cassini. */
2283		drop_it:
2284			spin_lock(&cp->stat_lock[ring]);
2285			++cp->net_stats[ring].rx_dropped;
2286			spin_unlock(&cp->stat_lock[ring]);
2287			goto next;
2288		}
2289
2290		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2291		if (len < 0) {
2292			++drops;
2293			goto drop_it;
2294		}
2295
2296		/* see if it's a flow re-assembly or not. the driver
2297		 * itself handles release back up.
2298		 */
2299		if (RX_DONT_BATCH || (type == 0x2)) {
2300			/* non-reassm: these always get released */
2301			cas_skb_release(skb);
2302		} else {
2303			cas_rx_flow_pkt(cp, words, skb);
2304		}
2305
2306		spin_lock(&cp->stat_lock[ring]);
2307		cp->net_stats[ring].rx_packets++;
2308		cp->net_stats[ring].rx_bytes += len;
2309		spin_unlock(&cp->stat_lock[ring]);
2310
2311	next:
2312		npackets++;
2313
2314		/* should it be released? */
2315		if (words[0] & RX_COMP1_RELEASE_HDR) {
2316			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2317			dring = CAS_VAL(RX_INDEX_RING, i);
2318			i = CAS_VAL(RX_INDEX_NUM, i);
2319			cas_post_page(cp, dring, i);
2320		}
2321
2322		if (words[0] & RX_COMP1_RELEASE_DATA) {
2323			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2324			dring = CAS_VAL(RX_INDEX_RING, i);
2325			i = CAS_VAL(RX_INDEX_NUM, i);
2326			cas_post_page(cp, dring, i);
2327		}
2328
2329		if (words[0] & RX_COMP1_RELEASE_NEXT) {
2330			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2331			dring = CAS_VAL(RX_INDEX_RING, i);
2332			i = CAS_VAL(RX_INDEX_NUM, i);
2333			cas_post_page(cp, dring, i);
2334		}
2335
2336		/* skip to the next entry */
2337		entry = RX_COMP_ENTRY(ring, entry + 1 +
2338				      CAS_VAL(RX_COMP1_SKIP, words[0]));
2339#ifdef USE_NAPI
2340		if (budget && (npackets >= budget))
2341			break;
2342#endif
2343	}
2344	cp->rx_new[ring] = entry;
2345
2346	if (drops)
2347		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2348	return npackets;
2349}
2350
2351
2352/* put completion entries back on the ring */
2353static void cas_post_rxcs_ringN(struct net_device *dev,
2354				struct cas *cp, int ring)
2355{
2356	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2357	int last, entry;
2358
2359	last = cp->rx_cur[ring];
2360	entry = cp->rx_new[ring];
2361	netif_printk(cp, intr, KERN_DEBUG, dev,
2362		     "rxc[%d] interrupt, done: %d/%d\n",
2363		     ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2364
2365	/* zero and re-mark descriptors */
2366	while (last != entry) {
2367		cas_rxc_init(rxc + last);
2368		last = RX_COMP_ENTRY(ring, last + 1);
2369	}
2370	cp->rx_cur[ring] = last;
2371
2372	if (ring == 0)
2373		writel(last, cp->regs + REG_RX_COMP_TAIL);
2374	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2375		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2376}
2377
2378
2379
2380/* cassini can use all four PCI interrupts for the completion ring.
2381 * rings 3 and 4 are identical
2382 */
2383#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2384static inline void cas_handle_irqN(struct net_device *dev,
2385				   struct cas *cp, const u32 status,
2386				   const int ring)
2387{
2388	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2389		cas_post_rxcs_ringN(dev, cp, ring);
2390}
2391
2392static irqreturn_t cas_interruptN(int irq, void *dev_id)
2393{
2394	struct net_device *dev = dev_id;
2395	struct cas *cp = netdev_priv(dev);
2396	unsigned long flags;
2397	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2398	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2399
2400	/* check for shared irq */
2401	if (status == 0)
2402		return IRQ_NONE;
2403
2404	spin_lock_irqsave(&cp->lock, flags);
2405	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2406#ifdef USE_NAPI
2407		cas_mask_intr(cp);
2408		napi_schedule(&cp->napi);
2409#else
2410		cas_rx_ringN(cp, ring, 0);
2411#endif
2412		status &= ~INTR_RX_DONE_ALT;
2413	}
2414
2415	if (status)
2416		cas_handle_irqN(dev, cp, status, ring);
2417	spin_unlock_irqrestore(&cp->lock, flags);
2418	return IRQ_HANDLED;
2419}
2420#endif
2421
2422#ifdef USE_PCI_INTB
2423/* everything but rx packets */
2424static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2425{
2426	if (status & INTR_RX_BUF_UNAVAIL_1) {
2427		/* Frame arrived, no free RX buffers available.
2428		 * NOTE: we can get this on a link transition. */
2429		cas_post_rxds_ringN(cp, 1, 0);
2430		spin_lock(&cp->stat_lock[1]);
2431		cp->net_stats[1].rx_dropped++;
2432		spin_unlock(&cp->stat_lock[1]);
2433	}
2434
2435	if (status & INTR_RX_BUF_AE_1)
2436		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2437				    RX_AE_FREEN_VAL(1));
2438
2439	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2440		cas_post_rxcs_ringN(cp, 1);
2441}
2442
2443/* ring 2 handles a few more events than 3 and 4 */
2444static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2445{
2446	struct net_device *dev = dev_id;
2447	struct cas *cp = netdev_priv(dev);
2448	unsigned long flags;
2449	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2450
2451	/* check for shared interrupt */
2452	if (status == 0)
2453		return IRQ_NONE;
2454
2455	spin_lock_irqsave(&cp->lock, flags);
2456	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2457#ifdef USE_NAPI
2458		cas_mask_intr(cp);
2459		napi_schedule(&cp->napi);
2460#else
2461		cas_rx_ringN(cp, 1, 0);
2462#endif
2463		status &= ~INTR_RX_DONE_ALT;
2464	}
2465	if (status)
2466		cas_handle_irq1(cp, status);
2467	spin_unlock_irqrestore(&cp->lock, flags);
2468	return IRQ_HANDLED;
2469}
2470#endif
2471
2472static inline void cas_handle_irq(struct net_device *dev,
2473				  struct cas *cp, const u32 status)
2474{
2475	/* housekeeping interrupts */
2476	if (status & INTR_ERROR_MASK)
2477		cas_abnormal_irq(dev, cp, status);
2478
2479	if (status & INTR_RX_BUF_UNAVAIL) {
2480		/* Frame arrived, no free RX buffers available.
2481		 * NOTE: we can get this on a link transition.
2482		 */
2483		cas_post_rxds_ringN(cp, 0, 0);
2484		spin_lock(&cp->stat_lock[0]);
2485		cp->net_stats[0].rx_dropped++;
2486		spin_unlock(&cp->stat_lock[0]);
2487	} else if (status & INTR_RX_BUF_AE) {
2488		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2489				    RX_AE_FREEN_VAL(0));
2490	}
2491
2492	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2493		cas_post_rxcs_ringN(dev, cp, 0);
2494}
2495
2496static irqreturn_t cas_interrupt(int irq, void *dev_id)
2497{
2498	struct net_device *dev = dev_id;
2499	struct cas *cp = netdev_priv(dev);
2500	unsigned long flags;
2501	u32 status = readl(cp->regs + REG_INTR_STATUS);
2502
2503	if (status == 0)
2504		return IRQ_NONE;
2505
2506	spin_lock_irqsave(&cp->lock, flags);
2507	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2508		cas_tx(dev, cp, status);
2509		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2510	}
2511
2512	if (status & INTR_RX_DONE) {
2513#ifdef USE_NAPI
2514		cas_mask_intr(cp);
2515		napi_schedule(&cp->napi);
2516#else
2517		cas_rx_ringN(cp, 0, 0);
2518#endif
2519		status &= ~INTR_RX_DONE;
2520	}
2521
2522	if (status)
2523		cas_handle_irq(dev, cp, status);
2524	spin_unlock_irqrestore(&cp->lock, flags);
2525	return IRQ_HANDLED;
2526}
2527
2528
2529#ifdef USE_NAPI
2530static int cas_poll(struct napi_struct *napi, int budget)
2531{
2532	struct cas *cp = container_of(napi, struct cas, napi);
2533	struct net_device *dev = cp->dev;
2534	int i, enable_intr, credits;
2535	u32 status = readl(cp->regs + REG_INTR_STATUS);
2536	unsigned long flags;
2537
2538	spin_lock_irqsave(&cp->lock, flags);
2539	cas_tx(dev, cp, status);
2540	spin_unlock_irqrestore(&cp->lock, flags);
2541
2542	/* NAPI rx packets. we spread the credits across all of the
2543	 * rxc rings
2544	 *
2545	 * to make sure we're fair with the work, we loop through each
2546	 * ring N_RX_COMP_RINGS times with a request of
2547	 * budget / N_RX_COMP_RINGS
2548	 */
2549	enable_intr = 1;
2550	credits = 0;
2551	for (i = 0; i < N_RX_COMP_RINGS; i++) {
2552		int j;
2553		for (j = 0; j < N_RX_COMP_RINGS; j++) {
2554			credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2555			if (credits >= budget) {
2556				enable_intr = 0;
2557				goto rx_comp;
2558			}
2559		}
2560	}
2561
2562rx_comp:
2563	/* final rx completion */
2564	spin_lock_irqsave(&cp->lock, flags);
2565	if (status)
2566		cas_handle_irq(dev, cp, status);
2567
2568#ifdef USE_PCI_INTB
2569	if (N_RX_COMP_RINGS > 1) {
2570		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2571		if (status)
2572			cas_handle_irq1(cp, status);
2573	}
2574#endif
2575
2576#ifdef USE_PCI_INTC
2577	if (N_RX_COMP_RINGS > 2) {
2578		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2579		if (status)
2580			cas_handle_irqN(dev, cp, status, 2);
2581	}
2582#endif
2583
2584#ifdef USE_PCI_INTD
2585	if (N_RX_COMP_RINGS > 3) {
2586		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2587		if (status)
2588			cas_handle_irqN(dev, cp, status, 3);
2589	}
2590#endif
2591	spin_unlock_irqrestore(&cp->lock, flags);
2592	if (enable_intr) {
2593		napi_complete(napi);
2594		cas_unmask_intr(cp);
2595	}
2596	return credits;
2597}
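
/* Illustrative example, not part of the driver: with a NAPI budget of
 * 64 and N_RX_COMP_RINGS == 4, each cas_rx_ringN() call above asks a
 * ring for at most 16 packets, and the 4 x 4 loop lets a busy ring be
 * revisited up to four times before the budget runs out and interrupts
 * stay masked for another poll.
 */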
2598#endif
2599
2600#ifdef CONFIG_NET_POLL_CONTROLLER
2601static void cas_netpoll(struct net_device *dev)
2602{
2603	struct cas *cp = netdev_priv(dev);
2604
2605	cas_disable_irq(cp, 0);
2606	cas_interrupt(cp->pdev->irq, dev);
2607	cas_enable_irq(cp, 0);
2608
2609#ifdef USE_PCI_INTB
2610	if (N_RX_COMP_RINGS > 1) {
2611		/* cas_interrupt1(); */
2612	}
2613#endif
2614#ifdef USE_PCI_INTC
2615	if (N_RX_COMP_RINGS > 2) {
2616		/* cas_interruptN(); */
2617	}
2618#endif
2619#ifdef USE_PCI_INTD
2620	if (N_RX_COMP_RINGS > 3) {
2621		/* cas_interruptN(); */
2622	}
2623#endif
2624}
2625#endif
2626
2627static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
2628{
2629	struct cas *cp = netdev_priv(dev);
2630
2631	netdev_err(dev, "transmit timed out, resetting\n");
2632	if (!cp->hw_running) {
2633		netdev_err(dev, "hrm.. hw not running!\n");
2634		return;
2635	}
2636
2637	netdev_err(dev, "MIF_STATE[%08x]\n",
2638		   readl(cp->regs + REG_MIF_STATE_MACHINE));
2639
2640	netdev_err(dev, "MAC_STATE[%08x]\n",
2641		   readl(cp->regs + REG_MAC_STATE_MACHINE));
2642
2643	netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2644		   readl(cp->regs + REG_TX_CFG),
2645		   readl(cp->regs + REG_MAC_TX_STATUS),
2646		   readl(cp->regs + REG_MAC_TX_CFG),
2647		   readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2648		   readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2649		   readl(cp->regs + REG_TX_FIFO_READ_PTR),
2650		   readl(cp->regs + REG_TX_SM_1),
2651		   readl(cp->regs + REG_TX_SM_2));
2652
2653	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2654		   readl(cp->regs + REG_RX_CFG),
2655		   readl(cp->regs + REG_MAC_RX_STATUS),
2656		   readl(cp->regs + REG_MAC_RX_CFG));
2657
2658	netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2659		   readl(cp->regs + REG_HP_STATE_MACHINE),
2660		   readl(cp->regs + REG_HP_STATUS0),
2661		   readl(cp->regs + REG_HP_STATUS1),
2662		   readl(cp->regs + REG_HP_STATUS2));
2663
2664#if 1
2665	atomic_inc(&cp->reset_task_pending);
2666	atomic_inc(&cp->reset_task_pending_all);
2667	schedule_work(&cp->reset_task);
2668#else
2669	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2670	schedule_work(&cp->reset_task);
2671#endif
2672}
2673
2674static inline int cas_intme(int ring, int entry)
2675{
2676	/* Algorithm: IRQ every 1/2 of descriptors. */
2677	if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2678		return 1;
2679	return 0;
2680}
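
/* Illustrative example, not part of the driver: with a hypothetical
 * 128-entry ring, TX_DESC_RINGN_SIZE(ring) >> 1 == 64, so cas_intme()
 * is nonzero only for entries 0 and 64 -- one completion interrupt
 * per half ring.
 */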
2681
2682
2683static void cas_write_txd(struct cas *cp, int ring, int entry,
2684			  dma_addr_t mapping, int len, u64 ctrl, int last)
2685{
2686	struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2687
2688	ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2689	if (cas_intme(ring, entry))
2690		ctrl |= TX_DESC_INTME;
2691	if (last)
2692		ctrl |= TX_DESC_EOF;
2693	txd->control = cpu_to_le64(ctrl);
2694	txd->buffer = cpu_to_le64(mapping);
2695}
2696
2697static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2698				const int entry)
2699{
2700	return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2701}
2702
2703static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2704				     const int entry, const int tentry)
2705{
2706	cp->tx_tiny_use[ring][tentry].nbufs++;
2707	cp->tx_tiny_use[ring][entry].used = 1;
2708	return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2709}
2710
2711static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2712				    struct sk_buff *skb)
2713{
2714	struct net_device *dev = cp->dev;
2715	int entry, nr_frags, frag, tabort, tentry;
2716	dma_addr_t mapping;
2717	unsigned long flags;
2718	u64 ctrl;
2719	u32 len;
2720
2721	spin_lock_irqsave(&cp->tx_lock[ring], flags);
2722
2723	/* This is a hard error, log it. */
2724	if (TX_BUFFS_AVAIL(cp, ring) <=
2725	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2726		netif_stop_queue(dev);
2727		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2728		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2729		return 1;
2730	}
2731
2732	ctrl = 0;
2733	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2734		const u64 csum_start_off = skb_checksum_start_offset(skb);
2735		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2736
2737		ctrl =  TX_DESC_CSUM_EN |
2738			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2739			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2740	}
2741
2742	entry = cp->tx_new[ring];
2743	cp->tx_skbs[ring][entry] = skb;
2744
2745	nr_frags = skb_shinfo(skb)->nr_frags;
2746	len = skb_headlen(skb);
2747	mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
2748			       offset_in_page(skb->data), len, DMA_TO_DEVICE);
2749
2750	tentry = entry;
2751	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2752	if (unlikely(tabort)) {
2753		/* NOTE: len is always >  tabort */
2754		cas_write_txd(cp, ring, entry, mapping, len - tabort,
2755			      ctrl | TX_DESC_SOF, 0);
2756		entry = TX_DESC_NEXT(ring, entry);
2757
2758		skb_copy_from_linear_data_offset(skb, len - tabort,
2759			      tx_tiny_buf(cp, ring, entry), tabort);
2760		mapping = tx_tiny_map(cp, ring, entry, tentry);
2761		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2762			      (nr_frags == 0));
2763	} else {
2764		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2765			      TX_DESC_SOF, (nr_frags == 0));
2766	}
2767	entry = TX_DESC_NEXT(ring, entry);
2768
2769	for (frag = 0; frag < nr_frags; frag++) {
2770		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2771
2772		len = skb_frag_size(fragp);
2773		mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2774					   DMA_TO_DEVICE);
2775
2776		tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
2777		if (unlikely(tabort)) {
2778			/* NOTE: len is always > tabort */
2779			cas_write_txd(cp, ring, entry, mapping, len - tabort,
2780				      ctrl, 0);
2781			entry = TX_DESC_NEXT(ring, entry);
2782			memcpy_from_page(tx_tiny_buf(cp, ring, entry),
2783					 skb_frag_page(fragp),
2784					 skb_frag_off(fragp) + len - tabort,
2785					 tabort);
2786			mapping = tx_tiny_map(cp, ring, entry, tentry);
2787			len     = tabort;
2788		}
2789
2790		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2791			      (frag + 1 == nr_frags));
2792		entry = TX_DESC_NEXT(ring, entry);
2793	}
2794
2795	cp->tx_new[ring] = entry;
2796	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2797		netif_stop_queue(dev);
2798
2799	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2800		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2801		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2802	writel(entry, cp->regs + REG_TX_KICKN(ring));
2803	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2804	return 0;
2805}
2806
2807static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2808{
2809	struct cas *cp = netdev_priv(dev);
2810
2811	/* this is only used as a load-balancing hint, so it doesn't
2812	 * need to be SMP safe
2813	 */
2814	static int ring;
2815
2816	if (skb_padto(skb, cp->min_frame_size))
2817		return NETDEV_TX_OK;
2818
2819	/* XXX: we need some higher-level QoS hooks to steer packets to
2820	 *      individual queues.
2821	 */
2822	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2823		return NETDEV_TX_BUSY;
2824	return NETDEV_TX_OK;
2825}
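
/* Illustrative example, not part of the driver: with
 * N_TX_RINGS_MASK == 3, successive transmits land on rings
 * 0, 1, 2, 3, 0, ... -- since this is only a load-balancing hint,
 * the unlocked static counter above is tolerable.
 */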
2826
2827static void cas_init_tx_dma(struct cas *cp)
2828{
2829	u64 desc_dma = cp->block_dvma;
2830	unsigned long off;
2831	u32 val;
2832	int i;
2833
2834	/* set up tx completion writeback registers. must be 8-byte aligned */
2835#ifdef USE_TX_COMPWB
2836	off = offsetof(struct cas_init_block, tx_compwb);
2837	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2838	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2839#endif
2840
2841	/* enable completion writebacks, enable paced mode,
2842	 * disable read pipe, and disable pre-interrupt compwbs
2843	 */
2844	val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2845		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2846		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2847		TX_CFG_INTR_COMPWB_DIS;
2848
2849	/* write out tx ring info and tx desc bases */
2850	for (i = 0; i < MAX_TX_RINGS; i++) {
2851		off = (unsigned long) cp->init_txds[i] -
2852			(unsigned long) cp->init_block;
2853
2854		val |= CAS_TX_RINGN_BASE(i);
2855		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2856		writel((desc_dma + off) & 0xffffffff, cp->regs +
2857		       REG_TX_DBN_LOW(i));
2858		/* don't zero out the kick register here as the system
2859		 * will wedge
2860		 */
2861	}
2862	writel(val, cp->regs + REG_TX_CFG);
2863
2864	/* program max burst sizes. these numbers should be different
2865	 * if doing QoS.
2866	 */
2867#ifdef USE_QOS
2868	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2869	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2870	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2871	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2872#else
2873	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2874	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2875	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2876	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2877#endif
2878}
2879
2880/* Must be invoked under cp->lock. */
2881static inline void cas_init_dma(struct cas *cp)
2882{
2883	cas_init_tx_dma(cp);
2884	cas_init_rx_dma(cp);
2885}
2886
2887static void cas_process_mc_list(struct cas *cp)
2888{
2889	u16 hash_table[16];
2890	u32 crc;
2891	struct netdev_hw_addr *ha;
2892	int i = 1;
2893
2894	memset(hash_table, 0, sizeof(hash_table));
2895	netdev_for_each_mc_addr(ha, cp->dev) {
2896		if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2897			/* use the alternate mac address registers for the
2898			 * first 15 multicast addresses
2899			 */
2900			writel((ha->addr[4] << 8) | ha->addr[5],
2901			       cp->regs + REG_MAC_ADDRN(i*3 + 0));
2902			writel((ha->addr[2] << 8) | ha->addr[3],
2903			       cp->regs + REG_MAC_ADDRN(i*3 + 1));
2904			writel((ha->addr[0] << 8) | ha->addr[1],
2905			       cp->regs + REG_MAC_ADDRN(i*3 + 2));
2906			i++;
2907		}
2908		else {
2909			/* use hw hash table for the next series of
2910			 * multicast addresses
2911			 */
2912			crc = ether_crc_le(ETH_ALEN, ha->addr);
2913			crc >>= 24;
2914			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2915		}
2916	}
2917	for (i = 0; i < 16; i++)
2918		writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2919}
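
/* Illustrative example, not part of the driver: for a multicast
 * address whose little-endian CRC32 ends up with 0xab in its top
 * byte, crc >> 24 == 0xab, so bit (15 - 0xb) of hash table word 0xa
 * is set -- a 256-bit filter spread across sixteen 16-bit registers.
 */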
2920
2921/* Must be invoked under cp->lock. */
2922static u32 cas_setup_multicast(struct cas *cp)
2923{
2924	u32 rxcfg = 0;
2925	int i;
2926
2927	if (cp->dev->flags & IFF_PROMISC) {
2928		rxcfg |= MAC_RX_CFG_PROMISC_EN;
2929
2930	} else if (cp->dev->flags & IFF_ALLMULTI) {
2931		for (i = 0; i < 16; i++)
2932			writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2933		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2934
2935	} else {
2936		cas_process_mc_list(cp);
2937		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2938	}
2939
2940	return rxcfg;
2941}
2942
2943/* must be invoked under cp->stat_lock[N_TX_RINGS] */
2944static void cas_clear_mac_err(struct cas *cp)
2945{
2946	writel(0, cp->regs + REG_MAC_COLL_NORMAL);
2947	writel(0, cp->regs + REG_MAC_COLL_FIRST);
2948	writel(0, cp->regs + REG_MAC_COLL_EXCESS);
2949	writel(0, cp->regs + REG_MAC_COLL_LATE);
2950	writel(0, cp->regs + REG_MAC_TIMER_DEFER);
2951	writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
2952	writel(0, cp->regs + REG_MAC_RECV_FRAME);
2953	writel(0, cp->regs + REG_MAC_LEN_ERR);
2954	writel(0, cp->regs + REG_MAC_ALIGN_ERR);
2955	writel(0, cp->regs + REG_MAC_FCS_ERR);
2956	writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
2957}
2958
2959
2960static void cas_mac_reset(struct cas *cp)
2961{
2962	int i;
2963
2964	/* do both TX and RX reset */
2965	writel(0x1, cp->regs + REG_MAC_TX_RESET);
2966	writel(0x1, cp->regs + REG_MAC_RX_RESET);
2967
2968	/* wait for TX */
2969	i = STOP_TRIES;
2970	while (i-- > 0) {
2971		if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
2972			break;
2973		udelay(10);
2974	}
2975
2976	/* wait for RX */
2977	i = STOP_TRIES;
2978	while (i-- > 0) {
2979		if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
2980			break;
2981		udelay(10);
2982	}
2983
2984	if (readl(cp->regs + REG_MAC_TX_RESET) |
2985	    readl(cp->regs + REG_MAC_RX_RESET))
2986		netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
2987			   readl(cp->regs + REG_MAC_TX_RESET),
2988			   readl(cp->regs + REG_MAC_RX_RESET),
2989			   readl(cp->regs + REG_MAC_STATE_MACHINE));
2990}
2991
2992
2993/* Must be invoked under cp->lock. */
2994static void cas_init_mac(struct cas *cp)
2995{
2996	const unsigned char *e = &cp->dev->dev_addr[0];
2997	int i;
2998	cas_mac_reset(cp);
2999
3000	/* setup core arbitration weight register */
3001	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3002
3003#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3004	/* set the infinite burst register for chips that don't have
3005	 * pci issues.
3006	 */
3007	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3008		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3009#endif
3010
3011	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3012
3013	writel(0x00, cp->regs + REG_MAC_IPG0);
3014	writel(0x08, cp->regs + REG_MAC_IPG1);
3015	writel(0x04, cp->regs + REG_MAC_IPG2);
3016
3017	/* change later for 802.3z */
3018	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3019
3020	/* min frame + FCS */
3021	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3022
3023	/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
3024	 * specify the maximum frame size to prevent RX tag errors on
3025	 * oversized frames.
3026	 */
3027	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3028	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3029			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3030	       cp->regs + REG_MAC_FRAMESIZE_MAX);
3031
3032	/* NOTE: crc_size is used as a surrogate for half-duplex.
3033	 * work around the saturn half-duplex issue by increasing
3034	 * the preamble size to 65 bytes.
3035	 */
3036	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3037		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3038	else
3039		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3040	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3041	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3042	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3043
3044	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3045
3046	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3047	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3048	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3049	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3050	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3051
3052	/* setup mac address in perfect filter array */
3053	for (i = 0; i < 45; i++)
3054		writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3055
3056	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3057	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3058	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3059
3060	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3061	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3062	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3063
3064	cp->mac_rx_cfg = cas_setup_multicast(cp);
3065
3066	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3067	cas_clear_mac_err(cp);
3068	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3069
3070	/* Setup MAC interrupts.  We want to get all of the interesting
3071	 * counter expiration events, but we do not want to hear about
3072	 * normal rx/tx as the DMA engine tells us that.
3073	 */
3074	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3075	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3076
3077	/* Don't enable even the PAUSE interrupts for now, we
3078	 * make no use of those events other than to record them.
3079	 */
3080	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3081}
3082
3083/* Must be invoked under cp->lock. */
3084static void cas_init_pause_thresholds(struct cas *cp)
3085{
3086	/* Calculate pause thresholds.  Setting the OFF threshold to the
3087	 * full RX fifo size effectively disables PAUSE generation
3088	 */
3089	if (cp->rx_fifo_size <= (2 * 1024)) {
3090		cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3091	} else {
3092		int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3093		if (max_frame * 3 > cp->rx_fifo_size) {
3094			cp->rx_pause_off = 7104;
3095			cp->rx_pause_on  = 960;
3096		} else {
3097			int off = (cp->rx_fifo_size - (max_frame * 2));
3098			int on = off - max_frame;
3099			cp->rx_pause_off = off;
3100			cp->rx_pause_on = on;
3101		}
3102	}
3103}
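
/* Illustrative example, not part of the driver: assuming a 16KB rx
 * fifo and a 1500-byte MTU, max_frame rounds 1500 + 14 + 4 + 4 up to
 * 1536 (the next multiple of 64); 3 * 1536 fits in the fifo, so
 * rx_pause_off = 16384 - 2 * 1536 = 13312 and
 * rx_pause_on = 13312 - 1536 = 11776 bytes.
 */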
3104
3105static int cas_vpd_match(const void __iomem *p, const char *str)
3106{
3107	int len = strlen(str) + 1;
3108	int i;
3109
3110	for (i = 0; i < len; i++) {
3111		if (readb(p + i) != str[i])
3112			return 0;
3113	}
3114	return 1;
3115}
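
/* Illustrative example, not part of the driver: cas_vpd_match(p, "pcs")
 * compares four bytes -- 'p', 'c', 's' and the terminating NUL -- so a
 * longer property value such as the hypothetical "pcs-x" does not match.
 */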
3116
3117
3118/* get the mac address by reading the vpd information in the rom.
3119 * also get the phy type and determine if there's an entropy generator.
3120 * NOTE: this is a bit convoluted for the following reasons:
3121 *  1) vpd info has order-dependent mac addresses for multinic cards
3122 *  2) the only way to determine the nic order is to use the slot
3123 *     number.
3124 *  3) fiber cards don't have bridges, so their slot numbers don't
3125 *     mean anything.
3126 *  4) we don't actually know we have a fiber card until after
3127 *     the mac addresses are parsed.
3128 */
3129static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3130			    const int offset)
3131{
3132	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3133	void __iomem *base, *kstart;
3134	int i, len;
3135	int found = 0;
3136#define VPD_FOUND_MAC        0x01
3137#define VPD_FOUND_PHY        0x02
3138
3139	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3140	int mac_off  = 0;
3141
3142#if defined(CONFIG_SPARC)
3143	const unsigned char *addr;
3144#endif
3145
3146	/* give us access to the PROM */
3147	writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3148	       cp->regs + REG_BIM_LOCAL_DEV_EN);
3149
3150	/* check for an expansion rom */
3151	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3152		goto use_random_mac_addr;
3153
3154	/* search for beginning of vpd */
3155	base = NULL;
3156	for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3157		/* check for PCIR */
3158		if ((readb(p + i + 0) == 0x50) &&
3159		    (readb(p + i + 1) == 0x43) &&
3160		    (readb(p + i + 2) == 0x49) &&
3161		    (readb(p + i + 3) == 0x52)) {
3162			base = p + (readb(p + i + 8) |
3163				    (readb(p + i + 9) << 8));
3164			break;
3165		}
3166	}
3167
3168	if (!base || (readb(base) != 0x82))
3169		goto use_random_mac_addr;
3170
3171	i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3172	while (i < EXPANSION_ROM_SIZE) {
3173		if (readb(base + i) != 0x90) /* no vpd found */
3174			goto use_random_mac_addr;
3175
3176		/* found a vpd field */
3177		len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3178
3179		/* extract keywords */
3180		kstart = base + i + 3;
3181		p = kstart;
3182		while ((p - kstart) < len) {
3183			int klen = readb(p + 2);
3184			int j;
3185			char type;
3186
3187			p += 3;
3188
3189			/* look for the following things:
3190			 * -- correct length == 29
3191			 * 3 (type) + 2 (size) +
3192			 * 18 (strlen("local-mac-address") + 1) +
3193			 * 6 (mac addr)
3194			 * -- VPD Instance 'I'
3195			 * -- VPD Type Bytes 'B'
3196			 * -- VPD data length == 6
3197			 * -- property string == local-mac-address
3198			 *
3199			 * -- correct length == 24
3200			 * 3 (type) + 2 (size) +
3201			 * 12 (strlen("entropy-dev") + 1) +
3202			 * 7 (strlen("vms110") + 1)
3203			 * -- VPD Instance 'I'
3204			 * -- VPD Type String 'B'
3205			 * -- VPD data length == 7
3206			 * -- property string == entropy-dev
3207			 *
3208			 * -- correct length == 18
3209			 * 3 (type) + 2 (size) +
3210			 * 9 (strlen("phy-type") + 1) +
3211			 * 4 (strlen("pcs") + 1)
3212			 * -- VPD Instance 'I'
3213			 * -- VPD Type String 'S'
3214			 * -- VPD data length == 4
3215			 * -- property string == phy-type
3216			 *
3217			 * -- correct length == 23
3218			 * 3 (type) + 2 (size) +
3219			 * 14 (strlen("phy-interface") + 1) +
3220			 * 4 (strlen("pcs") + 1)
3221			 * -- VPD Instance 'I'
3222			 * -- VPD Type String 'S'
3223			 * -- VPD data length == 4
3224			 * -- property string == phy-interface
3225			 */
3226			if (readb(p) != 'I')
3227				goto next;
3228
3229			/* finally, check string and length */
3230			type = readb(p + 3);
3231			if (type == 'B') {
3232				if ((klen == 29) && readb(p + 4) == 6 &&
3233				    cas_vpd_match(p + 5,
3234						  "local-mac-address")) {
3235					if (mac_off++ > offset)
3236						goto next;
3237
3238					/* set mac address */
3239					for (j = 0; j < 6; j++)
3240						dev_addr[j] =
3241							readb(p + 23 + j);
3242					goto found_mac;
3243				}
3244			}
3245
3246			if (type != 'S')
3247				goto next;
3248
3249#ifdef USE_ENTROPY_DEV
3250			if ((klen == 24) &&
3251			    cas_vpd_match(p + 5, "entropy-dev") &&
3252			    cas_vpd_match(p + 17, "vms110")) {
3253				cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3254				goto next;
3255			}
3256#endif
3257
3258			if (found & VPD_FOUND_PHY)
3259				goto next;
3260
3261			if ((klen == 18) && readb(p + 4) == 4 &&
3262			    cas_vpd_match(p + 5, "phy-type")) {
3263				if (cas_vpd_match(p + 14, "pcs")) {
3264					phy_type = CAS_PHY_SERDES;
3265					goto found_phy;
3266				}
3267			}
3268
3269			if ((klen == 23) && readb(p + 4) == 4 &&
3270			    cas_vpd_match(p + 5, "phy-interface")) {
3271				if (cas_vpd_match(p + 19, "pcs")) {
3272					phy_type = CAS_PHY_SERDES;
3273					goto found_phy;
3274				}
3275			}
3276found_mac:
3277			found |= VPD_FOUND_MAC;
3278			goto next;
3279
3280found_phy:
3281			found |= VPD_FOUND_PHY;
3282
3283next:
3284			p += klen;
3285		}
3286		i += len + 3;
3287	}
3288
3289use_random_mac_addr:
3290	if (found & VPD_FOUND_MAC)
3291		goto done;
3292
3293#if defined(CONFIG_SPARC)
3294	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3295	if (addr != NULL) {
3296		memcpy(dev_addr, addr, ETH_ALEN);
3297		goto done;
3298	}
3299#endif
3300
3301	/* Sun MAC prefix then 3 random bytes. */
3302	pr_info("MAC address not found in ROM VPD\n");
3303	dev_addr[0] = 0x08;
3304	dev_addr[1] = 0x00;
3305	dev_addr[2] = 0x20;
3306	get_random_bytes(dev_addr + 3, 3);
3307
3308done:
3309	writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3310	return phy_type;
3311}
3312
3313/* check pci invariants */
3314static void cas_check_pci_invariants(struct cas *cp)
3315{
3316	struct pci_dev *pdev = cp->pdev;
3317
3318	cp->cas_flags = 0;
3319	if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3320	    (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3321		if (pdev->revision >= CAS_ID_REVPLUS)
3322			cp->cas_flags |= CAS_FLAG_REG_PLUS;
3323		if (pdev->revision < CAS_ID_REVPLUS02u)
3324			cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3325
3326		/* Original Cassini supports HW CSUM, but it's not
3327		 * enabled by default as it can trigger TX hangs.
3328		 */
3329		if (pdev->revision < CAS_ID_REV2)
3330			cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3331	} else {
3332		/* Only sun has original cassini chips.  */
3333		cp->cas_flags |= CAS_FLAG_REG_PLUS;
3334
3335		/* We use a flag because the same phy might be externally
3336		 * connected.
3337		 */
3338		if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3339		    (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3340			cp->cas_flags |= CAS_FLAG_SATURN;
3341	}
3342}
3343
3344
3345static int cas_check_invariants(struct cas *cp)
3346{
3347	struct pci_dev *pdev = cp->pdev;
3348	u8 addr[ETH_ALEN];
3349	u32 cfg;
3350	int i;
3351
3352	/* get page size for rx buffers. */
3353	cp->page_order = 0;
3354#ifdef USE_PAGE_ORDER
3355	if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3356		/* see if we can allocate larger pages */
3357		struct page *page = alloc_pages(GFP_ATOMIC,
3358						CAS_JUMBO_PAGE_SHIFT -
3359						PAGE_SHIFT);
3360		if (page) {
3361			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3362			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3363		} else {
3364			pr_info("MTU limited to %d bytes\n", CAS_MAX_MTU);
3365		}
3366	}
3367#endif
3368	cp->page_size = (PAGE_SIZE << cp->page_order);
3369
3370	/* Fetch the FIFO configurations. */
3371	cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3372	cp->rx_fifo_size = RX_FIFO_SIZE;
3373
3374	/* finish phy determination. MDIO1 takes precedence over MDIO0 if
3375	 * they're both connected.
3376	 */
3377	cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn));
3378	eth_hw_addr_set(cp->dev, addr);
3379	if (cp->phy_type & CAS_PHY_SERDES) {
3380		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3381		return 0; /* no more checking needed */
3382	}
3383
3384	/* MII */
3385	cfg = readl(cp->regs + REG_MIF_CFG);
3386	if (cfg & MIF_CFG_MDIO_1) {
3387		cp->phy_type = CAS_PHY_MII_MDIO1;
3388	} else if (cfg & MIF_CFG_MDIO_0) {
3389		cp->phy_type = CAS_PHY_MII_MDIO0;
3390	}
3391
3392	cas_mif_poll(cp, 0);
3393	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3394
3395	for (i = 0; i < 32; i++) {
3396		u32 phy_id;
3397		int j;
3398
3399		for (j = 0; j < 3; j++) {
3400			cp->phy_addr = i;
3401			phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3402			phy_id |= cas_phy_read(cp, MII_PHYSID2);
3403			if (phy_id && (phy_id != 0xFFFFFFFF)) {
3404				cp->phy_id = phy_id;
3405				goto done;
3406			}
3407		}
3408	}
3409	pr_err("MII phy did not respond [%08x]\n",
3410	       readl(cp->regs + REG_MIF_STATE_MACHINE));
3411	return -1;
3412
3413done:
3414	/* see if we can do gigabit */
3415	cfg = cas_phy_read(cp, MII_BMSR);
3416	if ((cfg & CAS_BMSR_1000_EXTEND) &&
3417	    cas_phy_read(cp, CAS_MII_1000_EXTEND))
3418		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3419	return 0;
3420}
3421
3422/* Must be invoked under cp->lock. */
3423static inline void cas_start_dma(struct cas *cp)
3424{
3425	int i;
3426	u32 val;
3427	int txfailed = 0;
3428
3429	/* enable dma */
3430	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3431	writel(val, cp->regs + REG_TX_CFG);
3432	val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3433	writel(val, cp->regs + REG_RX_CFG);
3434
3435	/* enable the mac */
3436	val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3437	writel(val, cp->regs + REG_MAC_TX_CFG);
3438	val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3439	writel(val, cp->regs + REG_MAC_RX_CFG);
3440
3441	i = STOP_TRIES;
3442	while (i-- > 0) {
3443		val = readl(cp->regs + REG_MAC_TX_CFG);
3444		if ((val & MAC_TX_CFG_EN))
3445			break;
3446		udelay(10);
3447	}
3448	if (i < 0)
		txfailed = 1;
3449	i = STOP_TRIES;
3450	while (i-- > 0) {
3451		val = readl(cp->regs + REG_MAC_RX_CFG);
3452		if ((val & MAC_RX_CFG_EN)) {
3453			if (txfailed) {
3454				netdev_err(cp->dev,
3455					   "enabling mac failed [tx:%08x:%08x]\n",
3456					   readl(cp->regs + REG_MIF_STATE_MACHINE),
3457					   readl(cp->regs + REG_MAC_STATE_MACHINE));
3458			}
3459			goto enable_rx_done;
3460		}
3461		udelay(10);
3462	}
3463	netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3464		   (txfailed ? "tx,rx" : "rx"),
3465		   readl(cp->regs + REG_MIF_STATE_MACHINE),
3466		   readl(cp->regs + REG_MAC_STATE_MACHINE));
3467
3468enable_rx_done:
3469	cas_unmask_intr(cp); /* enable interrupts */
3470	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3471	writel(0, cp->regs + REG_RX_COMP_TAIL);
3472
3473	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3474		if (N_RX_DESC_RINGS > 1)
3475			writel(RX_DESC_RINGN_SIZE(1) - 4,
3476			       cp->regs + REG_PLUS_RX_KICK1);
3477	}
3478}
3479
3480/* Must be invoked under cp->lock. */
3481static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3482				   int *pause)
3483{
3484	u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3485	*fd     = (val & PCS_MII_LPA_FD) ? 1 : 0;
3486	*pause  = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3487	if (val & PCS_MII_LPA_ASYM_PAUSE)
3488		*pause |= 0x10;
3489	*spd = 1000;
3490}
3491
3492/* Must be invoked under cp->lock. */
3493static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3494				   int *pause)
3495{
3496	u32 val;
3497
3498	*fd = 0;
3499	*spd = 10;
3500	*pause = 0;
3501
3502	/* use GMII registers */
3503	val = cas_phy_read(cp, MII_LPA);
3504	if (val & CAS_LPA_PAUSE)
3505		*pause = 0x01;
3506
3507	if (val & CAS_LPA_ASYM_PAUSE)
3508		*pause |= 0x10;
3509
3510	if (val & LPA_DUPLEX)
3511		*fd = 1;
3512	if (val & LPA_100)
3513		*spd = 100;
3514
3515	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3516		val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3517		if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3518			*spd = 1000;
3519		if (val & CAS_LPA_1000FULL)
3520			*fd = 1;
3521	}
3522}
3523
3524/* A link-up condition has occurred, initialize and enable the
3525 * rest of the chip.
3526 *
3527 * Must be invoked under cp->lock.
3528 */
3529static void cas_set_link_modes(struct cas *cp)
3530{
3531	u32 val;
3532	int full_duplex, speed, pause;
3533
3534	full_duplex = 0;
3535	speed = 10;
3536	pause = 0;
3537
3538	if (CAS_PHY_MII(cp->phy_type)) {
3539		cas_mif_poll(cp, 0);
3540		val = cas_phy_read(cp, MII_BMCR);
3541		if (val & BMCR_ANENABLE) {
3542			cas_read_mii_link_mode(cp, &full_duplex, &speed,
3543					       &pause);
3544		} else {
3545			if (val & BMCR_FULLDPLX)
3546				full_duplex = 1;
3547
3548			if (val & BMCR_SPEED100)
3549				speed = 100;
3550			else if (val & CAS_BMCR_SPEED1000)
3551				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3552					1000 : 100;
3553		}
3554		cas_mif_poll(cp, 1);
3555
3556	} else {
3557		val = readl(cp->regs + REG_PCS_MII_CTRL);
3558		cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3559		if ((val & PCS_MII_AUTONEG_EN) == 0) {
3560			if (val & PCS_MII_CTRL_DUPLEX)
3561				full_duplex = 1;
3562		}
3563	}
3564
3565	netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3566		   speed, full_duplex ? "full" : "half");
3567
3568	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3569	if (CAS_PHY_MII(cp->phy_type)) {
3570		val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3571		if (!full_duplex)
3572			val |= MAC_XIF_DISABLE_ECHO;
3573	}
3574	if (full_duplex)
3575		val |= MAC_XIF_FDPLX_LED;
3576	if (speed == 1000)
3577		val |= MAC_XIF_GMII_MODE;
3578	writel(val, cp->regs + REG_MAC_XIF_CFG);
3579
3580	/* deal with carrier and collision detect. */
3581	val = MAC_TX_CFG_IPG_EN;
3582	if (full_duplex) {
3583		val |= MAC_TX_CFG_IGNORE_CARRIER;
3584		val |= MAC_TX_CFG_IGNORE_COLL;
3585	} else {
3586#ifndef USE_CSMA_CD_PROTO
3587		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3588		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3589#endif
3590	}
3591	/* val now set up for REG_MAC_TX_CFG */
3592
3593	/* If gigabit and half-duplex, enable carrier extension
3594	 * mode.  increase slot time to 512 bytes as well.
3595	 * else, disable it and make sure slot time is 64 bytes.
3596	 * also activate checksum bug workaround
3597	 */
3598	if ((speed == 1000) && !full_duplex) {
3599		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3600		       cp->regs + REG_MAC_TX_CFG);
3601
3602		val = readl(cp->regs + REG_MAC_RX_CFG);
3603		val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
3604		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3605		       cp->regs + REG_MAC_RX_CFG);
3606
3607		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3608
3609		cp->crc_size = 4;
3610		/* minimum size gigabit frame at half duplex */
3611		cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3612
3613	} else {
3614		writel(val, cp->regs + REG_MAC_TX_CFG);
3615
3616		/* checksum bug workaround. don't strip FCS when in
3617		 * half-duplex mode
3618		 */
3619		val = readl(cp->regs + REG_MAC_RX_CFG);
3620		if (full_duplex) {
3621			val |= MAC_RX_CFG_STRIP_FCS;
3622			cp->crc_size = 0;
3623			cp->min_frame_size = CAS_MIN_MTU;
3624		} else {
3625			val &= ~MAC_RX_CFG_STRIP_FCS;
3626			cp->crc_size = 4;
3627			cp->min_frame_size = CAS_MIN_FRAME;
3628		}
3629		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3630		       cp->regs + REG_MAC_RX_CFG);
3631		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3632	}
3633
3634	if (netif_msg_link(cp)) {
3635		if (pause & 0x01) {
3636			netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3637				    cp->rx_fifo_size,
3638				    cp->rx_pause_off,
3639				    cp->rx_pause_on);
3640		} else if (pause & 0x10) {
3641			netdev_info(cp->dev, "TX pause enabled\n");
3642		} else {
3643			netdev_info(cp->dev, "Pause is disabled\n");
3644		}
3645	}
3646
3647	val = readl(cp->regs + REG_MAC_CTRL_CFG);
3648	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3649	if (pause) { /* symmetric or asymmetric pause */
3650		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3651		if (pause & 0x01) { /* symmetric pause */
3652			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3653		}
3654	}
3655	writel(val, cp->regs + REG_MAC_CTRL_CFG);
3656	cas_start_dma(cp);
3657}
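/* Editorial note (not from the driver source): the "pause" word used
 * above is a small bit field -- the link-mode readers set 0x01 when
 * symmetric pause was negotiated and 0x10 when only transmit pause
 * applies, so cas_set_link_modes() enables SEND_PAUSE for either bit
 * but RECV_PAUSE only in the symmetric case.
 */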
3658
3659/* Must be invoked under cp->lock. */
3660static void cas_init_hw(struct cas *cp, int restart_link)
3661{
3662	if (restart_link)
3663		cas_phy_init(cp);
3664
3665	cas_init_pause_thresholds(cp);
3666	cas_init_mac(cp);
3667	cas_init_dma(cp);
3668
3669	if (restart_link) {
3670		/* Default aneg parameters */
3671		cp->timer_ticks = 0;
3672		cas_begin_auto_negotiation(cp, NULL);
3673	} else if (cp->lstate == link_up) {
3674		cas_set_link_modes(cp);
3675		netif_carrier_on(cp->dev);
3676	}
3677}
3678
3679/* Must be invoked under cp->lock. on earlier cassini boards,
3680 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
3681 * let it settle out, and then restore pci state.
3682 */
3683static void cas_hard_reset(struct cas *cp)
3684{
3685	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3686	udelay(20);
3687	pci_restore_state(cp->pdev);
3688}
3689
3690
3691static void cas_global_reset(struct cas *cp, int blkflag)
3692{
3693	int limit;
3694
3695	/* issue a global reset. don't use RSTOUT. */
3696	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3697		/* For PCS, when the blkflag is set, we should set the
3698		 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
3699		 * the last autonegotiation from being cleared.  We'll
3700		 * need some special handling if the chip is set into a
3701		 * loopback mode.
3702		 */
3703		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3704		       cp->regs + REG_SW_RESET);
3705	} else {
3706		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3707	}
3708
3709	/* need to wait at least 3ms before polling register */
3710	mdelay(3);
3711
3712	limit = STOP_TRIES;
3713	while (limit-- > 0) {
3714		u32 val = readl(cp->regs + REG_SW_RESET);
3715		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3716			goto done;
3717		udelay(10);
3718	}
3719	netdev_err(cp->dev, "sw reset failed\n");
3720
3721done:
3722	/* enable various BIM interrupts */
3723	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3724	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3725
3726	/* clear out pci error status mask for handled errors.
3727	 * we don't deal with DMA counter overflows as they happen
3728	 * all the time.
3729	 */
3730	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3731			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3732			       PCI_ERR_BIM_DMA_READ), cp->regs +
3733	       REG_PCI_ERR_STATUS_MASK);
3734
3735	/* set up for MII by default to address mac rx reset timeout
3736	 * issue
3737	 */
3738	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3739}
3740
3741static void cas_reset(struct cas *cp, int blkflag)
3742{
3743	u32 val;
3744
3745	cas_mask_intr(cp);
3746	cas_global_reset(cp, blkflag);
3747	cas_mac_reset(cp);
3748	cas_entropy_reset(cp);
3749
3750	/* disable dma engines. */
3751	val = readl(cp->regs + REG_TX_CFG);
3752	val &= ~TX_CFG_DMA_EN;
3753	writel(val, cp->regs + REG_TX_CFG);
3754
3755	val = readl(cp->regs + REG_RX_CFG);
3756	val &= ~RX_CFG_DMA_EN;
3757	writel(val, cp->regs + REG_RX_CFG);
3758
3759	/* program header parser */
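	/* Editorial note: CAS_HP_ALT_FIRMWARE is #defined to
	 * cas_prog_null by default, so the address comparison below
	 * simply detects whether an alternate header-parser program
	 * was configured at build time.
	 */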
3760	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3761	    (&CAS_HP_ALT_FIRMWARE[0] == &cas_prog_null[0])) {
3762		cas_load_firmware(cp, CAS_HP_FIRMWARE);
3763	} else {
3764		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3765	}
3766
3767	/* clear out error registers */
3768	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3769	cas_clear_mac_err(cp);
3770	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3771}
3772
3773/* Shut down the chip, must be called with pm_mutex held.  */
3774static void cas_shutdown(struct cas *cp)
3775{
3776	unsigned long flags;
3777
3778	/* Make us not-running to avoid timers respawning */
3779	cp->hw_running = 0;
3780
3781	del_timer_sync(&cp->link_timer);
3782
3783	/* Stop the reset task */
3784#if 0
3785	while (atomic_read(&cp->reset_task_pending_mtu) ||
3786	       atomic_read(&cp->reset_task_pending_spare) ||
3787	       atomic_read(&cp->reset_task_pending_all))
3788		schedule();
3789
3790#else
3791	while (atomic_read(&cp->reset_task_pending))
3792		schedule();
3793#endif
3794	/* Actually stop the chip */
3795	cas_lock_all_save(cp, flags);
3796	cas_reset(cp, 0);
3797	if (cp->cas_flags & CAS_FLAG_SATURN)
3798		cas_phy_powerdown(cp);
3799	cas_unlock_all_restore(cp, flags);
3800}
3801
3802static int cas_change_mtu(struct net_device *dev, int new_mtu)
3803{
3804	struct cas *cp = netdev_priv(dev);
3805
3806	dev->mtu = new_mtu;
3807	if (!netif_running(dev) || !netif_device_present(dev))
3808		return 0;
3809
3810	/* let the reset task handle it */
3811#if 1
3812	atomic_inc(&cp->reset_task_pending);
3813	if ((cp->phy_type & CAS_PHY_SERDES)) {
3814		atomic_inc(&cp->reset_task_pending_all);
3815	} else {
3816		atomic_inc(&cp->reset_task_pending_mtu);
3817	}
3818	schedule_work(&cp->reset_task);
3819#else
3820	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3821		   CAS_RESET_ALL : CAS_RESET_MTU);
3822	pr_err("reset called in cas_change_mtu\n");
3823	schedule_work(&cp->reset_task);
3824#endif
3825
3826	flush_work(&cp->reset_task);
3827	return 0;
3828}
3829
3830static void cas_clean_txd(struct cas *cp, int ring)
3831{
3832	struct cas_tx_desc *txd = cp->init_txds[ring];
3833	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3834	u64 daddr, dlen;
3835	int i, size;
3836
3837	size = TX_DESC_RINGN_SIZE(ring);
3838	for (i = 0; i < size; i++) {
3839		int frag;
3840
3841		if (skbs[i] == NULL)
3842			continue;
3843
3844		skb = skbs[i];
3845		skbs[i] = NULL;
3846
3847		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags;  frag++) {
3848			int ent = i & (size - 1);
3849
3850			/* first buffer is never a tiny buffer and so
3851			 * needs to be unmapped.
3852			 */
3853			daddr = le64_to_cpu(txd[ent].buffer);
3854			dlen  =  CAS_VAL(TX_DESC_BUFLEN,
3855					 le64_to_cpu(txd[ent].control));
3856			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
3857				       DMA_TO_DEVICE);
3858
3859			if (frag != skb_shinfo(skb)->nr_frags) {
3860				i++;
3861
3862			/* next buffer might be a tiny buffer.
3863				 * skip past it.
3864				 */
3865				ent = i & (size - 1);
3866				if (cp->tx_tiny_use[ring][ent].used)
3867					i++;
3868			}
3869		}
3870		dev_kfree_skb_any(skb);
3871	}
3872
3873	/* zero out tiny buf usage */
3874	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3875}
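/* Editorial note: the "i & (size - 1)" arithmetic above relies on the
 * descriptor ring sizes being powers of two, so the mask is a cheap
 * modulo, e.g. with size == 64: 67 & 63 == 3 == 67 % 64.
 */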
3876
3877/* freed on close */
3878static inline void cas_free_rx_desc(struct cas *cp, int ring)
3879{
3880	cas_page_t **page = cp->rx_pages[ring];
3881	int i, size;
3882
3883	size = RX_DESC_RINGN_SIZE(ring);
3884	for (i = 0; i < size; i++) {
3885		if (page[i]) {
3886			cas_page_free(cp, page[i]);
3887			page[i] = NULL;
3888		}
3889	}
3890}
3891
3892static void cas_free_rxds(struct cas *cp)
3893{
3894	int i;
3895
3896	for (i = 0; i < N_RX_DESC_RINGS; i++)
3897		cas_free_rx_desc(cp, i);
3898}
3899
3900/* Must be invoked under cp->lock. */
3901static void cas_clean_rings(struct cas *cp)
3902{
3903	int i;
3904
3905	/* need to clean all tx rings */
3906	memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3907	memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3908	for (i = 0; i < N_TX_RINGS; i++)
3909		cas_clean_txd(cp, i);
3910
3911	/* zero out init block */
3912	memset(cp->init_block, 0, sizeof(struct cas_init_block));
3913	cas_clean_rxds(cp);
3914	cas_clean_rxcs(cp);
3915}
3916
3917/* allocated on open */
3918static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3919{
3920	cas_page_t **page = cp->rx_pages[ring];
3921	int size, i = 0;
3922
3923	size = RX_DESC_RINGN_SIZE(ring);
3924	for (i = 0; i < size; i++) {
3925		if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3926			return -1;
3927	}
3928	return 0;
3929}
3930
3931static int cas_alloc_rxds(struct cas *cp)
3932{
3933	int i;
3934
3935	for (i = 0; i < N_RX_DESC_RINGS; i++) {
3936		if (cas_alloc_rx_desc(cp, i) < 0) {
3937			cas_free_rxds(cp);
3938			return -1;
3939		}
3940	}
3941	return 0;
3942}
3943
3944static void cas_reset_task(struct work_struct *work)
3945{
3946	struct cas *cp = container_of(work, struct cas, reset_task);
3947#if 0
3948	int pending = atomic_read(&cp->reset_task_pending);
3949#else
3950	int pending_all = atomic_read(&cp->reset_task_pending_all);
3951	int pending_spare = atomic_read(&cp->reset_task_pending_spare);
3952	int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
3953
3954	if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
3955		/* We can have more tasks scheduled than actually
3956		 * needed.
3957		 */
3958		atomic_dec(&cp->reset_task_pending);
3959		return;
3960	}
3961#endif
3962	/* The link went down; we reset the ring, but keep
3963	 * DMA stopped. Use this function for reset
3964	 * on error as well.
3965	 */
3966	if (cp->hw_running) {
3967		unsigned long flags;
3968
3969		/* Make sure we don't get interrupts or tx packets */
3970		netif_device_detach(cp->dev);
3971		cas_lock_all_save(cp, flags);
3972
3973		if (cp->opened) {
3974			/* We call cas_spare_recover when we call cas_open,
3975			 * but we do not initialize the lists cas_spare_recover
3976			 * uses until cas_open is called.
3977			 */
3978			cas_spare_recover(cp, GFP_ATOMIC);
3979		}
3980#if 1
3981		/* test => only pending_spare set */
3982		if (!pending_all && !pending_mtu)
3983			goto done;
3984#else
3985		if (pending == CAS_RESET_SPARE)
3986			goto done;
3987#endif
3988		/* when pending == CAS_RESET_ALL, the following
3989		 * call to cas_init_hw will restart auto negotiation.
3990		 * Passing !(pending == CAS_RESET_ALL) as the second
3991		 * argument of cas_reset makes that argument 1 whenever
3992		 * auto negotiation is not being restarted, which avoids
3993		 * reinitializing the PHY in the normal PCS case.
3994		 */
3995#if 1
3996		cas_reset(cp, !(pending_all > 0));
3997		if (cp->opened)
3998			cas_clean_rings(cp);
3999		cas_init_hw(cp, (pending_all > 0));
4000#else
4001		cas_reset(cp, !(pending == CAS_RESET_ALL));
4002		if (cp->opened)
4003			cas_clean_rings(cp);
4004		cas_init_hw(cp, pending == CAS_RESET_ALL);
4005#endif
4006
4007done:
4008		cas_unlock_all_restore(cp, flags);
4009		netif_device_attach(cp->dev);
4010	}
4011#if 1
4012	atomic_sub(pending_all, &cp->reset_task_pending_all);
4013	atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4014	atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4015	atomic_dec(&cp->reset_task_pending);
4016#else
4017	atomic_set(&cp->reset_task_pending, 0);
4018#endif
4019}
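/* Illustrative sketch (editorial, not from the driver source): the
 * pattern callers such as cas_change_mtu() and cas_link_timer() use
 * to request a full reset -- bump the master pending counter plus the
 * counter for the specific reset type, then schedule the work item
 * that runs cas_reset_task() above.  The helper name is made up for
 * illustration.
 */
#if 0
static void example_schedule_full_reset(struct cas *cp)
{
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	schedule_work(&cp->reset_task);
}
#endif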
4020
4021static void cas_link_timer(struct timer_list *t)
4022{
4023	struct cas *cp = from_timer(cp, t, link_timer);
4024	int mask, pending = 0, reset = 0;
4025	unsigned long flags;
4026
4027	if (link_transition_timeout != 0 &&
4028	    cp->link_transition_jiffies_valid &&
4029	    time_is_before_jiffies(cp->link_transition_jiffies +
4030	      link_transition_timeout)) {
4031		/* One-second counter so link-down workaround doesn't
4032		 * cause resets to occur so fast as to fool the switch
4033		 * into thinking the link is down.
4034		 */
4035		cp->link_transition_jiffies_valid = 0;
4036	}
4037
4038	if (!cp->hw_running)
4039		return;
4040
4041	spin_lock_irqsave(&cp->lock, flags);
4042	cas_lock_tx(cp);
4043	cas_entropy_gather(cp);
4044
4045	/* If the link task is still pending, we just
4046	 * reschedule the link timer
4047	 */
4048#if 1
4049	if (atomic_read(&cp->reset_task_pending_all) ||
4050	    atomic_read(&cp->reset_task_pending_spare) ||
4051	    atomic_read(&cp->reset_task_pending_mtu))
4052		goto done;
4053#else
4054	if (atomic_read(&cp->reset_task_pending))
4055		goto done;
4056#endif
4057
4058	/* check for rx cleaning */
4059	if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4060		int i, rmask;
4061
4062		for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4063			rmask = CAS_FLAG_RXD_POST(i);
4064			if ((mask & rmask) == 0)
4065				continue;
4066
4067			/* post_rxds will do a mod_timer */
4068			if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4069				pending = 1;
4070				continue;
4071			}
4072			cp->cas_flags &= ~rmask;
4073		}
4074	}
4075
4076	if (CAS_PHY_MII(cp->phy_type)) {
4077		u16 bmsr;
4078		cas_mif_poll(cp, 0);
4079		bmsr = cas_phy_read(cp, MII_BMSR);
4080		/* WTZ: Solaris driver reads this twice, but that
4081		 * may be due to the PCS case and the use of a
4082		 * common implementation. Read it twice here to be
4083		 * safe.
4084		 */
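		/* (Reading twice is also consistent with the BMSR
		 * link-status bit being latched-low per IEEE 802.3:
		 * the first read returns any latched failure, the
		 * second the current state.)
		 */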
4085		bmsr = cas_phy_read(cp, MII_BMSR);
4086		cas_mif_poll(cp, 1);
4087		readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
4088		reset = cas_mii_link_check(cp, bmsr);
4089	} else {
4090		reset = cas_pcs_link_check(cp);
4091	}
4092
4093	if (reset)
4094		goto done;
4095
4096	/* check for tx state machine confusion */
4097	if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4098		u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4099		u32 wptr, rptr;
4100		int tlm  = CAS_VAL(MAC_SM_TLM, val);
4101
4102		if (((tlm == 0x5) || (tlm == 0x3)) &&
4103		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4104			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4105				     "tx err: MAC_STATE[%08x]\n", val);
4106			reset = 1;
4107			goto done;
4108		}
4109
4110		val  = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4111		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4112		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4113		if ((val == 0) && (wptr != rptr)) {
4114			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4115				     "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4116				     val, wptr, rptr);
4117			reset = 1;
4118		}
4119
4120		if (reset)
4121			cas_hard_reset(cp);
4122	}
4123
4124done:
4125	if (reset) {
4126#if 1
4127		atomic_inc(&cp->reset_task_pending);
4128		atomic_inc(&cp->reset_task_pending_all);
4129		schedule_work(&cp->reset_task);
4130#else
4131		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4132		pr_err("reset called in cas_link_timer\n");
4133		schedule_work(&cp->reset_task);
4134#endif
4135	}
4136
4137	if (!pending)
4138		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4139	cas_unlock_tx(cp);
4140	spin_unlock_irqrestore(&cp->lock, flags);
4141}
4142
4143/* tiny buffers are used to avoid target abort issues with
4144 * older cassini chips
4145 */
4146static void cas_tx_tiny_free(struct cas *cp)
4147{
4148	struct pci_dev *pdev = cp->pdev;
4149	int i;
4150
4151	for (i = 0; i < N_TX_RINGS; i++) {
4152		if (!cp->tx_tiny_bufs[i])
4153			continue;
4154
4155		dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
4156				  cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]);
4157		cp->tx_tiny_bufs[i] = NULL;
4158	}
4159}
4160
4161static int cas_tx_tiny_alloc(struct cas *cp)
4162{
4163	struct pci_dev *pdev = cp->pdev;
4164	int i;
4165
4166	for (i = 0; i < N_TX_RINGS; i++) {
4167		cp->tx_tiny_bufs[i] =
4168			dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
4169					   &cp->tx_tiny_dvma[i], GFP_KERNEL);
4170		if (!cp->tx_tiny_bufs[i]) {
4171			cas_tx_tiny_free(cp);
4172			return -1;
4173		}
4174	}
4175	return 0;
4176}
4177
4178
4179static int cas_open(struct net_device *dev)
4180{
4181	struct cas *cp = netdev_priv(dev);
4182	int hw_was_up, err;
4183	unsigned long flags;
4184
4185	mutex_lock(&cp->pm_mutex);
4186
4187	hw_was_up = cp->hw_running;
4188
4189	/* The power-management mutex protects the hw_running
4190	 * etc. state so it is safe to do this bit without cp->lock
4191	 */
4192	if (!cp->hw_running) {
4193		/* Reset the chip */
4194		cas_lock_all_save(cp, flags);
4195		/* We set the second arg to cas_reset to zero
4196		 * because cas_init_hw below will have its second
4197		 * argument set to non-zero, which will force
4198		 * autonegotiation to start.
4199		 */
4200		cas_reset(cp, 0);
4201		cp->hw_running = 1;
4202		cas_unlock_all_restore(cp, flags);
4203	}
4204
4205	err = -ENOMEM;
4206	if (cas_tx_tiny_alloc(cp) < 0)
4207		goto err_unlock;
4208
4209	/* alloc rx descriptors */
4210	if (cas_alloc_rxds(cp) < 0)
4211		goto err_tx_tiny;
4212
4213	/* allocate spares */
4214	cas_spare_init(cp);
4215	cas_spare_recover(cp, GFP_KERNEL);
4216
4217	/* We can now request the interrupt as we know it's masked
4218	 * on the controller. cassini+ has up to 4 interrupts
4219	 * that can be used, but you need to do explicit pci interrupt
4220	 * mapping to expose them
4221	 */
4222	if (request_irq(cp->pdev->irq, cas_interrupt,
4223			IRQF_SHARED, dev->name, (void *) dev)) {
4224		netdev_err(cp->dev, "failed to request irq!\n");
4225		err = -EAGAIN;
4226		goto err_spare;
4227	}
4228
4229#ifdef USE_NAPI
4230	napi_enable(&cp->napi);
4231#endif
4232	/* init hw */
4233	cas_lock_all_save(cp, flags);
4234	cas_clean_rings(cp);
4235	cas_init_hw(cp, !hw_was_up);
4236	cp->opened = 1;
4237	cas_unlock_all_restore(cp, flags);
4238
4239	netif_start_queue(dev);
4240	mutex_unlock(&cp->pm_mutex);
4241	return 0;
4242
4243err_spare:
4244	cas_spare_free(cp);
4245	cas_free_rxds(cp);
4246err_tx_tiny:
4247	cas_tx_tiny_free(cp);
4248err_unlock:
4249	mutex_unlock(&cp->pm_mutex);
4250	return err;
4251}
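/* Editorial note: the error labels above unwind in reverse order of
 * acquisition (irq -> spares -> rx descriptors -> tiny buffers), the
 * usual kernel goto-cleanup idiom, so each failure path frees only
 * what was successfully allocated.
 */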
4252
4253static int cas_close(struct net_device *dev)
4254{
4255	unsigned long flags;
4256	struct cas *cp = netdev_priv(dev);
4257
4258#ifdef USE_NAPI
4259	napi_disable(&cp->napi);
4260#endif
4261	/* Make sure we don't get distracted by suspend/resume */
4262	mutex_lock(&cp->pm_mutex);
4263
4264	netif_stop_queue(dev);
4265
4266	/* Stop traffic, mark us closed */
4267	cas_lock_all_save(cp, flags);
4268	cp->opened = 0;
4269	cas_reset(cp, 0);
4270	cas_phy_init(cp);
4271	cas_begin_auto_negotiation(cp, NULL);
4272	cas_clean_rings(cp);
4273	cas_unlock_all_restore(cp, flags);
4274
4275	free_irq(cp->pdev->irq, (void *) dev);
4276	cas_spare_free(cp);
4277	cas_free_rxds(cp);
4278	cas_tx_tiny_free(cp);
4279	mutex_unlock(&cp->pm_mutex);
4280	return 0;
4281}
4282
4283static struct {
4284	const char name[ETH_GSTRING_LEN];
4285} ethtool_cassini_statnames[] = {
4286	{"collisions"},
4287	{"rx_bytes"},
4288	{"rx_crc_errors"},
4289	{"rx_dropped"},
4290	{"rx_errors"},
4291	{"rx_fifo_errors"},
4292	{"rx_frame_errors"},
4293	{"rx_length_errors"},
4294	{"rx_over_errors"},
4295	{"rx_packets"},
4296	{"tx_aborted_errors"},
4297	{"tx_bytes"},
4298	{"tx_dropped"},
4299	{"tx_errors"},
4300	{"tx_fifo_errors"},
4301	{"tx_packets"}
4302};
4303#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4304
4305static struct {
4306	const int offsets;	/* neg. values for 2nd arg to cas_read_phy */
4307} ethtool_register_table[] = {
4308	{-MII_BMSR},
4309	{-MII_BMCR},
4310	{REG_CAWR},
4311	{REG_INF_BURST},
4312	{REG_BIM_CFG},
4313	{REG_RX_CFG},
4314	{REG_HP_CFG},
4315	{REG_MAC_TX_CFG},
4316	{REG_MAC_RX_CFG},
4317	{REG_MAC_CTRL_CFG},
4318	{REG_MAC_XIF_CFG},
4319	{REG_MIF_CFG},
4320	{REG_PCS_CFG},
4321	{REG_SATURN_PCFG},
4322	{REG_PCS_MII_STATUS},
4323	{REG_PCS_STATE_MACHINE},
4324	{REG_MAC_COLL_EXCESS},
4325	{REG_MAC_COLL_LATE}
4326};
4327#define CAS_REG_LEN 	ARRAY_SIZE(ethtool_register_table)
4328#define CAS_MAX_REGS 	(sizeof (u32)*CAS_REG_LEN)
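/* Editorial note: with the 18 entries above, CAS_MAX_REGS comes to
 * 18 * sizeof(u32) = 72 bytes of dump; the negated MII offsets are
 * fetched via cas_phy_read() rather than readl() in cas_read_regs()
 * below.
 */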
4329
4330static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4331{
4332	u8 *p;
4333	int i;
4334	unsigned long flags;
4335
4336	spin_lock_irqsave(&cp->lock, flags);
4337	for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
4338		u16 hval;
4339		u32 val;
4340		if (ethtool_register_table[i].offsets < 0) {
4341			hval = cas_phy_read(cp,
4342				    -ethtool_register_table[i].offsets);
4343			val = hval;
4344		} else {
4345			val= readl(cp->regs+ethtool_register_table[i].offsets);
4346		}
4347		memcpy(p, (u8 *)&val, sizeof(u32));
4348	}
4349	spin_unlock_irqrestore(&cp->lock, flags);
4350}
4351
4352static struct net_device_stats *cas_get_stats(struct net_device *dev)
4353{
4354	struct cas *cp = netdev_priv(dev);
4355	struct net_device_stats *stats = cp->net_stats;
4356	unsigned long flags;
4357	int i;
4358	unsigned long tmp;
4359
4360	/* we collate all of the stats into net_stats[N_TX_RINGS] */
4361	if (!cp->hw_running)
4362		return stats + N_TX_RINGS;
4363
4364	/* collect outstanding stats */
4365	/* WTZ: the Cassini spec gives these as 16 bit counters but
4366	 * stored in 32-bit words.  Added a mask of 0xffff to be safe,
4367	 * in case the chip somehow puts any garbage in the other bits.
4368	 * Also, counter usage didn't seem to match what Adrian did
4369	 * in the parts of the code that set these quantities. Made
4370	 * that consistent.
4371	 */
4372	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4373	stats[N_TX_RINGS].rx_crc_errors +=
4374	  readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4375	stats[N_TX_RINGS].rx_frame_errors +=
4376		readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
4377	stats[N_TX_RINGS].rx_length_errors +=
4378		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4379#if 1
4380	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4381		(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4382	stats[N_TX_RINGS].tx_aborted_errors += tmp;
4383	stats[N_TX_RINGS].collisions +=
4384	  tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4385#else
4386	stats[N_TX_RINGS].tx_aborted_errors +=
4387		readl(cp->regs + REG_MAC_COLL_EXCESS);
4388	stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4389		readl(cp->regs + REG_MAC_COLL_LATE);
4390#endif
4391	cas_clear_mac_err(cp);
4392
4393	/* saved bits that are unique to ring 0 */
4394	spin_lock(&cp->stat_lock[0]);
4395	stats[N_TX_RINGS].collisions        += stats[0].collisions;
4396	stats[N_TX_RINGS].rx_over_errors    += stats[0].rx_over_errors;
4397	stats[N_TX_RINGS].rx_frame_errors   += stats[0].rx_frame_errors;
4398	stats[N_TX_RINGS].rx_fifo_errors    += stats[0].rx_fifo_errors;
4399	stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4400	stats[N_TX_RINGS].tx_fifo_errors    += stats[0].tx_fifo_errors;
4401	spin_unlock(&cp->stat_lock[0]);
4402
4403	for (i = 0; i < N_TX_RINGS; i++) {
4404		spin_lock(&cp->stat_lock[i]);
4405		stats[N_TX_RINGS].rx_length_errors +=
4406			stats[i].rx_length_errors;
4407		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4408		stats[N_TX_RINGS].rx_packets    += stats[i].rx_packets;
4409		stats[N_TX_RINGS].tx_packets    += stats[i].tx_packets;
4410		stats[N_TX_RINGS].rx_bytes      += stats[i].rx_bytes;
4411		stats[N_TX_RINGS].tx_bytes      += stats[i].tx_bytes;
4412		stats[N_TX_RINGS].rx_errors     += stats[i].rx_errors;
4413		stats[N_TX_RINGS].tx_errors     += stats[i].tx_errors;
4414		stats[N_TX_RINGS].rx_dropped    += stats[i].rx_dropped;
4415		stats[N_TX_RINGS].tx_dropped    += stats[i].tx_dropped;
4416		memset(stats + i, 0, sizeof(struct net_device_stats));
4417		spin_unlock(&cp->stat_lock[i]);
4418	}
4419	spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4420	return stats + N_TX_RINGS;
4421}
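/* Editorial note: cp->net_stats is used here as an array of
 * N_TX_RINGS + 1 entries -- one per tx ring plus the aggregate slot
 * at index N_TX_RINGS, which cas_get_stats() returns after folding in
 * and zeroing the per-ring counters.
 */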
4422
4423
4424static void cas_set_multicast(struct net_device *dev)
4425{
4426	struct cas *cp = netdev_priv(dev);
4427	u32 rxcfg, rxcfg_new;
4428	unsigned long flags;
4429	int limit = STOP_TRIES;
4430
4431	if (!cp->hw_running)
4432		return;
4433
4434	spin_lock_irqsave(&cp->lock, flags);
4435	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4436
4437	/* disable RX MAC and wait for completion */
4438	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4439	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4440		if (!limit--)
4441			break;
4442		udelay(10);
4443	}
4444
4445	/* disable hash filter and wait for completion */
4446	limit = STOP_TRIES;
4447	rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4448	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4449	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4450		if (!limit--)
4451			break;
4452		udelay(10);
4453	}
4454
4455	/* program hash filters */
4456	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4457	rxcfg |= rxcfg_new;
4458	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4459	spin_unlock_irqrestore(&cp->lock, flags);
4460}
4461
4462static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4463{
4464	struct cas *cp = netdev_priv(dev);
4465	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4466	strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4467	strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4468}
4469
4470static int cas_get_link_ksettings(struct net_device *dev,
4471				  struct ethtool_link_ksettings *cmd)
4472{
4473	struct cas *cp = netdev_priv(dev);
4474	u16 bmcr;
4475	int full_duplex, speed, pause;
4476	unsigned long flags;
4477	enum link_state linkstate = link_up;
4478	u32 supported, advertising;
4479
4480	advertising = 0;
4481	supported = SUPPORTED_Autoneg;
4482	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4483		supported |= SUPPORTED_1000baseT_Full;
4484		advertising |= ADVERTISED_1000baseT_Full;
4485	}
4486
4487	/* Record PHY settings if HW is on. */
4488	spin_lock_irqsave(&cp->lock, flags);
4489	bmcr = 0;
4490	linkstate = cp->lstate;
4491	if (CAS_PHY_MII(cp->phy_type)) {
4492		cmd->base.port = PORT_MII;
4493		cmd->base.phy_address = cp->phy_addr;
4494		advertising |= ADVERTISED_TP | ADVERTISED_MII |
4495			ADVERTISED_10baseT_Half |
4496			ADVERTISED_10baseT_Full |
4497			ADVERTISED_100baseT_Half |
4498			ADVERTISED_100baseT_Full;
4499
4500		supported |=
4501			(SUPPORTED_10baseT_Half |
4502			 SUPPORTED_10baseT_Full |
4503			 SUPPORTED_100baseT_Half |
4504			 SUPPORTED_100baseT_Full |
4505			 SUPPORTED_TP | SUPPORTED_MII);
4506
4507		if (cp->hw_running) {
4508			cas_mif_poll(cp, 0);
4509			bmcr = cas_phy_read(cp, MII_BMCR);
4510			cas_read_mii_link_mode(cp, &full_duplex,
4511					       &speed, &pause);
4512			cas_mif_poll(cp, 1);
4513		}
4514
4515	} else {
4516		cmd->base.port = PORT_FIBRE;
4517		cmd->base.phy_address = 0;
4518		supported   |= SUPPORTED_FIBRE;
4519		advertising |= ADVERTISED_FIBRE;
4520
4521		if (cp->hw_running) {
4522			/* pcs uses the same bits as mii */
4523			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4524			cas_read_pcs_link_mode(cp, &full_duplex,
4525					       &speed, &pause);
4526		}
4527	}
4528	spin_unlock_irqrestore(&cp->lock, flags);
4529
4530	if (bmcr & BMCR_ANENABLE) {
4531		advertising |= ADVERTISED_Autoneg;
4532		cmd->base.autoneg = AUTONEG_ENABLE;
4533		cmd->base.speed =  ((speed == 10) ?
4534					    SPEED_10 :
4535					    ((speed == 1000) ?
4536					     SPEED_1000 : SPEED_100));
4537		cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4538	} else {
4539		cmd->base.autoneg = AUTONEG_DISABLE;
4540		cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
4541					    SPEED_1000 :
4542					    ((bmcr & BMCR_SPEED100) ?
4543					     SPEED_100 : SPEED_10));
4544		cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
4545			DUPLEX_FULL : DUPLEX_HALF;
4546	}
4547	if (linkstate != link_up) {
4548		/* Force these to "unknown" if the link is not up and
4549		 * autonegotiation is enabled. We can set the link
4550		 * speed to 0, but not cmd->duplex,
4551		 * because its legal values are 0 and 1.  Ethtool will
4552		 * print the value reported in parentheses after the
4553		 * word "Unknown" for unrecognized values.
4554		 *
4555		 * If in forced mode, we report the speed and duplex
4556		 * settings that we configured.
4557		 */
4558		if (cp->link_cntl & BMCR_ANENABLE) {
4559			cmd->base.speed = 0;
4560			cmd->base.duplex = 0xff;
4561		} else {
4562			cmd->base.speed = SPEED_10;
4563			if (cp->link_cntl & BMCR_SPEED100) {
4564				cmd->base.speed = SPEED_100;
4565			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4566				cmd->base.speed = SPEED_1000;
4567			}
4568			cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
4569				DUPLEX_FULL : DUPLEX_HALF;
4570		}
4571	}
4572
4573	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4574						supported);
4575	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4576						advertising);
4577
4578	return 0;
4579}
4580
4581static int cas_set_link_ksettings(struct net_device *dev,
4582				  const struct ethtool_link_ksettings *cmd)
4583{
4584	struct cas *cp = netdev_priv(dev);
4585	unsigned long flags;
4586	u32 speed = cmd->base.speed;
4587
4588	/* Verify the settings we care about. */
4589	if (cmd->base.autoneg != AUTONEG_ENABLE &&
4590	    cmd->base.autoneg != AUTONEG_DISABLE)
4591		return -EINVAL;
4592
4593	if (cmd->base.autoneg == AUTONEG_DISABLE &&
4594	    ((speed != SPEED_1000 &&
4595	      speed != SPEED_100 &&
4596	      speed != SPEED_10) ||
4597	     (cmd->base.duplex != DUPLEX_HALF &&
4598	      cmd->base.duplex != DUPLEX_FULL)))
4599		return -EINVAL;
4600
4601	/* Apply settings and restart link process. */
4602	spin_lock_irqsave(&cp->lock, flags);
4603	cas_begin_auto_negotiation(cp, cmd);
4604	spin_unlock_irqrestore(&cp->lock, flags);
4605	return 0;
4606}
4607
4608static int cas_nway_reset(struct net_device *dev)
4609{
4610	struct cas *cp = netdev_priv(dev);
4611	unsigned long flags;
4612
4613	if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4614		return -EINVAL;
4615
4616	/* Restart link process. */
4617	spin_lock_irqsave(&cp->lock, flags);
4618	cas_begin_auto_negotiation(cp, NULL);
4619	spin_unlock_irqrestore(&cp->lock, flags);
4620
4621	return 0;
4622}
4623
4624static u32 cas_get_link(struct net_device *dev)
4625{
4626	struct cas *cp = netdev_priv(dev);
4627	return cp->lstate == link_up;
4628}
4629
4630static u32 cas_get_msglevel(struct net_device *dev)
4631{
4632	struct cas *cp = netdev_priv(dev);
4633	return cp->msg_enable;
4634}
4635
4636static void cas_set_msglevel(struct net_device *dev, u32 value)
4637{
4638	struct cas *cp = netdev_priv(dev);
4639	cp->msg_enable = value;
4640}
4641
4642static int cas_get_regs_len(struct net_device *dev)
4643{
4644	struct cas *cp = netdev_priv(dev);
4645	return min_t(int, cp->casreg_len, CAS_MAX_REGS);
4646}
4647
4648static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4649			     void *p)
4650{
4651	struct cas *cp = netdev_priv(dev);
4652	regs->version = 0;
4653	/* cas_read_regs handles locks (cp->lock).  */
4654	cas_read_regs(cp, p, regs->len / sizeof(u32));
4655}
4656
4657static int cas_get_sset_count(struct net_device *dev, int sset)
4658{
4659	switch (sset) {
4660	case ETH_SS_STATS:
4661		return CAS_NUM_STAT_KEYS;
4662	default:
4663		return -EOPNOTSUPP;
4664	}
4665}
4666
4667static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4668{
4669	 memcpy(data, &ethtool_cassini_statnames,
4670					 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4671}
4672
4673static void cas_get_ethtool_stats(struct net_device *dev,
4674				      struct ethtool_stats *estats, u64 *data)
4675{
4676	struct cas *cp = netdev_priv(dev);
4677	struct net_device_stats *stats = cas_get_stats(cp->dev);
4678	int i = 0;
4679	data[i++] = stats->collisions;
4680	data[i++] = stats->rx_bytes;
4681	data[i++] = stats->rx_crc_errors;
4682	data[i++] = stats->rx_dropped;
4683	data[i++] = stats->rx_errors;
4684	data[i++] = stats->rx_fifo_errors;
4685	data[i++] = stats->rx_frame_errors;
4686	data[i++] = stats->rx_length_errors;
4687	data[i++] = stats->rx_over_errors;
4688	data[i++] = stats->rx_packets;
4689	data[i++] = stats->tx_aborted_errors;
4690	data[i++] = stats->tx_bytes;
4691	data[i++] = stats->tx_dropped;
4692	data[i++] = stats->tx_errors;
4693	data[i++] = stats->tx_fifo_errors;
4694	data[i++] = stats->tx_packets;
4695	BUG_ON(i != CAS_NUM_STAT_KEYS);
4696}
4697
4698static const struct ethtool_ops cas_ethtool_ops = {
4699	.get_drvinfo		= cas_get_drvinfo,
4700	.nway_reset		= cas_nway_reset,
4701	.get_link		= cas_get_link,
4702	.get_msglevel		= cas_get_msglevel,
4703	.set_msglevel		= cas_set_msglevel,
4704	.get_regs_len		= cas_get_regs_len,
4705	.get_regs		= cas_get_regs,
4706	.get_sset_count		= cas_get_sset_count,
4707	.get_strings		= cas_get_strings,
4708	.get_ethtool_stats	= cas_get_ethtool_stats,
4709	.get_link_ksettings	= cas_get_link_ksettings,
4710	.set_link_ksettings	= cas_set_link_ksettings,
4711};
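/* Editorial note: userspace reaches these handlers through the
 * standard ethtool commands, e.g. "ethtool -S eth0" maps to
 * get_ethtool_stats, "ethtool -d eth0" to get_regs, and
 * "ethtool -r eth0" to nway_reset.
 */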
4712
4713static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4714{
4715	struct cas *cp = netdev_priv(dev);
4716	struct mii_ioctl_data *data = if_mii(ifr);
4717	unsigned long flags;
4718	int rc = -EOPNOTSUPP;
4719
4720	/* Hold the PM mutex while doing ioctls or we may collide
4721	 * with open/close and power management and oops.
4722	 */
4723	mutex_lock(&cp->pm_mutex);
4724	switch (cmd) {
4725	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
4726		data->phy_id = cp->phy_addr;
4727		fallthrough;
4728
4729	case SIOCGMIIREG:		/* Read MII PHY register. */
4730		spin_lock_irqsave(&cp->lock, flags);
4731		cas_mif_poll(cp, 0);
4732		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4733		cas_mif_poll(cp, 1);
4734		spin_unlock_irqrestore(&cp->lock, flags);
4735		rc = 0;
4736		break;
4737
4738	case SIOCSMIIREG:		/* Write MII PHY register. */
4739		spin_lock_irqsave(&cp->lock, flags);
4740		cas_mif_poll(cp, 0);
4741		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4742		cas_mif_poll(cp, 1);
4743		spin_unlock_irqrestore(&cp->lock, flags);
4744		break;
4745	default:
4746		break;
4747	}
4748
4749	mutex_unlock(&cp->pm_mutex);
4750	return rc;
4751}
4752
4753/* When this chip sits underneath an Intel 31154 bridge, it is the
4754 * only subordinate device and we can tweak the bridge settings to
4755 * reflect that fact.
4756 */
4757static void cas_program_bridge(struct pci_dev *cas_pdev)
4758{
4759	struct pci_dev *pdev = cas_pdev->bus->self;
4760	u32 val;
4761
4762	if (!pdev)
4763		return;
4764
4765	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4766		return;
4767
4768	/* Clear bit 10 (Bus Parking Control) in the Secondary
4769	 * Arbiter Control/Status Register which lives at offset
4770	 * 0x41.  Using a 32-bit word read/modify/write at 0x40
4771	 * is much simpler so that's how we do this.
4772	 */
4773	pci_read_config_dword(pdev, 0x40, &val);
4774	val &= ~0x00040000;
4775	pci_write_config_dword(pdev, 0x40, val);
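	/* (The 16-bit register at 0x41 starts at bit 8 of the dword
	 * read at 0x40, so its bit 10 lands at dword bit 18 -- hence
	 * the 0x00040000 mask above.)
	 */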
4776
4777	/* Max out the Multi-Transaction Timer settings since
4778	 * Cassini is the only device present.
4779	 *
4780	 * The register is 16-bit and lives at 0x50.  When the
4781	 * settings are enabled, it extends the GRANT# signal
4782	 * for a requestor after a transaction is complete.  This
4783	 * allows the next request to run without first needing
4784	 * to negotiate the GRANT# signal back.
4785	 *
4786	 * Bits 12:10 define the grant duration:
4787	 *
4788	 *	1	--	16 clocks
4789	 *	2	--	32 clocks
4790	 *	3	--	64 clocks
4791	 *	4	--	128 clocks
4792	 *	5	--	256 clocks
4793	 *
4794	 * All other values are illegal.
4795	 *
4796	 * Bits 09:00 define which REQ/GNT signal pairs get the
4797	 * GRANT# signal treatment.  We set them all.
4798	 */
4799	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
4800
4801	/* The Read Prefetch Policy register is 16-bit and sits at
4802	 * offset 0x52.  It enables a "smart" pre-fetch policy.  We
4803	 * enable it and max out all of the settings since only one
4804	 * device is sitting underneath and thus bandwidth sharing is
4805	 * not an issue.
4806	 *
4807	 * The register has several 3 bit fields, which indicates a
4808	 * multiplier applied to the base amount of prefetching the
4809	 * chip would do.  These fields are at:
4810	 *
4811	 *	15:13	---	ReRead Primary Bus
4812	 *	12:10	---	FirstRead Primary Bus
4813	 *	09:07	---	ReRead Secondary Bus
4814	 *	06:04	---	FirstRead Secondary Bus
4815	 *
4816	 * Bits 03:00 control which REQ/GNT pairs the prefetch settings
4817	 * get enabled on.  Bit 3 is a grouped enabler which controls
4818	 * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
4819	 * the individual REQ/GNT pairs [2:0].
4820	 */
4821	pci_write_config_word(pdev, 0x52,
4822			      (0x7 << 13) |
4823			      (0x7 << 10) |
4824			      (0x7 <<  7) |
4825			      (0x7 <<  4) |
4826			      (0xf <<  0));
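	/* (The Multi-Transaction Timer write above resolves to 0x17ff
	 * -- grant duration 5 == 256 clocks with all ten REQ/GNT pairs
	 * enabled -- and this prefetch write to 0xffff, i.e. every
	 * multiplier and enable bit set.)
	 */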
4827
4828	/* Force cacheline size to 0x8 */
4829	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4830
4831	/* Force latency timer to maximum setting so Cassini can
4832	 * sit on the bus as long as it likes.
4833	 */
4834	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4835}
4836
4837static const struct net_device_ops cas_netdev_ops = {
4838	.ndo_open		= cas_open,
4839	.ndo_stop		= cas_close,
4840	.ndo_start_xmit		= cas_start_xmit,
4841	.ndo_get_stats 		= cas_get_stats,
4842	.ndo_set_rx_mode	= cas_set_multicast,
4843	.ndo_eth_ioctl		= cas_ioctl,
4844	.ndo_tx_timeout		= cas_tx_timeout,
4845	.ndo_change_mtu		= cas_change_mtu,
4846	.ndo_set_mac_address	= eth_mac_addr,
4847	.ndo_validate_addr	= eth_validate_addr,
4848#ifdef CONFIG_NET_POLL_CONTROLLER
4849	.ndo_poll_controller	= cas_netpoll,
4850#endif
4851};
4852
4853static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4854{
4855	static int cas_version_printed = 0;
4856	unsigned long casreg_len;
4857	struct net_device *dev;
4858	struct cas *cp;
4859	u16 pci_cmd;
4860	int i, err;
4861	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4862
4863	if (cas_version_printed++ == 0)
4864		pr_info("%s", version);
4865
4866	err = pci_enable_device(pdev);
4867	if (err) {
4868		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4869		return err;
4870	}
4871
4872	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4873		dev_err(&pdev->dev, "Cannot find proper PCI device "
4874		       "base address, aborting\n");
4875		err = -ENODEV;
4876		goto err_out_disable_pdev;
4877	}
4878
4879	dev = alloc_etherdev(sizeof(*cp));
4880	if (!dev) {
4881		err = -ENOMEM;
4882		goto err_out_disable_pdev;
4883	}
4884	SET_NETDEV_DEV(dev, &pdev->dev);
4885
4886	err = pci_request_regions(pdev, dev->name);
4887	if (err) {
4888		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4889		goto err_out_free_netdev;
4890	}
4891	pci_set_master(pdev);
4892
4893	/* we must always turn on parity response or else parity
4894	 * doesn't get generated properly. disable SERR/PERR as well.
4895	 * in addition, we want to turn MWI on.
4896	 */
4897	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4898	pci_cmd &= ~PCI_COMMAND_SERR;
4899	pci_cmd |= PCI_COMMAND_PARITY;
4900	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4901	if (pci_try_set_mwi(pdev))
4902		pr_warn("Could not enable MWI for %s\n", pci_name(pdev));
4903
4904	cas_program_bridge(pdev);
4905
4906	/*
4907	 * On some architectures, the default cache line size set
4908	 * by pci_try_set_mwi reduces performance.  We have to increase
4909	 * it for this case.  To start, we'll print some configuration
4910	 * data.
4911	 */
4912#if 1
4913	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4914			     &orig_cacheline_size);
4915	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4916		cas_cacheline_size =
4917			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4918			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4919		if (pci_write_config_byte(pdev,
4920					  PCI_CACHE_LINE_SIZE,
4921					  cas_cacheline_size)) {
4922			dev_err(&pdev->dev, "Could not set PCI cache "
4923			       "line size\n");
4924			goto err_out_free_res;
4925		}
4926	}
4927#endif
4928
4929
4930	/* Configure DMA attributes. */
4931	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4932	if (err) {
4933		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4934		goto err_out_free_res;
4935	}
4936
4937	casreg_len = pci_resource_len(pdev, 0);
4938
4939	cp = netdev_priv(dev);
4940	cp->pdev = pdev;
4941#if 1
4942	/* A value of 0 indicates we never explicitly set it */
4943	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
4944#endif
4945	cp->dev = dev;
4946	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
4947	  cassini_debug;
4948
4949#if defined(CONFIG_SPARC)
4950	cp->of_node = pci_device_to_OF_node(pdev);
4951#endif
4952
4953	cp->link_transition = LINK_TRANSITION_UNKNOWN;
4954	cp->link_transition_jiffies_valid = 0;
4955
4956	spin_lock_init(&cp->lock);
4957	spin_lock_init(&cp->rx_inuse_lock);
4958	spin_lock_init(&cp->rx_spare_lock);
4959	for (i = 0; i < N_TX_RINGS; i++) {
4960		spin_lock_init(&cp->stat_lock[i]);
4961		spin_lock_init(&cp->tx_lock[i]);
4962	}
4963	spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
4964	mutex_init(&cp->pm_mutex);
4965
4966	timer_setup(&cp->link_timer, cas_link_timer, 0);
4967
4968#if 1
4969	/* Just in case the implementation of atomic operations
4970	 * changes so that an explicit initialization is necessary.
4971	 */
4972	atomic_set(&cp->reset_task_pending, 0);
4973	atomic_set(&cp->reset_task_pending_all, 0);
4974	atomic_set(&cp->reset_task_pending_spare, 0);
4975	atomic_set(&cp->reset_task_pending_mtu, 0);
4976#endif
4977	INIT_WORK(&cp->reset_task, cas_reset_task);
4978
4979	/* Default link parameters */
4980	if (link_mode >= 0 && link_mode < 6)
4981		cp->link_cntl = link_modes[link_mode];
4982	else
4983		cp->link_cntl = BMCR_ANENABLE;
4984	cp->lstate = link_down;
4985	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
4986	netif_carrier_off(cp->dev);
4987	cp->timer_ticks = 0;
4988
4989	/* give us access to cassini registers */
4990	cp->regs = pci_iomap(pdev, 0, casreg_len);
4991	if (!cp->regs) {
4992		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
4993		goto err_out_free_res;
4994	}
4995	cp->casreg_len = casreg_len;
4996
4997	pci_save_state(pdev);
4998	cas_check_pci_invariants(cp);
4999	cas_hard_reset(cp);
5000	cas_reset(cp, 0);
5001	if (cas_check_invariants(cp))
5002		goto err_out_iounmap;
5003	if (cp->cas_flags & CAS_FLAG_SATURN)
5004		cas_saturn_firmware_init(cp);
5005
5006	cp->init_block =
5007		dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
5008				   &cp->block_dvma, GFP_KERNEL);
5009	if (!cp->init_block) {
5010		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5011		goto err_out_iounmap;
5012	}
5013
5014	for (i = 0; i < N_TX_RINGS; i++)
5015		cp->init_txds[i] = cp->init_block->txds[i];
5016
5017	for (i = 0; i < N_RX_DESC_RINGS; i++)
5018		cp->init_rxds[i] = cp->init_block->rxds[i];
5019
5020	for (i = 0; i < N_RX_COMP_RINGS; i++)
5021		cp->init_rxcs[i] = cp->init_block->rxcs[i];
5022
5023	for (i = 0; i < N_RX_FLOWS; i++)
5024		skb_queue_head_init(&cp->rx_flows[i]);
5025
5026	dev->netdev_ops = &cas_netdev_ops;
5027	dev->ethtool_ops = &cas_ethtool_ops;
5028	dev->watchdog_timeo = CAS_TX_TIMEOUT;
5029
5030#ifdef USE_NAPI
5031	netif_napi_add(dev, &cp->napi, cas_poll);
5032#endif
5033	dev->irq = pdev->irq;
5034	dev->dma = 0;
5035
5036	/* Cassini features. */
5037	if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5038		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5039
5040	dev->features |= NETIF_F_HIGHDMA;
5041
5042	/* MTU range: 60 - varies or 9000 */
5043	dev->min_mtu = CAS_MIN_MTU;
5044	dev->max_mtu = CAS_MAX_MTU;
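	/* Editorial note: CAS_MAX_MTU is min((page_size << 1) - 0x50,
	 * 9000), e.g. 8112 with 4K rx pages; the 9000-byte jumbo cap
	 * applies from 8K pages up.
	 */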
5045
5046	if (register_netdev(dev)) {
5047		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5048		goto err_out_free_consistent;
5049	}
5050
5051	i = readl(cp->regs + REG_BIM_CFG);
5052	netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5053		    (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5054		    (i & BIM_CFG_32BIT) ? "32" : "64",
5055		    (i & BIM_CFG_66MHZ) ? "66" : "33",
5056		    (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5057		    dev->dev_addr);
5058
5059	pci_set_drvdata(pdev, dev);
5060	cp->hw_running = 1;
5061	cas_entropy_reset(cp);
5062	cas_phy_init(cp);
5063	cas_begin_auto_negotiation(cp, NULL);
5064	return 0;
5065
5066err_out_free_consistent:
5067	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
5068			  cp->init_block, cp->block_dvma);
5069
5070err_out_iounmap:
5071	mutex_lock(&cp->pm_mutex);
5072	if (cp->hw_running)
5073		cas_shutdown(cp);
5074	mutex_unlock(&cp->pm_mutex);
5075
5076	vfree(cp->fw_data);
5077
5078	pci_iounmap(pdev, cp->regs);
5079
5080
5081err_out_free_res:
5082	pci_release_regions(pdev);
5083
5084	/* Try to restore it in case the error occurred after we
5085	 * set it.
5086	 */
5087	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5088
5089err_out_free_netdev:
5090	free_netdev(dev);
5091
5092err_out_disable_pdev:
5093	pci_disable_device(pdev);
5094	return -ENODEV;
5095}
5096
5097static void cas_remove_one(struct pci_dev *pdev)
5098{
5099	struct net_device *dev = pci_get_drvdata(pdev);
5100	struct cas *cp;
5101	if (!dev)
5102		return;
5103
5104	cp = netdev_priv(dev);
5105	unregister_netdev(dev);
5106
5107	vfree(cp->fw_data);
5108
5109	mutex_lock(&cp->pm_mutex);
5110	cancel_work_sync(&cp->reset_task);
5111	if (cp->hw_running)
5112		cas_shutdown(cp);
5113	mutex_unlock(&cp->pm_mutex);
5114
5115#if 1
5116	if (cp->orig_cacheline_size) {
5117		/* Restore the cache line size if we had modified
5118		 * it.
5119		 */
5120		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5121				      cp->orig_cacheline_size);
5122	}
5123#endif
5124	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
5125			  cp->init_block, cp->block_dvma);
5126	pci_iounmap(pdev, cp->regs);
5127	free_netdev(dev);
5128	pci_release_regions(pdev);
5129	pci_disable_device(pdev);
5130}
5131
5132static int __maybe_unused cas_suspend(struct device *dev_d)
5133{
5134	struct net_device *dev = dev_get_drvdata(dev_d);
5135	struct cas *cp = netdev_priv(dev);
5136	unsigned long flags;
5137
5138	mutex_lock(&cp->pm_mutex);
5139
5140	/* If the driver is opened, we stop the DMA */
5141	if (cp->opened) {
5142		netif_device_detach(dev);
5143
5144		cas_lock_all_save(cp, flags);
5145
5146		/* We can set the second arg of cas_reset to 0
5147		 * because on resume, we'll call cas_init_hw with
5148		 * its second arg set so that autonegotiation is
5149		 * restarted.
5150		 */
5151		cas_reset(cp, 0);
5152		cas_clean_rings(cp);
5153		cas_unlock_all_restore(cp, flags);
5154	}
5155
5156	if (cp->hw_running)
5157		cas_shutdown(cp);
5158	mutex_unlock(&cp->pm_mutex);
5159
5160	return 0;
5161}
5162
5163static int __maybe_unused cas_resume(struct device *dev_d)
5164{
5165	struct net_device *dev = dev_get_drvdata(dev_d);
5166	struct cas *cp = netdev_priv(dev);
5167
5168	netdev_info(dev, "resuming\n");
5169
5170	mutex_lock(&cp->pm_mutex);
5171	cas_hard_reset(cp);
5172	if (cp->opened) {
5173		unsigned long flags;
5174		cas_lock_all_save(cp, flags);
5175		cas_reset(cp, 0);
5176		cp->hw_running = 1;
5177		cas_clean_rings(cp);
5178		cas_init_hw(cp, 1);
5179		cas_unlock_all_restore(cp, flags);
5180
5181		netif_device_attach(dev);
5182	}
5183	mutex_unlock(&cp->pm_mutex);
5184	return 0;
5185}
5186
5187static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume);
5188
5189static struct pci_driver cas_driver = {
5190	.name		= DRV_MODULE_NAME,
5191	.id_table	= cas_pci_tbl,
5192	.probe		= cas_init_one,
5193	.remove		= cas_remove_one,
5194	.driver.pm	= &cas_pm_ops,
5195};
5196
5197static int __init cas_init(void)
5198{
5199	if (linkdown_timeout > 0)
5200		link_transition_timeout = linkdown_timeout * HZ;
5201	else
5202		link_transition_timeout = 0;
5203
5204	return pci_register_driver(&cas_driver);
5205}
5206
5207static void __exit cas_cleanup(void)
5208{
5209	pci_unregister_driver(&cas_driver);
5210}
5211
5212module_init(cas_init);
5213module_exit(cas_cleanup);
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
   3 *
   4 * Copyright (C) 2004 Sun Microsystems Inc.
   5 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License as
   9 * published by the Free Software Foundation; either version 2 of the
  10 * License, or (at your option) any later version.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
  19 *
  20 * This driver uses the sungem driver (c) David Miller
  21 * (davem@redhat.com) as its basis.
  22 *
  23 * The cassini chip has a number of features that distinguish it from
  24 * the gem chip:
  25 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
  26 *      load balancing (non-VLAN mode)
  27 *  batching of multiple packets
  28 *  multiple CPU dispatching
  29 *  page-based RX descriptor engine with separate completion rings
  30 *  Gigabit support (GMII and PCS interface)
  31 *  MIF link up/down detection works
  32 *
  33 * RX is handled by page sized buffers that are attached as fragments to
  34 * the skb. here's what's done:
  35 *  -- driver allocates pages at a time and keeps reference counts
  36 *     on them.
  37 *  -- the upper protocol layers assume that the header is in the skb
  38 *     itself. as a result, cassini will copy a small amount (64 bytes)
  39 *     to make them happy.
  40 *  -- driver appends the rest of the data pages as frags to skbuffs
  41 *     and increments the reference count
  42 *  -- on page reclamation, the driver swaps the page with a spare page.
  43 *     if that page is still in use, it frees its reference to that page,
  44 *     and allocates a new page for use. otherwise, it just recycles the
  45 *     the page.
  46 *
  47 * NOTE: cassini can parse the header. however, it's not worth it
  48 *       as long as the network stack requires a header copy.
  49 *
  50 * TX has 4 queues. currently these queues are used in a round-robin
  51 * fashion for load balancing. They can also be used for QoS. for that
  52 * to work, however, QoS information needs to be exposed down to the driver
  53 * level so that subqueues get targeted to particular transmit rings.
  54 * alternatively, the queues can be configured via use of the all-purpose
  55 * ioctl.
  56 *
  57 * RX DATA: the rx completion ring has all the info, but the rx desc
  58 * ring has all of the data. RX can conceivably come in under multiple
  59 * interrupts, but the INT# assignment needs to be set up properly by
  60 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
  61 * that. also, the two descriptor rings are designed to distinguish between
  62 * encrypted and non-encrypted packets, but we use them for buffering
  63 * instead.
  64 *
  65 * by default, the selective clear mask is set up to process rx packets.
  66 */
  67
  68#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  69
  70#include <linux/module.h>
  71#include <linux/kernel.h>
  72#include <linux/types.h>
  73#include <linux/compiler.h>
  74#include <linux/slab.h>
  75#include <linux/delay.h>
  76#include <linux/init.h>
  77#include <linux/interrupt.h>
  78#include <linux/vmalloc.h>
  79#include <linux/ioport.h>
  80#include <linux/pci.h>
  81#include <linux/mm.h>
  82#include <linux/highmem.h>
  83#include <linux/list.h>
  84#include <linux/dma-mapping.h>
  85
  86#include <linux/netdevice.h>
  87#include <linux/etherdevice.h>
  88#include <linux/skbuff.h>
  89#include <linux/ethtool.h>
  90#include <linux/crc32.h>
  91#include <linux/random.h>
  92#include <linux/mii.h>
  93#include <linux/ip.h>
  94#include <linux/tcp.h>
  95#include <linux/mutex.h>
  96#include <linux/firmware.h>
  97
  98#include <net/checksum.h>
  99
 100#include <linux/atomic.h>
 101#include <asm/io.h>
 102#include <asm/byteorder.h>
 103#include <linux/uaccess.h>
 
 104
 105#define cas_page_map(x)      kmap_atomic((x))
 106#define cas_page_unmap(x)    kunmap_atomic((x))
 107#define CAS_NCPUS            num_online_cpus()
 108
 109#define cas_skb_release(x)  netif_rx(x)
 110
 111/* select which firmware to use */
 112#define USE_HP_WORKAROUND
 113#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
 114#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */
 115
 116#include "cassini.h"
 117
 118#define USE_TX_COMPWB      /* use completion writeback registers */
 119#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
 120#define USE_RX_BLANK       /* hw interrupt mitigation */
 121#undef USE_ENTROPY_DEV     /* don't test for entropy device */
 122
 123/* NOTE: these aren't useable unless PCI interrupts can be assigned.
 124 * also, we need to make cp->lock finer-grained.
 125 */
 126#undef  USE_PCI_INTB
 127#undef  USE_PCI_INTC
 128#undef  USE_PCI_INTD
 129#undef  USE_QOS
 130
 131#undef  USE_VPD_DEBUG       /* debug vpd information if defined */
 132
 133/* rx processing options */
 134#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
 135#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
 136#define RX_COPY_ALWAYS 0    /* if 0, use frags */
 137#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
 138#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
 139
 140#define DRV_MODULE_NAME		"cassini"
 141#define DRV_MODULE_VERSION	"1.6"
 142#define DRV_MODULE_RELDATE	"21 May 2008"
 143
 144#define CAS_DEF_MSG_ENABLE	  \
 145	(NETIF_MSG_DRV		| \
 146	 NETIF_MSG_PROBE	| \
 147	 NETIF_MSG_LINK		| \
 148	 NETIF_MSG_TIMER	| \
 149	 NETIF_MSG_IFDOWN	| \
 150	 NETIF_MSG_IFUP		| \
 151	 NETIF_MSG_RX_ERR	| \
 152	 NETIF_MSG_TX_ERR)
 153
 154/* length of time before we decide the hardware is borked,
 155 * and dev->tx_timeout() should be called to fix the problem
 156 */
 157#define CAS_TX_TIMEOUT			(HZ)
 158#define CAS_LINK_TIMEOUT                (22*HZ/10)
 159#define CAS_LINK_FAST_TIMEOUT           (1)
 160
 161/* timeout values for state changing. these specify the number
 162 * of 10us delays to be used before giving up.
 163 */
 164#define STOP_TRIES_PHY 1000
 165#define STOP_TRIES     5000
 166
 167/* specify a minimum frame size to deal with some fifo issues
 168 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 169 *            2 * page_size - 0x50
 170 */
 171#define CAS_MIN_FRAME			97
 172#define CAS_1000MB_MIN_FRAME            255
 173#define CAS_MIN_MTU                     60
 174#define CAS_MAX_MTU                     min(((cp->page_size << 1) - 0x50), 9000)
 175
 176#if 1
 177/*
 178 * Eliminate these and use separate atomic counters for each, to
 179 * avoid a race condition.
 180 */
 181#else
 182#define CAS_RESET_MTU                   1
 183#define CAS_RESET_ALL                   2
 184#define CAS_RESET_SPARE                 3
 185#endif
 186
 187static char version[] =
 188	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 189
 190static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
 191static int link_mode;
 192
 193MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
 194MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
 195MODULE_LICENSE("GPL");
 196MODULE_FIRMWARE("sun/cassini.bin");
 197module_param(cassini_debug, int, 0);
 198MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
 199module_param(link_mode, int, 0);
 200MODULE_PARM_DESC(link_mode, "default link mode");
 201
 202/*
 203 * Work around for a PCS bug in which the link goes down due to the chip
 204 * being confused and never showing a link status of "up."
 205 */
 206#define DEFAULT_LINKDOWN_TIMEOUT 5
 207/*
 208 * Value in seconds, for user input.
 209 */
 210static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
 211module_param(linkdown_timeout, int, 0);
 212MODULE_PARM_DESC(linkdown_timeout,
 213"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
 214
 215/*
 216 * value in 'ticks' (units used by jiffies). Set when we init the
 217 * module because 'HZ' in actually a function call on some flavors of
 218 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 219 */
 220static int link_transition_timeout;
 221
 222
 223
 224static u16 link_modes[] = {
 225	BMCR_ANENABLE,			 /* 0 : autoneg */
 226	0,				 /* 1 : 10bt half duplex */
 227	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
 228	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
 229	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
 230	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
 231};
 232
 233static const struct pci_device_id cas_pci_tbl[] = {
 234	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
 235	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 236	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
 237	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 238	{ 0, }
 239};
 240
 241MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
 242
 243static void cas_set_link_modes(struct cas *cp);
 244
 245static inline void cas_lock_tx(struct cas *cp)
 246{
 247	int i;
 248
 249	for (i = 0; i < N_TX_RINGS; i++)
 250		spin_lock_nested(&cp->tx_lock[i], i);
 251}
 252
 253static inline void cas_lock_all(struct cas *cp)
 254{
 255	spin_lock_irq(&cp->lock);
 256	cas_lock_tx(cp);
 257}
 258
 259/* WTZ: QA was finding deadlock problems with the previous
 260 * versions after long test runs with multiple cards per machine.
 261 * See if replacing cas_lock_all with safer versions helps. The
 262 * symptoms QA is reporting match those we'd expect if interrupts
 263 * aren't being properly restored, and we fixed a previous deadlock
 264 * with similar symptoms by using save/restore versions in other
 265 * places.
 266 */
 267#define cas_lock_all_save(cp, flags) \
 268do { \
 269	struct cas *xxxcp = (cp); \
 270	spin_lock_irqsave(&xxxcp->lock, flags); \
 271	cas_lock_tx(xxxcp); \
 272} while (0)
 273
 274static inline void cas_unlock_tx(struct cas *cp)
 275{
 276	int i;
 277
 278	for (i = N_TX_RINGS; i > 0; i--)
 279		spin_unlock(&cp->tx_lock[i - 1]);
 280}
 281
 282static inline void cas_unlock_all(struct cas *cp)
 283{
 284	cas_unlock_tx(cp);
 285	spin_unlock_irq(&cp->lock);
 286}
 287
 288#define cas_unlock_all_restore(cp, flags) \
 289do { \
 290	struct cas *xxxcp = (cp); \
 291	cas_unlock_tx(xxxcp); \
 292	spin_unlock_irqrestore(&xxxcp->lock, flags); \
 293} while (0)
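
/* Illustrative only (a sketch, not part of the original driver): the
 * intended pattern for the save/restore pair above.  The saved flags
 * guarantee the caller's interrupt state is reinstated exactly, which
 * is what the deadlock note above is about.
 */
static inline void cas_example_locked_op(struct cas *cp)
{
	unsigned long flags;

	cas_lock_all_save(cp, flags);
	/* ... touch state protected by cp->lock and the tx locks ... */
	cas_unlock_all_restore(cp, flags);
}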
 294
 295static void cas_disable_irq(struct cas *cp, const int ring)
 296{
 297	/* Make sure we won't get any more interrupts */
 298	if (ring == 0) {
 299		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
 300		return;
 301	}
 302
 303	/* disable completion interrupts and selectively mask */
 304	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 305		switch (ring) {
 306#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 307#ifdef USE_PCI_INTB
 308		case 1:
 309#endif
 310#ifdef USE_PCI_INTC
 311		case 2:
 312#endif
 313#ifdef USE_PCI_INTD
 314		case 3:
 315#endif
 316			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
 317			       cp->regs + REG_PLUS_INTRN_MASK(ring));
 318			break;
 319#endif
 320		default:
 321			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
 322			       REG_PLUS_INTRN_MASK(ring));
 323			break;
 324		}
 325	}
 326}
 327
 328static inline void cas_mask_intr(struct cas *cp)
 329{
 330	int i;
 331
 332	for (i = 0; i < N_RX_COMP_RINGS; i++)
 333		cas_disable_irq(cp, i);
 334}
 335
 336static void cas_enable_irq(struct cas *cp, const int ring)
 337{
 338	if (ring == 0) { /* all but TX_DONE */
 339		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
 340		return;
 341	}
 342
 343	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 344		switch (ring) {
 345#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 346#ifdef USE_PCI_INTB
 347		case 1:
 348#endif
 349#ifdef USE_PCI_INTC
 350		case 2:
 351#endif
 352#ifdef USE_PCI_INTD
 353		case 3:
 354#endif
 355			writel(INTRN_MASK_RX_EN, cp->regs +
 356			       REG_PLUS_INTRN_MASK(ring));
 357			break;
 358#endif
 359		default:
 360			break;
 361		}
 362	}
 363}
 364
 365static inline void cas_unmask_intr(struct cas *cp)
 366{
 367	int i;
 368
 369	for (i = 0; i < N_RX_COMP_RINGS; i++)
 370		cas_enable_irq(cp, i);
 371}
 372
 373static inline void cas_entropy_gather(struct cas *cp)
 374{
 375#ifdef USE_ENTROPY_DEV
 376	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
 377		return;
 378
 379	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
 380			    readl(cp->regs + REG_ENTROPY_IV),
 381			    sizeof(uint64_t)*8);
 382#endif
 383}
 384
 385static inline void cas_entropy_reset(struct cas *cp)
 386{
 387#ifdef USE_ENTROPY_DEV
 388	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
 389		return;
 390
 391	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
 392	       cp->regs + REG_BIM_LOCAL_DEV_EN);
 393	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
 394	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
 395
 396	/* if we read back 0x0, we don't have an entropy device */
 397	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
 398		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
 399#endif
 400}
 401
 402/* access to the phy. the following assumes that we've initialized the MIF to
 403 * be in frame rather than bit-bang mode
 404 */
 405static u16 cas_phy_read(struct cas *cp, int reg)
 406{
 407	u32 cmd;
 408	int limit = STOP_TRIES_PHY;
 409
 410	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
 411	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
 412	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
 413	cmd |= MIF_FRAME_TURN_AROUND_MSB;
 414	writel(cmd, cp->regs + REG_MIF_FRAME);
 415
 416	/* poll for completion */
 417	while (limit-- > 0) {
 418		udelay(10);
 419		cmd = readl(cp->regs + REG_MIF_FRAME);
 420		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
 421			return cmd & MIF_FRAME_DATA_MASK;
 422	}
 423	return 0xFFFF; /* -1 */
 424}
 425
 426static int cas_phy_write(struct cas *cp, int reg, u16 val)
 427{
 428	int limit = STOP_TRIES_PHY;
 429	u32 cmd;
 430
 431	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
 432	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
 433	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
 434	cmd |= MIF_FRAME_TURN_AROUND_MSB;
 435	cmd |= val & MIF_FRAME_DATA_MASK;
 436	writel(cmd, cp->regs + REG_MIF_FRAME);
 437
 438	/* poll for completion */
 439	while (limit-- > 0) {
 440		udelay(10);
 441		cmd = readl(cp->regs + REG_MIF_FRAME);
 442		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
 443			return 0;
 444	}
 445	return -1;
 446}
 447
 448static void cas_phy_powerup(struct cas *cp)
 449{
 450	u16 ctl = cas_phy_read(cp, MII_BMCR);
 451
 452	if ((ctl & BMCR_PDOWN) == 0)
 453		return;
 454	ctl &= ~BMCR_PDOWN;
 455	cas_phy_write(cp, MII_BMCR, ctl);
 456}
 457
 458static void cas_phy_powerdown(struct cas *cp)
 459{
 460	u16 ctl = cas_phy_read(cp, MII_BMCR);
 461
 462	if (ctl & BMCR_PDOWN)
 463		return;
 464	ctl |= BMCR_PDOWN;
 465	cas_phy_write(cp, MII_BMCR, ctl);
 466}
 467
 468/* cp->lock held. note: the last put_page will free the buffer */
 469static int cas_page_free(struct cas *cp, cas_page_t *page)
 470{
 471	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
 472		       PCI_DMA_FROMDEVICE);
 473	__free_pages(page->buffer, cp->page_order);
 474	kfree(page);
 475	return 0;
 476}
 477
 478#ifdef RX_COUNT_BUFFERS
 479#define RX_USED_ADD(x, y)       ((x)->used += (y))
 480#define RX_USED_SET(x, y)       ((x)->used  = (y))
 481#else
 482#define RX_USED_ADD(x, y)
 483#define RX_USED_SET(x, y)
 484#endif
 485
 486/* local page allocation routines for the receive buffers. jumbo pages
 487 * require at least 8K contiguous and 8K aligned buffers.
 488 */
 489static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
 490{
 491	cas_page_t *page;
 492
 493	page = kmalloc(sizeof(cas_page_t), flags);
 494	if (!page)
 495		return NULL;
 496
 497	INIT_LIST_HEAD(&page->list);
 498	RX_USED_SET(page, 0);
 499	page->buffer = alloc_pages(flags, cp->page_order);
 500	if (!page->buffer)
 501		goto page_err;
 502	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
 503				      cp->page_size, PCI_DMA_FROMDEVICE);
 504	return page;
 505
 506page_err:
 507	kfree(page);
 508	return NULL;
 509}
 510
 511/* initialize spare pool of rx buffers, but allocate during the open */
 512static void cas_spare_init(struct cas *cp)
 513{
	spin_lock(&cp->rx_inuse_lock);
 515	INIT_LIST_HEAD(&cp->rx_inuse_list);
 516	spin_unlock(&cp->rx_inuse_lock);
 517
 518	spin_lock(&cp->rx_spare_lock);
 519	INIT_LIST_HEAD(&cp->rx_spare_list);
 520	cp->rx_spares_needed = RX_SPARE_COUNT;
 521	spin_unlock(&cp->rx_spare_lock);
 522}
 523
 524/* used on close. free all the spare buffers. */
 525static void cas_spare_free(struct cas *cp)
 526{
 527	struct list_head list, *elem, *tmp;
 528
 529	/* free spare buffers */
 530	INIT_LIST_HEAD(&list);
 531	spin_lock(&cp->rx_spare_lock);
 532	list_splice_init(&cp->rx_spare_list, &list);
 533	spin_unlock(&cp->rx_spare_lock);
 534	list_for_each_safe(elem, tmp, &list) {
 535		cas_page_free(cp, list_entry(elem, cas_page_t, list));
 536	}
 537
 538	INIT_LIST_HEAD(&list);
 539#if 1
 540	/*
 541	 * Looks like Adrian had protected this with a different
 542	 * lock than used everywhere else to manipulate this list.
 543	 */
 544	spin_lock(&cp->rx_inuse_lock);
 545	list_splice_init(&cp->rx_inuse_list, &list);
 546	spin_unlock(&cp->rx_inuse_lock);
 547#else
 548	spin_lock(&cp->rx_spare_lock);
 549	list_splice_init(&cp->rx_inuse_list, &list);
 550	spin_unlock(&cp->rx_spare_lock);
 551#endif
 552	list_for_each_safe(elem, tmp, &list) {
 553		cas_page_free(cp, list_entry(elem, cas_page_t, list));
 554	}
 555}
 556
 557/* replenish spares if needed */
 558static void cas_spare_recover(struct cas *cp, const gfp_t flags)
 559{
 560	struct list_head list, *elem, *tmp;
 561	int needed, i;
 562
	/* check the inuse list. if we don't need any more free buffers,
	 * just free them
	 */
 566
 567	/* make a local copy of the list */
 568	INIT_LIST_HEAD(&list);
 569	spin_lock(&cp->rx_inuse_lock);
 570	list_splice_init(&cp->rx_inuse_list, &list);
 571	spin_unlock(&cp->rx_inuse_lock);
 572
 573	list_for_each_safe(elem, tmp, &list) {
 574		cas_page_t *page = list_entry(elem, cas_page_t, list);
 575
 576		/*
 577		 * With the lockless pagecache, cassini buffering scheme gets
 578		 * slightly less accurate: we might find that a page has an
 579		 * elevated reference count here, due to a speculative ref,
 580		 * and skip it as in-use. Ideally we would be able to reclaim
 581		 * it. However this would be such a rare case, it doesn't
 582		 * matter too much as we should pick it up the next time round.
 583		 *
 584		 * Importantly, if we find that the page has a refcount of 1
 585		 * here (our refcount), then we know it is definitely not inuse
 586		 * so we can reuse it.
 587		 */
 588		if (page_count(page->buffer) > 1)
 589			continue;
 590
 591		list_del(elem);
 592		spin_lock(&cp->rx_spare_lock);
 593		if (cp->rx_spares_needed > 0) {
 594			list_add(elem, &cp->rx_spare_list);
 595			cp->rx_spares_needed--;
 596			spin_unlock(&cp->rx_spare_lock);
 597		} else {
 598			spin_unlock(&cp->rx_spare_lock);
 599			cas_page_free(cp, page);
 600		}
 601	}
 602
 603	/* put any inuse buffers back on the list */
 604	if (!list_empty(&list)) {
 605		spin_lock(&cp->rx_inuse_lock);
 606		list_splice(&list, &cp->rx_inuse_list);
 607		spin_unlock(&cp->rx_inuse_lock);
 608	}
 609
 610	spin_lock(&cp->rx_spare_lock);
 611	needed = cp->rx_spares_needed;
 612	spin_unlock(&cp->rx_spare_lock);
 613	if (!needed)
 614		return;
 615
 616	/* we still need spares, so try to allocate some */
 617	INIT_LIST_HEAD(&list);
 618	i = 0;
 619	while (i < needed) {
 620		cas_page_t *spare = cas_page_alloc(cp, flags);
 621		if (!spare)
 622			break;
 623		list_add(&spare->list, &list);
 624		i++;
 625	}
 626
 627	spin_lock(&cp->rx_spare_lock);
 628	list_splice(&list, &cp->rx_spare_list);
 629	cp->rx_spares_needed -= i;
 630	spin_unlock(&cp->rx_spare_lock);
 631}
 632
 633/* pull a page from the list. */
 634static cas_page_t *cas_page_dequeue(struct cas *cp)
 635{
 636	struct list_head *entry;
 637	int recover;
 638
 639	spin_lock(&cp->rx_spare_lock);
 640	if (list_empty(&cp->rx_spare_list)) {
 641		/* try to do a quick recovery */
 642		spin_unlock(&cp->rx_spare_lock);
 643		cas_spare_recover(cp, GFP_ATOMIC);
 644		spin_lock(&cp->rx_spare_lock);
 645		if (list_empty(&cp->rx_spare_list)) {
 646			netif_err(cp, rx_err, cp->dev,
 647				  "no spare buffers available\n");
 648			spin_unlock(&cp->rx_spare_lock);
 649			return NULL;
 650		}
 651	}
 652
 653	entry = cp->rx_spare_list.next;
 654	list_del(entry);
 655	recover = ++cp->rx_spares_needed;
 656	spin_unlock(&cp->rx_spare_lock);
 657
 658	/* trigger the timer to do the recovery */
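	/* (the batching below assumes RX_SPARE_RECOVER_VAL is a power of
	 * two: the mask test fires once every RX_SPARE_RECOVER_VAL
	 * dequeues rather than on every page pulled from the pool.)
	 */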
 659	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
 660#if 1
 661		atomic_inc(&cp->reset_task_pending);
 662		atomic_inc(&cp->reset_task_pending_spare);
 663		schedule_work(&cp->reset_task);
 664#else
 665		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
 666		schedule_work(&cp->reset_task);
 667#endif
 668	}
 669	return list_entry(entry, cas_page_t, list);
 670}
 671
 672
 673static void cas_mif_poll(struct cas *cp, const int enable)
 674{
 675	u32 cfg;
 676
 677	cfg  = readl(cp->regs + REG_MIF_CFG);
 678	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
 679
 680	if (cp->phy_type & CAS_PHY_MII_MDIO1)
 681		cfg |= MIF_CFG_PHY_SELECT;
 682
 683	/* poll and interrupt on link status change. */
 684	if (enable) {
 685		cfg |= MIF_CFG_POLL_EN;
 686		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
 687		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
 688	}
 689	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
 690	       cp->regs + REG_MIF_MASK);
 691	writel(cfg, cp->regs + REG_MIF_CFG);
 692}
 693
 694/* Must be invoked under cp->lock */
 695static void cas_begin_auto_negotiation(struct cas *cp,
 696				       const struct ethtool_link_ksettings *ep)
 697{
 698	u16 ctl;
 699#if 1
 700	int lcntl;
 701	int changed = 0;
 702	int oldstate = cp->lstate;
 703	int link_was_not_down = !(oldstate == link_down);
 704#endif
 705	/* Setup link parameters */
 706	if (!ep)
 707		goto start_aneg;
 708	lcntl = cp->link_cntl;
 709	if (ep->base.autoneg == AUTONEG_ENABLE) {
 710		cp->link_cntl = BMCR_ANENABLE;
 711	} else {
 712		u32 speed = ep->base.speed;
 713		cp->link_cntl = 0;
 714		if (speed == SPEED_100)
 715			cp->link_cntl |= BMCR_SPEED100;
 716		else if (speed == SPEED_1000)
 717			cp->link_cntl |= CAS_BMCR_SPEED1000;
 718		if (ep->base.duplex == DUPLEX_FULL)
 719			cp->link_cntl |= BMCR_FULLDPLX;
 720	}
 721#if 1
 722	changed = (lcntl != cp->link_cntl);
 723#endif
 724start_aneg:
 725	if (cp->lstate == link_up) {
 726		netdev_info(cp->dev, "PCS link down\n");
 727	} else {
 728		if (changed) {
 729			netdev_info(cp->dev, "link configuration changed\n");
 730		}
 731	}
 732	cp->lstate = link_down;
 733	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
 734	if (!cp->hw_running)
 735		return;
 736#if 1
 737	/*
 738	 * WTZ: If the old state was link_up, we turn off the carrier
 739	 * to replicate everything we do elsewhere on a link-down
	 * event when we were already in a link-up state.
 741	 */
 742	if (oldstate == link_up)
 743		netif_carrier_off(cp->dev);
 744	if (changed  && link_was_not_down) {
 745		/*
 746		 * WTZ: This branch will simply schedule a full reset after
 747		 * we explicitly changed link modes in an ioctl. See if this
 748		 * fixes the link-problems we were having for forced mode.
 749		 */
 750		atomic_inc(&cp->reset_task_pending);
 751		atomic_inc(&cp->reset_task_pending_all);
 752		schedule_work(&cp->reset_task);
 753		cp->timer_ticks = 0;
 754		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 755		return;
 756	}
 757#endif
 758	if (cp->phy_type & CAS_PHY_SERDES) {
 759		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
 760
 761		if (cp->link_cntl & BMCR_ANENABLE) {
 762			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
 763			cp->lstate = link_aneg;
 764		} else {
 765			if (cp->link_cntl & BMCR_FULLDPLX)
 766				val |= PCS_MII_CTRL_DUPLEX;
 767			val &= ~PCS_MII_AUTONEG_EN;
 768			cp->lstate = link_force_ok;
 769		}
 770		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 771		writel(val, cp->regs + REG_PCS_MII_CTRL);
 772
 773	} else {
 774		cas_mif_poll(cp, 0);
 775		ctl = cas_phy_read(cp, MII_BMCR);
 776		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
 777			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
 778		ctl |= cp->link_cntl;
 779		if (ctl & BMCR_ANENABLE) {
 780			ctl |= BMCR_ANRESTART;
 781			cp->lstate = link_aneg;
 782		} else {
 783			cp->lstate = link_force_ok;
 784		}
 785		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 786		cas_phy_write(cp, MII_BMCR, ctl);
 787		cas_mif_poll(cp, 1);
 788	}
 789
 790	cp->timer_ticks = 0;
 791	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 792}
 793
 794/* Must be invoked under cp->lock. */
 795static int cas_reset_mii_phy(struct cas *cp)
 796{
 797	int limit = STOP_TRIES_PHY;
 798	u16 val;
 799
 800	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
 801	udelay(100);
 802	while (--limit) {
 803		val = cas_phy_read(cp, MII_BMCR);
 804		if ((val & BMCR_RESET) == 0)
 805			break;
 806		udelay(10);
 807	}
 808	return limit <= 0;
 809}
 810
 811static void cas_saturn_firmware_init(struct cas *cp)
 812{
 813	const struct firmware *fw;
 814	const char fw_name[] = "sun/cassini.bin";
 815	int err;
 816
 817	if (PHY_NS_DP83065 != cp->phy_id)
 818		return;
 819
 820	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
 821	if (err) {
 822		pr_err("Failed to load firmware \"%s\"\n",
 823		       fw_name);
 824		return;
 825	}
 826	if (fw->size < 2) {
 827		pr_err("bogus length %zu in \"%s\"\n",
 828		       fw->size, fw_name);
 829		goto out;
 830	}
	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
 832	cp->fw_size = fw->size - 2;
 833	cp->fw_data = vmalloc(cp->fw_size);
 834	if (!cp->fw_data)
 835		goto out;
 836	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
 837out:
 838	release_firmware(fw);
 839}
 840
 841static void cas_saturn_firmware_load(struct cas *cp)
 842{
 843	int i;
 844
 845	if (!cp->fw_data)
 846		return;
 847
 848	cas_phy_powerdown(cp);
 849
 850	/* expanded memory access mode */
 851	cas_phy_write(cp, DP83065_MII_MEM, 0x0);
 852
 853	/* pointer configuration for new firmware */
 854	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
 855	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
 856	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
 857	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
 858	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
 859	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
 860	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
 861	cas_phy_write(cp, DP83065_MII_REGD, 0x39);
 862
 863	/* download new firmware */
 864	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
 865	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
 866	for (i = 0; i < cp->fw_size; i++)
 867		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
 868
 869	/* enable firmware */
 870	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
 871	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
 872}
 873
 874
 875/* phy initialization */
 876static void cas_phy_init(struct cas *cp)
 877{
 878	u16 val;
 879
 880	/* if we're in MII/GMII mode, set up phy */
 881	if (CAS_PHY_MII(cp->phy_type)) {
 882		writel(PCS_DATAPATH_MODE_MII,
 883		       cp->regs + REG_PCS_DATAPATH_MODE);
 884
 885		cas_mif_poll(cp, 0);
 886		cas_reset_mii_phy(cp); /* take out of isolate mode */
 887
 888		if (PHY_LUCENT_B0 == cp->phy_id) {
 889			/* workaround link up/down issue with lucent */
 890			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
 891			cas_phy_write(cp, MII_BMCR, 0x00f1);
 892			cas_phy_write(cp, LUCENT_MII_REG, 0x0);
 893
 894		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
 895			/* workarounds for broadcom phy */
 896			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
 897			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
 898			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
 899			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
 900			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
 901			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
 902			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
 903			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
 904			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
 905			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
 906			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
 907
 908		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
 909			val = cas_phy_read(cp, BROADCOM_MII_REG4);
 910			val = cas_phy_read(cp, BROADCOM_MII_REG4);
 911			if (val & 0x0080) {
 912				/* link workaround */
 913				cas_phy_write(cp, BROADCOM_MII_REG4,
 914					      val & ~0x0080);
 915			}
 916
 917		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
 918			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
 919			       SATURN_PCFG_FSI : 0x0,
 920			       cp->regs + REG_SATURN_PCFG);
 921
 922			/* load firmware to address 10Mbps auto-negotiation
 923			 * issue. NOTE: this will need to be changed if the
 924			 * default firmware gets fixed.
 925			 */
 926			if (PHY_NS_DP83065 == cp->phy_id) {
 927				cas_saturn_firmware_load(cp);
 928			}
 929			cas_phy_powerup(cp);
 930		}
 931
 932		/* advertise capabilities */
 933		val = cas_phy_read(cp, MII_BMCR);
 934		val &= ~BMCR_ANENABLE;
 935		cas_phy_write(cp, MII_BMCR, val);
 936		udelay(10);
 937
 938		cas_phy_write(cp, MII_ADVERTISE,
 939			      cas_phy_read(cp, MII_ADVERTISE) |
 940			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
 941			       ADVERTISE_100HALF | ADVERTISE_100FULL |
 942			       CAS_ADVERTISE_PAUSE |
 943			       CAS_ADVERTISE_ASYM_PAUSE));
 944
 945		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
 946			/* make sure that we don't advertise half
 947			 * duplex to avoid a chip issue
 948			 */
 949			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
 950			val &= ~CAS_ADVERTISE_1000HALF;
 951			val |= CAS_ADVERTISE_1000FULL;
 952			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
 953		}
 954
 955	} else {
 956		/* reset pcs for serdes */
 957		u32 val;
 958		int limit;
 959
 960		writel(PCS_DATAPATH_MODE_SERDES,
 961		       cp->regs + REG_PCS_DATAPATH_MODE);
 962
 963		/* enable serdes pins on saturn */
 964		if (cp->cas_flags & CAS_FLAG_SATURN)
 965			writel(0, cp->regs + REG_SATURN_PCFG);
 966
 967		/* Reset PCS unit. */
 968		val = readl(cp->regs + REG_PCS_MII_CTRL);
 969		val |= PCS_MII_RESET;
 970		writel(val, cp->regs + REG_PCS_MII_CTRL);
 971
 972		limit = STOP_TRIES;
 973		while (--limit > 0) {
 974			udelay(10);
 975			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
 976			     PCS_MII_RESET) == 0)
 977				break;
 978		}
 979		if (limit <= 0)
 980			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
 981				    readl(cp->regs + REG_PCS_STATE_MACHINE));
 982
 983		/* Make sure PCS is disabled while changing advertisement
 984		 * configuration.
 985		 */
 986		writel(0x0, cp->regs + REG_PCS_CFG);
 987
 988		/* Advertise all capabilities except half-duplex. */
 989		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
 990		val &= ~PCS_MII_ADVERT_HD;
 991		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
 992			PCS_MII_ADVERT_ASYM_PAUSE);
 993		writel(val, cp->regs + REG_PCS_MII_ADVERT);
 994
 995		/* enable PCS */
 996		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
 997
 998		/* pcs workaround: enable sync detect */
 999		writel(PCS_SERDES_CTRL_SYNCD_EN,
1000		       cp->regs + REG_PCS_SERDES_CTRL);
1001	}
1002}
1003
1004
1005static int cas_pcs_link_check(struct cas *cp)
1006{
1007	u32 stat, state_machine;
1008	int retval = 0;
1009
1010	/* The link status bit latches on zero, so you must
1011	 * read it twice in such a case to see a transition
1012	 * to the link being up.
1013	 */
1014	stat = readl(cp->regs + REG_PCS_MII_STATUS);
1015	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
1016		stat = readl(cp->regs + REG_PCS_MII_STATUS);
1017
1018	/* The remote-fault indication is only valid
1019	 * when autoneg has completed.
1020	 */
1021	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
1022		     PCS_MII_STATUS_REMOTE_FAULT)) ==
1023	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
1024		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
1025
1026	/* work around link detection issue by querying the PCS state
1027	 * machine directly.
1028	 */
1029	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
1030	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1031		stat &= ~PCS_MII_STATUS_LINK_STATUS;
1032	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1033		stat |= PCS_MII_STATUS_LINK_STATUS;
1034	}
1035
1036	if (stat & PCS_MII_STATUS_LINK_STATUS) {
1037		if (cp->lstate != link_up) {
1038			if (cp->opened) {
1039				cp->lstate = link_up;
1040				cp->link_transition = LINK_TRANSITION_LINK_UP;
1041
1042				cas_set_link_modes(cp);
1043				netif_carrier_on(cp->dev);
1044			}
1045		}
1046	} else if (cp->lstate == link_up) {
1047		cp->lstate = link_down;
1048		if (link_transition_timeout != 0 &&
1049		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1050		    !cp->link_transition_jiffies_valid) {
1051			/*
1052			 * force a reset, as a workaround for the
1053			 * link-failure problem. May want to move this to a
1054			 * point a bit earlier in the sequence. If we had
1055			 * generated a reset a short time ago, we'll wait for
1056			 * the link timer to check the status until a
			 * timer expires (link_transition_jiffies_valid is
1058			 * true when the timer is running.)  Instead of using
1059			 * a system timer, we just do a check whenever the
1060			 * link timer is running - this clears the flag after
1061			 * a suitable delay.
1062			 */
1063			retval = 1;
1064			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1065			cp->link_transition_jiffies = jiffies;
1066			cp->link_transition_jiffies_valid = 1;
1067		} else {
1068			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1069		}
1070		netif_carrier_off(cp->dev);
1071		if (cp->opened)
1072			netif_info(cp, link, cp->dev, "PCS link down\n");
1073
1074		/* Cassini only: if you force a mode, there can be
1075		 * sync problems on link down. to fix that, the following
1076		 * things need to be checked:
1077		 * 1) read serialink state register
1078		 * 2) read pcs status register to verify link down.
1079		 * 3) if link down and serial link == 0x03, then you need
1080		 *    to global reset the chip.
1081		 */
1082		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1083			/* should check to see if we're in a forced mode */
1084			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1085			if (stat == 0x03)
1086				return 1;
1087		}
1088	} else if (cp->lstate == link_down) {
1089		if (link_transition_timeout != 0 &&
1090		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1091		    !cp->link_transition_jiffies_valid) {
1092			/* force a reset, as a workaround for the
1093			 * link-failure problem.  May want to move
1094			 * this to a point a bit earlier in the
1095			 * sequence.
1096			 */
1097			retval = 1;
1098			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1099			cp->link_transition_jiffies = jiffies;
1100			cp->link_transition_jiffies_valid = 1;
1101		} else {
1102			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1103		}
1104	}
1105
1106	return retval;
1107}
1108
1109static int cas_pcs_interrupt(struct net_device *dev,
1110			     struct cas *cp, u32 status)
1111{
1112	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1113
1114	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1115		return 0;
1116	return cas_pcs_link_check(cp);
1117}
1118
1119static int cas_txmac_interrupt(struct net_device *dev,
1120			       struct cas *cp, u32 status)
1121{
1122	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1123
1124	if (!txmac_stat)
1125		return 0;
1126
1127	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1128		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
1129
	/* Defer-timer expiration is quite normal;
	 * don't even log the event.
	 */
1133	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1134	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1135		return 0;
1136
1137	spin_lock(&cp->stat_lock[0]);
1138	if (txmac_stat & MAC_TX_UNDERRUN) {
1139		netdev_err(dev, "TX MAC xmit underrun\n");
1140		cp->net_stats[0].tx_fifo_errors++;
1141	}
1142
1143	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1144		netdev_err(dev, "TX MAC max packet size error\n");
1145		cp->net_stats[0].tx_errors++;
1146	}
1147
1148	/* The rest are all cases of one of the 16-bit TX
1149	 * counters expiring.
1150	 */
1151	if (txmac_stat & MAC_TX_COLL_NORMAL)
1152		cp->net_stats[0].collisions += 0x10000;
1153
1154	if (txmac_stat & MAC_TX_COLL_EXCESS) {
1155		cp->net_stats[0].tx_aborted_errors += 0x10000;
1156		cp->net_stats[0].collisions += 0x10000;
1157	}
1158
1159	if (txmac_stat & MAC_TX_COLL_LATE) {
1160		cp->net_stats[0].tx_aborted_errors += 0x10000;
1161		cp->net_stats[0].collisions += 0x10000;
1162	}
1163	spin_unlock(&cp->stat_lock[0]);
1164
1165	/* We do not keep track of MAC_TX_COLL_FIRST and
1166	 * MAC_TX_PEAK_ATTEMPTS events.
1167	 */
1168	return 0;
1169}
1170
1171static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1172{
1173	cas_hp_inst_t *inst;
1174	u32 val;
1175	int i;
1176
1177	i = 0;
1178	while ((inst = firmware) && inst->note) {
1179		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1180
1181		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1182		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1183		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1184
1185		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1186		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1187		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1188		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1189		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1190		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1191		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1192		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1193
1194		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1195		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1196		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1197		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1198		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1199		++firmware;
1200		++i;
1201	}
1202}
1203
1204static void cas_init_rx_dma(struct cas *cp)
1205{
1206	u64 desc_dma = cp->block_dvma;
1207	u32 val;
1208	int i, size;
1209
1210	/* rx free descriptors */
1211	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1212	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1213	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1214	if ((N_RX_DESC_RINGS > 1) &&
1215	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
1216		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1217	writel(val, cp->regs + REG_RX_CFG);
1218
1219	val = (unsigned long) cp->init_rxds[0] -
1220		(unsigned long) cp->init_block;
1221	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1222	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1223	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1224
1225	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx desc 2 is for IPSEC packets. however,
		 * we don't use it for that purpose.
		 */
1229		val = (unsigned long) cp->init_rxds[1] -
1230			(unsigned long) cp->init_block;
1231		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1232		writel((desc_dma + val) & 0xffffffff, cp->regs +
1233		       REG_PLUS_RX_DB1_LOW);
1234		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1235		       REG_PLUS_RX_KICK1);
1236	}
1237
1238	/* rx completion registers */
1239	val = (unsigned long) cp->init_rxcs[0] -
1240		(unsigned long) cp->init_block;
1241	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1242	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1243
1244	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1245		/* rx comp 2-4 */
1246		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1247			val = (unsigned long) cp->init_rxcs[i] -
1248				(unsigned long) cp->init_block;
1249			writel((desc_dma + val) >> 32, cp->regs +
1250			       REG_PLUS_RX_CBN_HI(i));
1251			writel((desc_dma + val) & 0xffffffff, cp->regs +
1252			       REG_PLUS_RX_CBN_LOW(i));
1253		}
1254	}
1255
1256	/* read selective clear regs to prevent spurious interrupts
1257	 * on reset because complete == kick.
1258	 * selective clear set up to prevent interrupts on resets
1259	 */
1260	readl(cp->regs + REG_INTR_STATUS_ALIAS);
1261	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1262	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1263		for (i = 1; i < N_RX_COMP_RINGS; i++)
1264			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
1265
		/* completion ring 2's clear mask differs from rings 3 and 4 */
1267		if (N_RX_COMP_RINGS > 1)
1268			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
1269			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));
1270
1271		for (i = 2; i < N_RX_COMP_RINGS; i++)
1272			writel(INTR_RX_DONE_ALT,
1273			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
1274	}
1275
1276	/* set up pause thresholds */
1277	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
1278			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1279	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1280			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1281	writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1282
1283	/* zero out dma reassembly buffers */
1284	for (i = 0; i < 64; i++) {
1285		writel(i, cp->regs + REG_RX_TABLE_ADDR);
1286		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1287		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1288		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1289	}
1290
1291	/* make sure address register is 0 for normal operation */
1292	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1293	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1294
1295	/* interrupt mitigation */
1296#ifdef USE_RX_BLANK
1297	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1298	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1299	writel(val, cp->regs + REG_RX_BLANK);
1300#else
1301	writel(0x0, cp->regs + REG_RX_BLANK);
1302#endif
1303
1304	/* interrupt generation as a function of low water marks for
1305	 * free desc and completion entries. these are used to trigger
1306	 * housekeeping for rx descs. we don't use the free interrupt
1307	 * as it's not very useful
1308	 */
1309	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
1310	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1311	writel(val, cp->regs + REG_RX_AE_THRESH);
1312	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1313		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1314		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1315	}
1316
1317	/* Random early detect registers. useful for congestion avoidance.
1318	 * this should be tunable.
1319	 */
1320	writel(0x0, cp->regs + REG_RX_RED);
1321
1322	/* receive page sizes. default == 2K (0x800) */
1323	val = 0;
1324	if (cp->page_size == 0x1000)
1325		val = 0x1;
1326	else if (cp->page_size == 0x2000)
1327		val = 0x2;
1328	else if (cp->page_size == 0x4000)
1329		val = 0x3;
1330
1331	/* round mtu + offset. constrain to page size. */
1332	size = cp->dev->mtu + 64;
1333	if (size > cp->page_size)
1334		size = cp->page_size;
1335
1336	if (size <= 0x400)
1337		i = 0x0;
1338	else if (size <= 0x800)
1339		i = 0x1;
1340	else if (size <= 0x1000)
1341		i = 0x2;
1342	else
1343		i = 0x3;
1344
1345	cp->mtu_stride = 1 << (i + 10);
1346	val  = CAS_BASE(RX_PAGE_SIZE, val);
1347	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1348	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1349	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1350	writel(val, cp->regs + REG_RX_PAGE_SIZE);
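
	/* Worked example (illustrative): with a 1500-byte MTU and 2K
	 * pages, size = 1500 + 64 = 1564 <= 0x800, so i = 0x1 and
	 * cp->mtu_stride = 1 << 11 = 2048: one MTU-sized stride per 2K,
	 * and RX_PAGE_SIZE_MTU_COUNT = 0x800 >> 11 = 1 buffer per page.
	 */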
1351
1352	/* enable the header parser if desired */
1353	if (CAS_HP_FIRMWARE == cas_prog_null)
1354		return;
1355
1356	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1357	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1358	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1359	writel(val, cp->regs + REG_HP_CFG);
1360}
1361
1362static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1363{
1364	memset(rxc, 0, sizeof(*rxc));
1365	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1366}
1367
1368/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1369 * flipping is protected by the fact that the chip will not
1370 * hand back the same page index while it's being processed.
1371 */
1372static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1373{
1374	cas_page_t *page = cp->rx_pages[1][index];
1375	cas_page_t *new;
1376
1377	if (page_count(page->buffer) == 1)
1378		return page;
1379
1380	new = cas_page_dequeue(cp);
1381	if (new) {
1382		spin_lock(&cp->rx_inuse_lock);
1383		list_add(&page->list, &cp->rx_inuse_list);
1384		spin_unlock(&cp->rx_inuse_lock);
1385	}
1386	return new;
1387}
1388
1389/* this needs to be changed if we actually use the ENC RX DESC ring */
1390static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1391				 const int index)
1392{
1393	cas_page_t **page0 = cp->rx_pages[0];
1394	cas_page_t **page1 = cp->rx_pages[1];
1395
1396	/* swap if buffer is in use */
1397	if (page_count(page0[index]->buffer) > 1) {
1398		cas_page_t *new = cas_page_spare(cp, index);
1399		if (new) {
1400			page1[index] = page0[index];
1401			page0[index] = new;
1402		}
1403	}
1404	RX_USED_SET(page0[index], 0);
1405	return page0[index];
1406}
1407
1408static void cas_clean_rxds(struct cas *cp)
1409{
1410	/* only clean ring 0 as ring 1 is used for spare buffers */
	struct cas_rx_desc *rxd = cp->init_rxds[0];
1412	int i, size;
1413
1414	/* release all rx flows */
1415	for (i = 0; i < N_RX_FLOWS; i++) {
1416		struct sk_buff *skb;
1417		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1418			cas_skb_release(skb);
1419		}
1420	}
1421
1422	/* initialize descriptors */
1423	size = RX_DESC_RINGN_SIZE(0);
1424	for (i = 0; i < size; i++) {
1425		cas_page_t *page = cas_page_swap(cp, 0, i);
1426		rxd[i].buffer = cpu_to_le64(page->dma_addr);
1427		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1428					    CAS_BASE(RX_INDEX_RING, 0));
1429	}
1430
1431	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
1432	cp->rx_last[0] = 0;
1433	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1434}
1435
1436static void cas_clean_rxcs(struct cas *cp)
1437{
1438	int i, j;
1439
1440	/* take ownership of rx comp descriptors */
1441	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1442	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1443	for (i = 0; i < N_RX_COMP_RINGS; i++) {
1444		struct cas_rx_comp *rxc = cp->init_rxcs[i];
1445		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1446			cas_rxc_init(rxc + j);
1447		}
1448	}
1449}
1450
1451#if 0
1452/* When we get a RX fifo overflow, the RX unit is probably hung
1453 * so we do the following.
1454 *
1455 * If any part of the reset goes wrong, we return 1 and that causes the
1456 * whole chip to be reset.
1457 */
1458static int cas_rxmac_reset(struct cas *cp)
1459{
1460	struct net_device *dev = cp->dev;
1461	int limit;
1462	u32 val;
1463
1464	/* First, reset MAC RX. */
1465	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1466	for (limit = 0; limit < STOP_TRIES; limit++) {
1467		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1468			break;
1469		udelay(10);
1470	}
1471	if (limit == STOP_TRIES) {
1472		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1473		return 1;
1474	}
1475
1476	/* Second, disable RX DMA. */
1477	writel(0, cp->regs + REG_RX_CFG);
1478	for (limit = 0; limit < STOP_TRIES; limit++) {
1479		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1480			break;
1481		udelay(10);
1482	}
1483	if (limit == STOP_TRIES) {
1484		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1485		return 1;
1486	}
1487
1488	mdelay(5);
1489
1490	/* Execute RX reset command. */
1491	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1492	for (limit = 0; limit < STOP_TRIES; limit++) {
1493		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1494			break;
1495		udelay(10);
1496	}
1497	if (limit == STOP_TRIES) {
1498		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1499		return 1;
1500	}
1501
1502	/* reset driver rx state */
1503	cas_clean_rxds(cp);
1504	cas_clean_rxcs(cp);
1505
1506	/* Now, reprogram the rest of RX unit. */
1507	cas_init_rx_dma(cp);
1508
1509	/* re-enable */
1510	val = readl(cp->regs + REG_RX_CFG);
1511	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1512	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1513	val = readl(cp->regs + REG_MAC_RX_CFG);
1514	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1515	return 0;
1516}
1517#endif
1518
1519static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1520			       u32 status)
1521{
1522	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1523
1524	if (!stat)
1525		return 0;
1526
1527	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1528
1529	/* these are all rollovers */
1530	spin_lock(&cp->stat_lock[0]);
1531	if (stat & MAC_RX_ALIGN_ERR)
1532		cp->net_stats[0].rx_frame_errors += 0x10000;
1533
1534	if (stat & MAC_RX_CRC_ERR)
1535		cp->net_stats[0].rx_crc_errors += 0x10000;
1536
1537	if (stat & MAC_RX_LEN_ERR)
1538		cp->net_stats[0].rx_length_errors += 0x10000;
1539
1540	if (stat & MAC_RX_OVERFLOW) {
1541		cp->net_stats[0].rx_over_errors++;
1542		cp->net_stats[0].rx_fifo_errors++;
1543	}
1544
1545	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
1546	 * events.
1547	 */
1548	spin_unlock(&cp->stat_lock[0]);
1549	return 0;
1550}
1551
1552static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1553			     u32 status)
1554{
1555	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1556
1557	if (!stat)
1558		return 0;
1559
1560	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1561		     "mac interrupt, stat: 0x%x\n", stat);
1562
1563	/* This interrupt is just for pause frame and pause
1564	 * tracking.  It is useful for diagnostics and debug
1565	 * but probably by default we will mask these events.
1566	 */
1567	if (stat & MAC_CTRL_PAUSE_STATE)
1568		cp->pause_entered++;
1569
1570	if (stat & MAC_CTRL_PAUSE_RECEIVED)
1571		cp->pause_last_time_recvd = (stat >> 16);
1572
1573	return 0;
1574}
1575
1576
1577/* Must be invoked under cp->lock. */
1578static inline int cas_mdio_link_not_up(struct cas *cp)
1579{
1580	u16 val;
1581
1582	switch (cp->lstate) {
1583	case link_force_ret:
1584		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1585		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1586		cp->timer_ticks = 5;
1587		cp->lstate = link_force_ok;
1588		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1589		break;
1590
1591	case link_aneg:
1592		val = cas_phy_read(cp, MII_BMCR);
1593
1594		/* Try forced modes. we try things in the following order:
1595		 * 1000 full -> 100 full/half -> 10 half
1596		 */
1597		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1598		val |= BMCR_FULLDPLX;
1599		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1600			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1601		cas_phy_write(cp, MII_BMCR, val);
1602		cp->timer_ticks = 5;
1603		cp->lstate = link_force_try;
1604		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1605		break;
1606
1607	case link_force_try:
1608		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
1609		val = cas_phy_read(cp, MII_BMCR);
1610		cp->timer_ticks = 5;
1611		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
1612			val &= ~CAS_BMCR_SPEED1000;
1613			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1614			cas_phy_write(cp, MII_BMCR, val);
1615			break;
1616		}
1617
1618		if (val & BMCR_SPEED100) {
1619			if (val & BMCR_FULLDPLX) /* fd failed */
1620				val &= ~BMCR_FULLDPLX;
1621			else { /* 100Mbps failed */
1622				val &= ~BMCR_SPEED100;
1623			}
1624			cas_phy_write(cp, MII_BMCR, val);
1625			break;
1626		}
 
1627	default:
1628		break;
1629	}
1630	return 0;
1631}
1632
1633
1634/* must be invoked with cp->lock held */
1635static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1636{
1637	int restart;
1638
1639	if (bmsr & BMSR_LSTATUS) {
1640		/* Ok, here we got a link. If we had it due to a forced
1641		 * fallback, and we were configured for autoneg, we
1642		 * retry a short autoneg pass. If you know your hub is
1643		 * broken, use ethtool ;)
1644		 */
1645		if ((cp->lstate == link_force_try) &&
1646		    (cp->link_cntl & BMCR_ANENABLE)) {
1647			cp->lstate = link_force_ret;
1648			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1649			cas_mif_poll(cp, 0);
1650			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1651			cp->timer_ticks = 5;
1652			if (cp->opened)
1653				netif_info(cp, link, cp->dev,
1654					   "Got link after fallback, retrying autoneg once...\n");
1655			cas_phy_write(cp, MII_BMCR,
1656				      cp->link_fcntl | BMCR_ANENABLE |
1657				      BMCR_ANRESTART);
1658			cas_mif_poll(cp, 1);
1659
1660		} else if (cp->lstate != link_up) {
1661			cp->lstate = link_up;
1662			cp->link_transition = LINK_TRANSITION_LINK_UP;
1663
1664			if (cp->opened) {
1665				cas_set_link_modes(cp);
1666				netif_carrier_on(cp->dev);
1667			}
1668		}
1669		return 0;
1670	}
1671
1672	/* link not up. if the link was previously up, we restart the
1673	 * whole process
1674	 */
1675	restart = 0;
1676	if (cp->lstate == link_up) {
1677		cp->lstate = link_down;
1678		cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1679
1680		netif_carrier_off(cp->dev);
1681		if (cp->opened)
1682			netif_info(cp, link, cp->dev, "Link down\n");
1683		restart = 1;
1684
1685	} else if (++cp->timer_ticks > 10)
1686		cas_mdio_link_not_up(cp);
1687
1688	return restart;
1689}
1690
1691static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1692			     u32 status)
1693{
1694	u32 stat = readl(cp->regs + REG_MIF_STATUS);
1695	u16 bmsr;
1696
1697	/* check for a link change */
1698	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1699		return 0;
1700
1701	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1702	return cas_mii_link_check(cp, bmsr);
1703}
1704
1705static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1706			     u32 status)
1707{
1708	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1709
1710	if (!stat)
1711		return 0;
1712
1713	netdev_err(dev, "PCI error [%04x:%04x]",
1714		   stat, readl(cp->regs + REG_BIM_DIAG));
1715
1716	/* cassini+ has this reserved */
1717	if ((stat & PCI_ERR_BADACK) &&
1718	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1719		pr_cont(" <No ACK64# during ABS64 cycle>");
1720
1721	if (stat & PCI_ERR_DTRTO)
1722		pr_cont(" <Delayed transaction timeout>");
1723	if (stat & PCI_ERR_OTHER)
1724		pr_cont(" <other>");
1725	if (stat & PCI_ERR_BIM_DMA_WRITE)
1726		pr_cont(" <BIM DMA 0 write req>");
1727	if (stat & PCI_ERR_BIM_DMA_READ)
1728		pr_cont(" <BIM DMA 0 read req>");
1729	pr_cont("\n");
1730
1731	if (stat & PCI_ERR_OTHER) {
1732		u16 cfg;
1733
1734		/* Interrogate PCI config space for the
1735		 * true cause.
1736		 */
1737		pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1738		netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
		if (cfg & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
1741		if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1742			netdev_err(dev, "PCI target abort\n");
1743		if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1744			netdev_err(dev, "PCI master acks target abort\n");
1745		if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1746			netdev_err(dev, "PCI master abort\n");
1747		if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1748			netdev_err(dev, "PCI system error SERR#\n");
1749		if (cfg & PCI_STATUS_DETECTED_PARITY)
1750			netdev_err(dev, "PCI parity error\n");
1751
1752		/* Write the error bits back to clear them. */
1753		cfg &= (PCI_STATUS_PARITY |
1754			PCI_STATUS_SIG_TARGET_ABORT |
1755			PCI_STATUS_REC_TARGET_ABORT |
1756			PCI_STATUS_REC_MASTER_ABORT |
1757			PCI_STATUS_SIG_SYSTEM_ERROR |
1758			PCI_STATUS_DETECTED_PARITY);
1759		pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
1760	}
1761
1762	/* For all PCI errors, we should reset the chip. */
1763	return 1;
1764}
1765
1766/* All non-normal interrupt conditions get serviced here.
1767 * Returns non-zero if we should just exit the interrupt
1768 * handler right now (ie. if we reset the card which invalidates
1769 * all of the other original irq status bits).
1770 */
1771static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1772			    u32 status)
1773{
1774	if (status & INTR_RX_TAG_ERROR) {
1775		/* corrupt RX tag framing */
1776		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1777			     "corrupt rx tag framing\n");
1778		spin_lock(&cp->stat_lock[0]);
1779		cp->net_stats[0].rx_errors++;
1780		spin_unlock(&cp->stat_lock[0]);
1781		goto do_reset;
1782	}
1783
1784	if (status & INTR_RX_LEN_MISMATCH) {
1785		/* length mismatch. */
1786		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1787			     "length mismatch for rx frame\n");
1788		spin_lock(&cp->stat_lock[0]);
1789		cp->net_stats[0].rx_errors++;
1790		spin_unlock(&cp->stat_lock[0]);
1791		goto do_reset;
1792	}
1793
1794	if (status & INTR_PCS_STATUS) {
1795		if (cas_pcs_interrupt(dev, cp, status))
1796			goto do_reset;
1797	}
1798
1799	if (status & INTR_TX_MAC_STATUS) {
1800		if (cas_txmac_interrupt(dev, cp, status))
1801			goto do_reset;
1802	}
1803
1804	if (status & INTR_RX_MAC_STATUS) {
1805		if (cas_rxmac_interrupt(dev, cp, status))
1806			goto do_reset;
1807	}
1808
1809	if (status & INTR_MAC_CTRL_STATUS) {
1810		if (cas_mac_interrupt(dev, cp, status))
1811			goto do_reset;
1812	}
1813
1814	if (status & INTR_MIF_STATUS) {
1815		if (cas_mif_interrupt(dev, cp, status))
1816			goto do_reset;
1817	}
1818
1819	if (status & INTR_PCI_ERROR_STATUS) {
1820		if (cas_pci_interrupt(dev, cp, status))
1821			goto do_reset;
1822	}
1823	return 0;
1824
1825do_reset:
1826#if 1
1827	atomic_inc(&cp->reset_task_pending);
1828	atomic_inc(&cp->reset_task_pending_all);
1829	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1830	schedule_work(&cp->reset_task);
1831#else
1832	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1833	netdev_err(dev, "reset called in cas_abnormal_irq\n");
1834	schedule_work(&cp->reset_task);
1835#endif
1836	return 1;
1837}
1838
1839/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
1840 *       determining whether to do a netif_stop/wakeup
1841 */
1842#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1843#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1844static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1845				  const int len)
1846{
1847	unsigned long off = addr + len;
1848
1849	if (CAS_TABORT(cp) == 1)
1850		return 0;
1851	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1852		return 0;
1853	return TX_TARGET_ABORT_LEN;
1854}
1855
1856static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1857{
1858	struct cas_tx_desc *txds;
1859	struct sk_buff **skbs;
1860	struct net_device *dev = cp->dev;
1861	int entry, count;
1862
1863	spin_lock(&cp->tx_lock[ring]);
1864	txds = cp->init_txds[ring];
1865	skbs = cp->tx_skbs[ring];
1866	entry = cp->tx_old[ring];
1867
1868	count = TX_BUFF_COUNT(ring, entry, limit);
1869	while (entry != limit) {
1870		struct sk_buff *skb = skbs[entry];
1871		dma_addr_t daddr;
1872		u32 dlen;
1873		int frag;
1874
1875		if (!skb) {
1876			/* this should never occur */
1877			entry = TX_DESC_NEXT(ring, entry);
1878			continue;
1879		}
1880
1881		/* however, we might get only a partial skb release. */
		count -= skb_shinfo(skb)->nr_frags +
			 cp->tx_tiny_use[ring][entry].nbufs + 1;
1884		if (count < 0)
1885			break;
1886
1887		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1888			     "tx[%d] done, slot %d\n", ring, entry);
1889
1890		skbs[entry] = NULL;
1891		cp->tx_tiny_use[ring][entry].nbufs = 0;
1892
1893		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1894			struct cas_tx_desc *txd = txds + entry;
1895
1896			daddr = le64_to_cpu(txd->buffer);
1897			dlen = CAS_VAL(TX_DESC_BUFLEN,
1898				       le64_to_cpu(txd->control));
1899			pci_unmap_page(cp->pdev, daddr, dlen,
1900				       PCI_DMA_TODEVICE);
1901			entry = TX_DESC_NEXT(ring, entry);
1902
1903			/* tiny buffer may follow */
1904			if (cp->tx_tiny_use[ring][entry].used) {
1905				cp->tx_tiny_use[ring][entry].used = 0;
1906				entry = TX_DESC_NEXT(ring, entry);
1907			}
1908		}
1909
1910		spin_lock(&cp->stat_lock[ring]);
1911		cp->net_stats[ring].tx_packets++;
1912		cp->net_stats[ring].tx_bytes += skb->len;
1913		spin_unlock(&cp->stat_lock[ring]);
1914		dev_kfree_skb_irq(skb);
1915	}
1916	cp->tx_old[ring] = entry;
1917
1918	/* this is wrong for multiple tx rings. the net device needs
1919	 * multiple queues for this to do the right thing.  we wait
1920	 * for 2*packets to be available when using tiny buffers
1921	 */
1922	if (netif_queue_stopped(dev) &&
1923	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1924		netif_wake_queue(dev);
1925	spin_unlock(&cp->tx_lock[ring]);
1926}
1927
1928static void cas_tx(struct net_device *dev, struct cas *cp,
1929		   u32 status)
1930{
	int limit, ring;
1932#ifdef USE_TX_COMPWB
1933	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1934#endif
1935	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1936		     "tx interrupt, status: 0x%x, %llx\n",
1937		     status, (unsigned long long)compwb);
1938	/* process all the rings */
1939	for (ring = 0; ring < N_TX_RINGS; ring++) {
1940#ifdef USE_TX_COMPWB
1941		/* use the completion writeback registers */
1942		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1943			CAS_VAL(TX_COMPWB_LSB, compwb);
1944		compwb = TX_COMPWB_NEXT(compwb);
1945#else
1946		limit = readl(cp->regs + REG_TX_COMPN(ring));
1947#endif
1948		if (cp->tx_old[ring] != limit)
1949			cas_tx_ringN(cp, ring, limit);
1950	}
1951}
1952
1953
1954static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1955			      int entry, const u64 *words,
1956			      struct sk_buff **skbref)
1957{
1958	int dlen, hlen, len, i, alloclen;
1959	int off, swivel = RX_SWIVEL_OFF_VAL;
1960	struct cas_page *page;
1961	struct sk_buff *skb;
1962	void *addr, *crcaddr;
1963	__sum16 csum;
1964	char *p;
1965
1966	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1967	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1968	len  = hlen + dlen;
1969
1970	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1971		alloclen = len;
1972	else
1973		alloclen = max(hlen, RX_COPY_MIN);
1974
1975	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1976	if (skb == NULL)
1977		return -1;
1978
1979	*skbref = skb;
1980	skb_reserve(skb, swivel);
1981
1982	p = skb->data;
1983	addr = crcaddr = NULL;
1984	if (hlen) { /* always copy header pages */
1985		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1986		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1987		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1988			swivel;
1989
1990		i = hlen;
1991		if (!dlen) /* attach FCS */
1992			i += cp->crc_size;
1993		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
1994				    PCI_DMA_FROMDEVICE);
1995		addr = cas_page_map(page->buffer);
1996		memcpy(p, addr + off, i);
1997		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
1998				    PCI_DMA_FROMDEVICE);
1999		cas_page_unmap(addr);
2000		RX_USED_ADD(page, 0x100);
2001		p += hlen;
2002		swivel = 0;
2003	}
2004
2005
2006	if (alloclen < (hlen + dlen)) {
2007		skb_frag_t *frag = skb_shinfo(skb)->frags;
2008
2009		/* normal or jumbo packets. we use frags */
2010		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2011		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2012		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2013
2014		hlen = min(cp->page_size - off, dlen);
2015		if (hlen < 0) {
2016			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2017				     "rx page overflow: %d\n", hlen);
2018			dev_kfree_skb_irq(skb);
2019			return -1;
2020		}
2021		i = hlen;
2022		if (i == dlen)  /* attach FCS */
2023			i += cp->crc_size;
2024		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2025				    PCI_DMA_FROMDEVICE);
2026
2027		/* make sure we always copy a header */
2028		swivel = 0;
2029		if (p == (char *) skb->data) { /* not split */
2030			addr = cas_page_map(page->buffer);
2031			memcpy(p, addr + off, RX_COPY_MIN);
2032			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2033					PCI_DMA_FROMDEVICE);
2034			cas_page_unmap(addr);
2035			off += RX_COPY_MIN;
2036			swivel = RX_COPY_MIN;
2037			RX_USED_ADD(page, cp->mtu_stride);
2038		} else {
2039			RX_USED_ADD(page, hlen);
2040		}
2041		skb_put(skb, alloclen);
2042
2043		skb_shinfo(skb)->nr_frags++;
2044		skb->data_len += hlen - swivel;
2045		skb->truesize += hlen - swivel;
2046		skb->len      += hlen - swivel;
2047
2048		__skb_frag_set_page(frag, page->buffer);
2049		__skb_frag_ref(frag);
2050		frag->page_offset = off;
2051		skb_frag_size_set(frag, hlen - swivel);
2052
2053		/* any more data? */
2054		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2055			hlen = dlen;
2056			off = 0;
2057
2058			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2059			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2060			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2061					    hlen + cp->crc_size,
2062					    PCI_DMA_FROMDEVICE);
2063			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2064					    hlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);
2066
2067			skb_shinfo(skb)->nr_frags++;
2068			skb->data_len += hlen;
2069			skb->len      += hlen;
2070			frag++;
2071
2072			__skb_frag_set_page(frag, page->buffer);
2073			__skb_frag_ref(frag);
2074			frag->page_offset = 0;
2075			skb_frag_size_set(frag, hlen);
2076			RX_USED_ADD(page, hlen + cp->crc_size);
2077		}
2078
2079		if (cp->crc_size) {
2080			addr = cas_page_map(page->buffer);
2081			crcaddr  = addr + off + hlen;
2082		}
2083
2084	} else {
2085		/* copying packet */
2086		if (!dlen)
2087			goto end_copy_pkt;
2088
2089		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2090		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2091		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2092		hlen = min(cp->page_size - off, dlen);
2093		if (hlen < 0) {
2094			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2095				     "rx page overflow: %d\n", hlen);
2096			dev_kfree_skb_irq(skb);
2097			return -1;
2098		}
2099		i = hlen;
2100		if (i == dlen) /* attach FCS */
2101			i += cp->crc_size;
2102		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2103				    PCI_DMA_FROMDEVICE);
2104		addr = cas_page_map(page->buffer);
2105		memcpy(p, addr + off, i);
2106		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2107				    PCI_DMA_FROMDEVICE);
2108		cas_page_unmap(addr);
2109		if (p == (char *) skb->data) /* not split */
2110			RX_USED_ADD(page, cp->mtu_stride);
2111		else
2112			RX_USED_ADD(page, i);
2113
2114		/* any more data? */
2115		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2116			p += hlen;
2117			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2118			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2119			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2120					    dlen + cp->crc_size,
2121					    PCI_DMA_FROMDEVICE);
2122			addr = cas_page_map(page->buffer);
2123			memcpy(p, addr, dlen + cp->crc_size);
2124			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2125					    dlen + cp->crc_size,
2126					    PCI_DMA_FROMDEVICE);
2127			cas_page_unmap(addr);
2128			RX_USED_ADD(page, dlen + cp->crc_size);
2129		}
2130end_copy_pkt:
2131		if (cp->crc_size) {
2132			addr    = NULL;
2133			crcaddr = skb->data + alloclen;
2134		}
2135		skb_put(skb, alloclen);
2136	}
2137
2138	csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2139	if (cp->crc_size) {
2140		/* checksum includes FCS. strip it out. */
2141		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2142					      csum_unfold(csum)));
2143		if (addr)
2144			cas_page_unmap(addr);
2145	}
2146	skb->protocol = eth_type_trans(skb, cp->dev);
2147	if (skb->protocol == htons(ETH_P_IP)) {
2148		skb->csum = csum_unfold(~csum);
2149		skb->ip_summed = CHECKSUM_COMPLETE;
2150	} else
2151		skb_checksum_none_assert(skb);
2152	return len;
2153}
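
/* Condensed sketch of the allocation policy above: small packets are
 * copied whole, everything else gets a copied header of at least
 * RX_COPY_MIN bytes with the data pages attached as frags.
 * rx_alloc_len() is a hypothetical helper, not driver code.
 */
#if 0
static int rx_alloc_len(int hlen, int dlen, bool small_pkt)
{
	return (RX_COPY_ALWAYS || small_pkt) ? hlen + dlen
					     : max(hlen, RX_COPY_MIN);
}
#endif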
2154
2155
2156/* we can handle up to 64 rx flows at a time. we do the same thing
2157 * as nonreassm except that we batch up the buffers.
2158 * NOTE: we currently just treat each flow as a bunch of packets that
2159 *       we pass up. a better way would be to coalesce the packets
2160 *       into a jumbo packet. to do that, we need to do the following:
2161 *       1) the first packet will have a clean split between header and
2162 *          data. save both.
2163 *       2) each time the next flow packet comes in, extend the
2164 *          data length and merge the checksums.
2165 *       3) on flow release, fix up the header.
2166 *       4) make sure the higher layer doesn't care.
2167 * because packets get coalesced, we shouldn't run into fragment count
2168 * issues.
2169 */
2170static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2171				   struct sk_buff *skb)
2172{
2173	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2174	struct sk_buff_head *flow = &cp->rx_flows[flowid];
2175
2176	/* this is protected at a higher layer, so no need to
2177	 * do any additional locking here. stick the buffer
2178	 * at the end.
2179	 */
2180	__skb_queue_tail(flow, skb);
2181	if (words[0] & RX_COMP1_RELEASE_FLOW) {
2182		while ((skb = __skb_dequeue(flow))) {
2183			cas_skb_release(skb);
2184		}
2185	}
2186}
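
/* A minimal sketch of step 2 of the coalescing idea above: folding a new
 * fragment's checksum into a running flow checksum with csum_block_add().
 * flow_csum/flow_len are hypothetical -- the driver keeps no per-flow
 * checksum state today.
 */
#if 0
static void cas_flow_merge_csum(__wsum *flow_csum, int *flow_len,
				const void *data, int len)
{
	/* fold in the new fragment at its offset within the flow so
	 * odd-length fragments keep the right byte order
	 */
	*flow_csum = csum_block_add(*flow_csum,
				    csum_partial(data, len, 0), *flow_len);
	*flow_len += len;
}
#endif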
2187
2188/* put rx descriptor back on ring. if the buffer is still in use by a
2189 * higher layer, a replacement page is swapped in.
2190 */
2191static void cas_post_page(struct cas *cp, const int ring, const int index)
2192{
2193	cas_page_t *new;
2194	int entry;
2195
2196	entry = cp->rx_old[ring];
2197
2198	new = cas_page_swap(cp, ring, index);
2199	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2200	cp->init_rxds[ring][entry].index  =
2201		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2202			    CAS_BASE(RX_INDEX_RING, ring));
2203
2204	entry = RX_DESC_ENTRY(ring, entry + 1);
2205	cp->rx_old[ring] = entry;
2206
2207	if (entry % 4)
2208		return;
2209
2210	if (ring == 0)
2211		writel(entry, cp->regs + REG_RX_KICK);
2212	else if ((N_RX_DESC_RINGS > 1) &&
2213		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2214		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2215}
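
/* The kick above is deliberately batched: the hardware is only told about
 * new descriptors once the ring index is 4-aligned. Standalone form of
 * that check (kick_needed() is hypothetical):
 */
#if 0
static bool kick_needed(unsigned int next_entry)
{
	return (next_entry & 0x3) == 0;	/* same test as "entry % 4" above */
}
#endif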
2216
2217
2218/* refill a descriptor ring. only called when rx buffers are running low. */
2219static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2220{
2221	unsigned int entry, last, count, released;
2222	int cluster;
2223	cas_page_t **page = cp->rx_pages[ring];
2224
2225	entry = cp->rx_old[ring];
2226
2227	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2228		     "rxd[%d] interrupt, done: %d\n", ring, entry);
2229
2230	cluster = -1;
2231	count = entry & 0x3;
2232	last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
2233	released = 0;
2234	while (entry != last) {
2235		/* make a new buffer if it's still in use */
2236		if (page_count(page[entry]->buffer) > 1) {
2237			cas_page_t *new = cas_page_dequeue(cp);
2238			if (!new) {
2239				/* let the timer know that we need to
2240				 * do this again
2241				 */
2242				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2243				if (!timer_pending(&cp->link_timer))
2244					mod_timer(&cp->link_timer, jiffies +
2245						  CAS_LINK_FAST_TIMEOUT);
2246				cp->rx_old[ring]  = entry;
2247				cp->rx_last[ring] = num ? num - released : 0;
2248				return -ENOMEM;
2249			}
2250			spin_lock(&cp->rx_inuse_lock);
2251			list_add(&page[entry]->list, &cp->rx_inuse_list);
2252			spin_unlock(&cp->rx_inuse_lock);
2253			cp->init_rxds[ring][entry].buffer =
2254				cpu_to_le64(new->dma_addr);
2255			page[entry] = new;
2256
2257		}
2258
2259		if (++count == 4) {
2260			cluster = entry;
2261			count = 0;
2262		}
2263		released++;
2264		entry = RX_DESC_ENTRY(ring, entry + 1);
2265	}
2266	cp->rx_old[ring] = entry;
2267
2268	if (cluster < 0)
2269		return 0;
2270
2271	if (ring == 0)
2272		writel(cluster, cp->regs + REG_RX_KICK);
2273	else if ((N_RX_DESC_RINGS > 1) &&
2274		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2275		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2276	return 0;
2277}
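
/* The refill loop above swaps out any page the stack still references and
 * parks the old page on rx_inuse_list. A condensed form of that decision
 * (cas_page_t and page_count() as used above; helper is hypothetical):
 */
#if 0
static bool rx_page_needs_replacement(const cas_page_t *p)
{
	/* count > 1 means an skb frag still holds a reference */
	return page_count(p->buffer) > 1;
}
#endif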
2278
2279
2280/* process a completion ring. packets are set up in three basic ways:
2281 * small packets: header + data are copied into a single buffer.
2282 * large packets: header and data in a single buffer.
2283 * split packets: header in a separate buffer from data.
2284 *                data may be in multiple pages. data may be > 256
2285 *                bytes but in a single page.
2286 *
2287 * NOTE: RX page posting is done in this routine as well. while we
2288 *       could use multiple RX completion rings, it isn't really
2289 *       worthwhile because page posting would force serialization
2290 *       on the single descriptor ring.
2291 */
2292static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2293{
2294	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2295	int entry, drops;
2296	int npackets = 0;
2297
2298	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2299		     "rx[%d] interrupt, done: %d/%d\n",
2300		     ring,
2301		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2302
2303	entry = cp->rx_new[ring];
2304	drops = 0;
2305	while (1) {
2306		struct cas_rx_comp *rxc = rxcs + entry;
2307		struct sk_buff *uninitialized_var(skb);
2308		int type, len;
2309		u64 words[4];
2310		int i, dring;
2311
2312		words[0] = le64_to_cpu(rxc->word1);
2313		words[1] = le64_to_cpu(rxc->word2);
2314		words[2] = le64_to_cpu(rxc->word3);
2315		words[3] = le64_to_cpu(rxc->word4);
2316
2317		/* don't touch if still owned by hw */
2318		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2319		if (type == 0)
2320			break;
2321
2322		/* hw hasn't cleared the zero bit yet */
2323		if (words[3] & RX_COMP4_ZERO) {
2324			break;
2325		}
2326
2327		/* get info on the packet */
2328		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2329			spin_lock(&cp->stat_lock[ring]);
2330			cp->net_stats[ring].rx_errors++;
2331			if (words[3] & RX_COMP4_LEN_MISMATCH)
2332				cp->net_stats[ring].rx_length_errors++;
2333			if (words[3] & RX_COMP4_BAD)
2334				cp->net_stats[ring].rx_crc_errors++;
2335			spin_unlock(&cp->stat_lock[ring]);
2336
2337			/* We'll just return it to Cassini. */
2338		drop_it:
2339			spin_lock(&cp->stat_lock[ring]);
2340			++cp->net_stats[ring].rx_dropped;
2341			spin_unlock(&cp->stat_lock[ring]);
2342			goto next;
2343		}
2344
2345		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2346		if (len < 0) {
2347			++drops;
2348			goto drop_it;
2349		}
2350
2351		/* see if it's a flow re-assembly or not. the driver
2352		 * itself handles release back up.
2353		 */
2354		if (RX_DONT_BATCH || (type == 0x2)) {
2355			/* non-reassm: these always get released */
2356			cas_skb_release(skb);
2357		} else {
2358			cas_rx_flow_pkt(cp, words, skb);
2359		}
2360
2361		spin_lock(&cp->stat_lock[ring]);
2362		cp->net_stats[ring].rx_packets++;
2363		cp->net_stats[ring].rx_bytes += len;
2364		spin_unlock(&cp->stat_lock[ring]);
2365
2366	next:
2367		npackets++;
2368
2369		/* should it be released? */
2370		if (words[0] & RX_COMP1_RELEASE_HDR) {
2371			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2372			dring = CAS_VAL(RX_INDEX_RING, i);
2373			i = CAS_VAL(RX_INDEX_NUM, i);
2374			cas_post_page(cp, dring, i);
2375		}
2376
2377		if (words[0] & RX_COMP1_RELEASE_DATA) {
2378			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2379			dring = CAS_VAL(RX_INDEX_RING, i);
2380			i = CAS_VAL(RX_INDEX_NUM, i);
2381			cas_post_page(cp, dring, i);
2382		}
2383
2384		if (words[0] & RX_COMP1_RELEASE_NEXT) {
2385			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2386			dring = CAS_VAL(RX_INDEX_RING, i);
2387			i = CAS_VAL(RX_INDEX_NUM, i);
2388			cas_post_page(cp, dring, i);
2389		}
2390
2391		/* skip to the next entry */
2392		entry = RX_COMP_ENTRY(ring, entry + 1 +
2393				      CAS_VAL(RX_COMP1_SKIP, words[0]));
2394#ifdef USE_NAPI
2395		if (budget && (npackets >= budget))
2396			break;
2397#endif
2398	}
2399	cp->rx_new[ring] = entry;
2400
2401	if (drops)
2402		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2403	return npackets;
2404}
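
/* The loop above is driven entirely by bitfields pulled out of the four
 * completion words with CAS_VAL(). A sketch of the ones it keys off
 * (field names as used above; cas_rxc_decode_example() is hypothetical):
 */
#if 0
static void cas_rxc_decode_example(const u64 words[4])
{
	int type = CAS_VAL(RX_COMP1_TYPE, words[0]);	/* 0 => still hw-owned */
	int skip = CAS_VAL(RX_COMP1_SKIP, words[0]);	/* extra entries consumed */
	int flow = CAS_VAL(RX_COMP3_FLOWID, words[2]);	/* reassembly flow id */

	(void)type; (void)skip; (void)flow;
}
#endif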
2405
2406
2407/* put completion entries back on the ring */
2409				struct cas *cp, int ring)
2410{
2411	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2412	int last, entry;
2413
2414	last = cp->rx_cur[ring];
2415	entry = cp->rx_new[ring];
2416	netif_printk(cp, intr, KERN_DEBUG, dev,
2417		     "rxc[%d] interrupt, done: %d/%d\n",
2418		     ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2419
2420	/* zero and re-mark descriptors */
2421	while (last != entry) {
2422		cas_rxc_init(rxc + last);
2423		last = RX_COMP_ENTRY(ring, last + 1);
2424	}
2425	cp->rx_cur[ring] = last;
2426
2427	if (ring == 0)
2428		writel(last, cp->regs + REG_RX_COMP_TAIL);
2429	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2430		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2431}
2432
2433
2434
2435/* cassini can use all four PCI interrupts for the completion ring.
2436 * rings 3 and 4 are identical
2437 */
2438#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2439static inline void cas_handle_irqN(struct net_device *dev,
2440				   struct cas *cp, const u32 status,
2441				   const int ring)
2442{
2443	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2444		cas_post_rxcs_ringN(dev, cp, ring);
2445}
2446
2447static irqreturn_t cas_interruptN(int irq, void *dev_id)
2448{
2449	struct net_device *dev = dev_id;
2450	struct cas *cp = netdev_priv(dev);
2451	unsigned long flags;
2452	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2453	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2454
2455	/* check for shared irq */
2456	if (status == 0)
2457		return IRQ_NONE;
2458
2459	spin_lock_irqsave(&cp->lock, flags);
2460	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2461#ifdef USE_NAPI
2462		cas_mask_intr(cp);
2463		napi_schedule(&cp->napi);
2464#else
2465		cas_rx_ringN(cp, ring, 0);
2466#endif
2467		status &= ~INTR_RX_DONE_ALT;
2468	}
2469
2470	if (status)
2471		cas_handle_irqN(dev, cp, status, ring);
2472	spin_unlock_irqrestore(&cp->lock, flags);
2473	return IRQ_HANDLED;
2474}
2475#endif
2476
2477#ifdef USE_PCI_INTB
2478/* everything but rx packets */
2479static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2480{
2481	if (status & INTR_RX_BUF_UNAVAIL_1) {
2482		/* Frame arrived, no free RX buffers available.
2483		 * NOTE: we can get this on a link transition. */
2484		cas_post_rxds_ringN(cp, 1, 0);
2485		spin_lock(&cp->stat_lock[1]);
2486		cp->net_stats[1].rx_dropped++;
2487		spin_unlock(&cp->stat_lock[1]);
2488	}
2489
2490	if (status & INTR_RX_BUF_AE_1)
2491		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2492				    RX_AE_FREEN_VAL(1));
2493
2494	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2495		cas_post_rxcs_ringN(cp->dev, cp, 1);
2496}
2497
2498/* ring 2 handles a few more events than 3 and 4 */
2499static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2500{
2501	struct net_device *dev = dev_id;
2502	struct cas *cp = netdev_priv(dev);
2503	unsigned long flags;
2504	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2505
2506	/* check for shared interrupt */
2507	if (status == 0)
2508		return IRQ_NONE;
2509
2510	spin_lock_irqsave(&cp->lock, flags);
2511	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2512#ifdef USE_NAPI
2513		cas_mask_intr(cp);
2514		napi_schedule(&cp->napi);
2515#else
2516		cas_rx_ringN(cp, 1, 0);
2517#endif
2518		status &= ~INTR_RX_DONE_ALT;
2519	}
2520	if (status)
2521		cas_handle_irq1(cp, status);
2522	spin_unlock_irqrestore(&cp->lock, flags);
2523	return IRQ_HANDLED;
2524}
2525#endif
2526
2527static inline void cas_handle_irq(struct net_device *dev,
2528				  struct cas *cp, const u32 status)
2529{
2530	/* housekeeping interrupts */
2531	if (status & INTR_ERROR_MASK)
2532		cas_abnormal_irq(dev, cp, status);
2533
2534	if (status & INTR_RX_BUF_UNAVAIL) {
2535		/* Frame arrived, no free RX buffers available.
2536		 * NOTE: we can get this on a link transition.
2537		 */
2538		cas_post_rxds_ringN(cp, 0, 0);
2539		spin_lock(&cp->stat_lock[0]);
2540		cp->net_stats[0].rx_dropped++;
2541		spin_unlock(&cp->stat_lock[0]);
2542	} else if (status & INTR_RX_BUF_AE) {
2543		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2544				    RX_AE_FREEN_VAL(0));
2545	}
2546
2547	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2548		cas_post_rxcs_ringN(dev, cp, 0);
2549}
2550
2551static irqreturn_t cas_interrupt(int irq, void *dev_id)
2552{
2553	struct net_device *dev = dev_id;
2554	struct cas *cp = netdev_priv(dev);
2555	unsigned long flags;
2556	u32 status = readl(cp->regs + REG_INTR_STATUS);
2557
2558	if (status == 0)
2559		return IRQ_NONE;
2560
2561	spin_lock_irqsave(&cp->lock, flags);
2562	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2563		cas_tx(dev, cp, status);
2564		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2565	}
2566
2567	if (status & INTR_RX_DONE) {
2568#ifdef USE_NAPI
2569		cas_mask_intr(cp);
2570		napi_schedule(&cp->napi);
2571#else
2572		cas_rx_ringN(cp, 0, 0);
2573#endif
2574		status &= ~INTR_RX_DONE;
2575	}
2576
2577	if (status)
2578		cas_handle_irq(dev, cp, status);
2579	spin_unlock_irqrestore(&cp->lock, flags);
2580	return IRQ_HANDLED;
2581}
2582
2583
2584#ifdef USE_NAPI
2585static int cas_poll(struct napi_struct *napi, int budget)
2586{
2587	struct cas *cp = container_of(napi, struct cas, napi);
2588	struct net_device *dev = cp->dev;
2589	int i, enable_intr, credits;
2590	u32 status = readl(cp->regs + REG_INTR_STATUS);
2591	unsigned long flags;
2592
2593	spin_lock_irqsave(&cp->lock, flags);
2594	cas_tx(dev, cp, status);
2595	spin_unlock_irqrestore(&cp->lock, flags);
2596
2597	/* NAPI rx packets. we spread the credits across all of the
2598	 * rxc rings
2599	 *
2600	 * to make sure we're fair with the work we loop through each
2601	 * ring N_RX_COMP_RING times with a request of
2602	 * budget / N_RX_COMP_RINGS
2603	 */
2604	enable_intr = 1;
2605	credits = 0;
2606	for (i = 0; i < N_RX_COMP_RINGS; i++) {
2607		int j;
2608		for (j = 0; j < N_RX_COMP_RINGS; j++) {
2609			credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2610			if (credits >= budget) {
2611				enable_intr = 0;
2612				goto rx_comp;
2613			}
2614		}
2615	}
2616
2617rx_comp:
2618	/* final rx completion */
2619	spin_lock_irqsave(&cp->lock, flags);
2620	if (status)
2621		cas_handle_irq(dev, cp, status);
2622
2623#ifdef USE_PCI_INTB
2624	if (N_RX_COMP_RINGS > 1) {
2625		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2626		if (status)
2627			cas_handle_irq1(cp, status);
2628	}
2629#endif
2630
2631#ifdef USE_PCI_INTC
2632	if (N_RX_COMP_RINGS > 2) {
2633		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2634		if (status)
2635			cas_handle_irqN(dev, cp, status, 2);
2636	}
2637#endif
2638
2639#ifdef USE_PCI_INTD
2640	if (N_RX_COMP_RINGS > 3) {
2641		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2642		if (status)
2643			cas_handle_irqN(dev, cp, status, 3);
2644	}
2645#endif
2646	spin_unlock_irqrestore(&cp->lock, flags);
2647	if (enable_intr) {
2648		napi_complete(napi);
2649		cas_unmask_intr(cp);
2650	}
2651	return credits;
2652}
2653#endif
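
/* Worked example of the budget split above, assuming budget == 64 and
 * N_RX_COMP_RINGS == 4: each inner call may consume up to 16 credits, and
 * the double loop gives every ring up to four passes before the budget
 * check stops polling. per_ring_quota() is hypothetical.
 */
#if 0
static int per_ring_quota(int budget)
{
	return budget / N_RX_COMP_RINGS;	/* e.g. 64 / 4 == 16 */
}
#endif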
2654
2655#ifdef CONFIG_NET_POLL_CONTROLLER
2656static void cas_netpoll(struct net_device *dev)
2657{
2658	struct cas *cp = netdev_priv(dev);
2659
2660	cas_disable_irq(cp, 0);
2661	cas_interrupt(cp->pdev->irq, dev);
2662	cas_enable_irq(cp, 0);
2663
2664#ifdef USE_PCI_INTB
2665	if (N_RX_COMP_RINGS > 1) {
2666		/* cas_interrupt1(); */
2667	}
2668#endif
2669#ifdef USE_PCI_INTC
2670	if (N_RX_COMP_RINGS > 2) {
2671		/* cas_interruptN(); */
2672	}
2673#endif
2674#ifdef USE_PCI_INTD
2675	if (N_RX_COMP_RINGS > 3) {
2676		/* cas_interruptN(); */
2677	}
2678#endif
2679}
2680#endif
2681
2682static void cas_tx_timeout(struct net_device *dev)
2683{
2684	struct cas *cp = netdev_priv(dev);
2685
2686	netdev_err(dev, "transmit timed out, resetting\n");
2687	if (!cp->hw_running) {
2688		netdev_err(dev, "hrm.. hw not running!\n");
2689		return;
2690	}
2691
2692	netdev_err(dev, "MIF_STATE[%08x]\n",
2693		   readl(cp->regs + REG_MIF_STATE_MACHINE));
2694
2695	netdev_err(dev, "MAC_STATE[%08x]\n",
2696		   readl(cp->regs + REG_MAC_STATE_MACHINE));
2697
2698	netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2699		   readl(cp->regs + REG_TX_CFG),
2700		   readl(cp->regs + REG_MAC_TX_STATUS),
2701		   readl(cp->regs + REG_MAC_TX_CFG),
2702		   readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2703		   readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2704		   readl(cp->regs + REG_TX_FIFO_READ_PTR),
2705		   readl(cp->regs + REG_TX_SM_1),
2706		   readl(cp->regs + REG_TX_SM_2));
2707
2708	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2709		   readl(cp->regs + REG_RX_CFG),
2710		   readl(cp->regs + REG_MAC_RX_STATUS),
2711		   readl(cp->regs + REG_MAC_RX_CFG));
2712
2713	netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2714		   readl(cp->regs + REG_HP_STATE_MACHINE),
2715		   readl(cp->regs + REG_HP_STATUS0),
2716		   readl(cp->regs + REG_HP_STATUS1),
2717		   readl(cp->regs + REG_HP_STATUS2));
2718
2719#if 1
2720	atomic_inc(&cp->reset_task_pending);
2721	atomic_inc(&cp->reset_task_pending_all);
2722	schedule_work(&cp->reset_task);
2723#else
2724	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2725	schedule_work(&cp->reset_task);
2726#endif
2727}
2728
2729static inline int cas_intme(int ring, int entry)
2730{
2731	/* Algorithm: IRQ every 1/2 of descriptors. */
2732	if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2733		return 1;
2734	return 0;
2735}
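
/* Worked example of the mask above, assuming TX_DESC_RINGN_SIZE(ring) is
 * 128: the mask is 63, so INTME is requested at entries 0 and 64 -- twice
 * per trip around the ring. intme_hits_example() is hypothetical.
 */
#if 0
static int intme_hits_example(void)	/* returns 2 */
{
	int entry, hits = 0;

	for (entry = 0; entry < 128; entry++)
		if (!(entry & ((128 >> 1) - 1)))
			hits++;
	return hits;
}
#endif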
2736
2737
2738static void cas_write_txd(struct cas *cp, int ring, int entry,
2739			  dma_addr_t mapping, int len, u64 ctrl, int last)
2740{
2741	struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2742
2743	ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2744	if (cas_intme(ring, entry))
2745		ctrl |= TX_DESC_INTME;
2746	if (last)
2747		ctrl |= TX_DESC_EOF;
2748	txd->control = cpu_to_le64(ctrl);
2749	txd->buffer = cpu_to_le64(mapping);
2750}
2751
2752static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2753				const int entry)
2754{
2755	return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2756}
2757
2758static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2759				     const int entry, const int tentry)
2760{
2761	cp->tx_tiny_use[ring][tentry].nbufs++;
2762	cp->tx_tiny_use[ring][entry].used = 1;
2763	return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2764}
2765
2766static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2767				    struct sk_buff *skb)
2768{
2769	struct net_device *dev = cp->dev;
2770	int entry, nr_frags, frag, tabort, tentry;
2771	dma_addr_t mapping;
2772	unsigned long flags;
2773	u64 ctrl;
2774	u32 len;
2775
2776	spin_lock_irqsave(&cp->tx_lock[ring], flags);
2777
2778	/* This is a hard error, log it. */
2779	if (TX_BUFFS_AVAIL(cp, ring) <=
2780	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2781		netif_stop_queue(dev);
2782		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2783		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2784		return 1;
2785	}
2786
2787	ctrl = 0;
2788	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2789		const u64 csum_start_off = skb_checksum_start_offset(skb);
2790		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2791
2792		ctrl =  TX_DESC_CSUM_EN |
2793			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2794			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2795	}
2796
2797	entry = cp->tx_new[ring];
2798	cp->tx_skbs[ring][entry] = skb;
2799
2800	nr_frags = skb_shinfo(skb)->nr_frags;
2801	len = skb_headlen(skb);
2802	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2803			       offset_in_page(skb->data), len,
2804			       PCI_DMA_TODEVICE);
2805
2806	tentry = entry;
2807	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2808	if (unlikely(tabort)) {
2809		/* NOTE: len is always > tabort */
2810		cas_write_txd(cp, ring, entry, mapping, len - tabort,
2811			      ctrl | TX_DESC_SOF, 0);
2812		entry = TX_DESC_NEXT(ring, entry);
2813
2814		skb_copy_from_linear_data_offset(skb, len - tabort,
2815			      tx_tiny_buf(cp, ring, entry), tabort);
2816		mapping = tx_tiny_map(cp, ring, entry, tentry);
2817		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2818			      (nr_frags == 0));
2819	} else {
2820		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2821			      TX_DESC_SOF, (nr_frags == 0));
2822	}
2823	entry = TX_DESC_NEXT(ring, entry);
2824
2825	for (frag = 0; frag < nr_frags; frag++) {
2826		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2827
2828		len = skb_frag_size(fragp);
2829		mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2830					   DMA_TO_DEVICE);
2831
2832		tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2833		if (unlikely(tabort)) {
2834			void *addr;
2835
2836			/* NOTE: len is always > tabort */
2837			cas_write_txd(cp, ring, entry, mapping, len - tabort,
2838				      ctrl, 0);
2839			entry = TX_DESC_NEXT(ring, entry);
2840
2841			addr = cas_page_map(skb_frag_page(fragp));
2842			memcpy(tx_tiny_buf(cp, ring, entry),
2843			       addr + fragp->page_offset + len - tabort,
2844			       tabort);
2845			cas_page_unmap(addr);
2846			mapping = tx_tiny_map(cp, ring, entry, tentry);
2847			len     = tabort;
2848		}
2849
2850		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2851			      (frag + 1 == nr_frags));
2852		entry = TX_DESC_NEXT(ring, entry);
2853	}
2854
2855	cp->tx_new[ring] = entry;
2856	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2857		netif_stop_queue(dev);
2858
2859	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2860		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2861		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2862	writel(entry, cp->regs + REG_TX_KICKN(ring));
2863	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2864	return 0;
2865}
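
/* The tabort path above works around the target-abort erratum by
 * splitting a buffer: the first descriptor DMAs len - tabort bytes in
 * place, and the last tabort bytes are copied into a pre-mapped tiny
 * buffer that a second descriptor points at. Condensed model (lengths
 * only; split_for_tabort() is hypothetical):
 */
#if 0
static void split_for_tabort(int len, int tabort,
			     int *first_len, int *second_len)
{
	*first_len  = len - tabort;	/* DMA'd from the original buffer */
	*second_len = tabort;		/* copied into the tiny buffer */
}
#endif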
2866
2867static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2868{
2869	struct cas *cp = netdev_priv(dev);
2870
2871	/* this is only used as a load-balancing hint, so it doesn't
2872	 * need to be SMP safe
2873	 */
2874	static int ring;
2875
2876	if (skb_padto(skb, cp->min_frame_size))
2877		return NETDEV_TX_OK;
2878
2879	/* XXX: we need some higher-level QoS hooks to steer packets to
2880	 *      individual queues.
2881	 */
2882	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2883		return NETDEV_TX_BUSY;
2884	return NETDEV_TX_OK;
2885}
2886
2887static void cas_init_tx_dma(struct cas *cp)
2888{
2889	u64 desc_dma = cp->block_dvma;
2890	unsigned long off;
2891	u32 val;
2892	int i;
2893
2894	/* set up tx completion writeback registers. must be 8-byte aligned */
2895#ifdef USE_TX_COMPWB
2896	off = offsetof(struct cas_init_block, tx_compwb);
2897	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2898	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2899#endif
2900
2901	/* enable completion writebacks, enable paced mode,
2902	 * disable read pipe, and disable pre-interrupt compwbs
2903	 */
2904	val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2905		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2906		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2907		TX_CFG_INTR_COMPWB_DIS;
2908
2909	/* write out tx ring info and tx desc bases */
2910	for (i = 0; i < MAX_TX_RINGS; i++) {
2911		off = (unsigned long) cp->init_txds[i] -
2912			(unsigned long) cp->init_block;
2913
2914		val |= CAS_TX_RINGN_BASE(i);
2915		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2916		writel((desc_dma + off) & 0xffffffff, cp->regs +
2917		       REG_TX_DBN_LOW(i));
2918		/* don't zero out the kick register here as the system
2919		 * will wedge
2920		 */
2921	}
2922	writel(val, cp->regs + REG_TX_CFG);
2923
2924	/* program max burst sizes. these numbers should be different
2925	 * if doing QoS.
2926	 */
2927#ifdef USE_QOS
2928	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2929	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2930	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2931	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2932#else
2933	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2934	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2935	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2936	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2937#endif
2938}
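
/* The 64-bit descriptor bases above are programmed as split hi/lo
 * register pairs. Standalone form of that split (write_split_addr() is
 * hypothetical):
 */
#if 0
static void write_split_addr(u64 addr, u32 *hi, u32 *lo)
{
	*hi = addr >> 32;
	*lo = addr & 0xffffffff;
}
#endif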
2939
2940/* Must be invoked under cp->lock. */
2941static inline void cas_init_dma(struct cas *cp)
2942{
2943	cas_init_tx_dma(cp);
2944	cas_init_rx_dma(cp);
2945}
2946
2947static void cas_process_mc_list(struct cas *cp)
2948{
2949	u16 hash_table[16];
2950	u32 crc;
2951	struct netdev_hw_addr *ha;
2952	int i = 1;
2953
2954	memset(hash_table, 0, sizeof(hash_table));
2955	netdev_for_each_mc_addr(ha, cp->dev) {
2956		if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2957			/* use the alternate mac address registers for the
2958			 * first 15 multicast addresses
2959			 */
2960			writel((ha->addr[4] << 8) | ha->addr[5],
2961			       cp->regs + REG_MAC_ADDRN(i*3 + 0));
2962			writel((ha->addr[2] << 8) | ha->addr[3],
2963			       cp->regs + REG_MAC_ADDRN(i*3 + 1));
2964			writel((ha->addr[0] << 8) | ha->addr[1],
2965			       cp->regs + REG_MAC_ADDRN(i*3 + 2));
2966			i++;
2967		}
2968		else {
2969			/* use hw hash table for the next series of
2970			 * multicast addresses
2971			 */
2972			crc = ether_crc_le(ETH_ALEN, ha->addr);
2973			crc >>= 24;
2974			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2975		}
2976	}
2977	for (i = 0; i < 16; i++)
2978		writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2979}
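
/* Hash placement used above: the top 8 bits of the little-endian CRC pick
 * one of 256 filter bits spread over sixteen 16-bit registers. Standalone
 * form of the bit selection (mc_hash_set() is hypothetical):
 */
#if 0
static void mc_hash_set(u16 hash_table[16], const u8 *addr)
{
	u32 crc = ether_crc_le(ETH_ALEN, addr) >> 24;

	hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
}
#endif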
2980
2981/* Must be invoked under cp->lock. */
2982static u32 cas_setup_multicast(struct cas *cp)
2983{
2984	u32 rxcfg = 0;
2985	int i;
2986
2987	if (cp->dev->flags & IFF_PROMISC) {
2988		rxcfg |= MAC_RX_CFG_PROMISC_EN;
2989
2990	} else if (cp->dev->flags & IFF_ALLMULTI) {
2991		for (i = 0; i < 16; i++)
2992			writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2993		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2994
2995	} else {
2996		cas_process_mc_list(cp);
2997		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2998	}
2999
3000	return rxcfg;
3001}
3002
3003/* must be invoked under cp->stat_lock[N_TX_RINGS] */
3004static void cas_clear_mac_err(struct cas *cp)
3005{
3006	writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3007	writel(0, cp->regs + REG_MAC_COLL_FIRST);
3008	writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3009	writel(0, cp->regs + REG_MAC_COLL_LATE);
3010	writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3011	writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3012	writel(0, cp->regs + REG_MAC_RECV_FRAME);
3013	writel(0, cp->regs + REG_MAC_LEN_ERR);
3014	writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3015	writel(0, cp->regs + REG_MAC_FCS_ERR);
3016	writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3017}
3018
3019
3020static void cas_mac_reset(struct cas *cp)
3021{
3022	int i;
3023
3024	/* do both TX and RX reset */
3025	writel(0x1, cp->regs + REG_MAC_TX_RESET);
3026	writel(0x1, cp->regs + REG_MAC_RX_RESET);
3027
3028	/* wait for TX */
3029	i = STOP_TRIES;
3030	while (i-- > 0) {
3031		if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3032			break;
3033		udelay(10);
3034	}
3035
3036	/* wait for RX */
3037	i = STOP_TRIES;
3038	while (i-- > 0) {
3039		if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3040			break;
3041		udelay(10);
3042	}
3043
3044	if (readl(cp->regs + REG_MAC_TX_RESET) |
3045	    readl(cp->regs + REG_MAC_RX_RESET))
3046		netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3047			   readl(cp->regs + REG_MAC_TX_RESET),
3048			   readl(cp->regs + REG_MAC_RX_RESET),
3049			   readl(cp->regs + REG_MAC_STATE_MACHINE));
3050}
3051
3052
3053/* Must be invoked under cp->lock. */
3054static void cas_init_mac(struct cas *cp)
3055{
3056	unsigned char *e = &cp->dev->dev_addr[0];
3057	int i;
3058	cas_mac_reset(cp);
3059
3060	/* setup core arbitration weight register */
3061	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3062
3063#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3064	/* set the infinite burst register for chips that don't have
3065	 * pci issues.
3066	 */
3067	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3068		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3069#endif
3070
3071	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3072
3073	writel(0x00, cp->regs + REG_MAC_IPG0);
3074	writel(0x08, cp->regs + REG_MAC_IPG1);
3075	writel(0x04, cp->regs + REG_MAC_IPG2);
3076
3077	/* change later for 802.3z */
3078	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3079
3080	/* min frame + FCS */
3081	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3082
3083	/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
3084	 * specify the maximum frame size to prevent RX tag errors on
3085	 * oversized frames.
3086	 */
3087	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3088	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3089			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3090	       cp->regs + REG_MAC_FRAMESIZE_MAX);
3091
3092	/* NOTE: crc_size is used as a surrogate for half-duplex.
3093	 * workaround saturn half-duplex issue by increasing preamble
3094	 * size to 65 bytes.
3095	 */
3096	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3097		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3098	else
3099		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3100	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3101	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3102	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3103
3104	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3105
3106	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3107	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3108	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3109	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3110	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3111
3112	/* setup mac address in perfect filter array */
3113	for (i = 0; i < 45; i++)
3114		writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3115
3116	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3117	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3118	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3119
3120	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3121	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3122	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3123
3124	cp->mac_rx_cfg = cas_setup_multicast(cp);
3125
3126	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3127	cas_clear_mac_err(cp);
3128	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3129
3130	/* Setup MAC interrupts.  We want to get all of the interesting
3131	 * counter expiration events, but we do not want to hear about
3132	 * normal rx/tx as the DMA engine tells us that.
3133	 */
3134	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3135	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3136
3137	/* Don't enable even the PAUSE interrupts for now, we
3138	 * make no use of those events other than to record them.
3139	 */
3140	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3141}
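
/* The MAC address registers above hold 16 bits each, low half first: for
 * 08:00:20:ab:cd:ef, ADDRN(0) gets 0xcdef, ADDRN(1) 0x20ab and ADDRN(2)
 * 0x0800. Standalone form (mac_addr_to_regs() is hypothetical):
 */
#if 0
static void mac_addr_to_regs(const u8 *e, u16 regs[3])
{
	regs[0] = (e[4] << 8) | e[5];
	regs[1] = (e[2] << 8) | e[3];
	regs[2] = (e[0] << 8) | e[1];
}
#endif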
3142
3143/* Must be invoked under cp->lock. */
3144static void cas_init_pause_thresholds(struct cas *cp)
3145{
3146	/* Calculate pause thresholds.  Setting the OFF threshold to the
3147	 * full RX fifo size effectively disables PAUSE generation
3148	 */
3149	if (cp->rx_fifo_size <= (2 * 1024)) {
3150		cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3151	} else {
3152		int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3153		if (max_frame * 3 > cp->rx_fifo_size) {
3154			cp->rx_pause_off = 7104;
3155			cp->rx_pause_on  = 960;
3156		} else {
3157			int off = (cp->rx_fifo_size - (max_frame * 2));
3158			int on = off - max_frame;
3159			cp->rx_pause_off = off;
3160			cp->rx_pause_on = on;
3161		}
3162	}
3163}
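
/* Worked example of the math above, assuming a 16KB RX fifo and a
 * 1500-byte MTU: max_frame = (1500 + 14 + 4 + 4 + 64) & ~63 = 1536,
 * 3 * 1536 <= 16384, so off = 16384 - 2 * 1536 = 13312 and
 * on = 13312 - 1536 = 11776. pause_thresholds_example() is hypothetical.
 */
#if 0
static void pause_thresholds_example(int rx_fifo_size, int mtu,
				     int *off, int *on)
{
	int max_frame = (mtu + ETH_HLEN + 4 + 4 + 64) & ~63;

	*off = rx_fifo_size - (max_frame * 2);
	*on  = *off - max_frame;
}
#endif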
3164
3165static int cas_vpd_match(const void __iomem *p, const char *str)
3166{
3167	int len = strlen(str) + 1;
3168	int i;
3169
3170	for (i = 0; i < len; i++) {
3171		if (readb(p + i) != str[i])
3172			return 0;
3173	}
3174	return 1;
3175}
3176
3177
3178/* get the mac address by reading the vpd information in the rom.
3179 * also get the phy type and determine if there's an entropy generator.
3180 * NOTE: this is a bit convoluted for the following reasons:
3181 *  1) vpd info has order-dependent mac addresses for multinic cards
3182 *  2) the only way to determine the nic order is to use the slot
3183 *     number.
3184 *  3) fiber cards don't have bridges, so their slot numbers don't
3185 *     mean anything.
3186 *  4) we don't actually know we have a fiber card until after
3187 *     the mac addresses are parsed.
3188 */
3189static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3190			    const int offset)
3191{
3192	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3193	void __iomem *base, *kstart;
3194	int i, len;
3195	int found = 0;
3196#define VPD_FOUND_MAC        0x01
3197#define VPD_FOUND_PHY        0x02
3198
3199	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3200	int mac_off  = 0;
3201
3202#if defined(CONFIG_SPARC)
3203	const unsigned char *addr;
3204#endif
3205
3206	/* give us access to the PROM */
3207	writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3208	       cp->regs + REG_BIM_LOCAL_DEV_EN);
3209
3210	/* check for an expansion rom */
3211	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3212		goto use_random_mac_addr;
3213
3214	/* search for beginning of vpd */
3215	base = NULL;
3216	for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3217		/* check for PCIR */
3218		if ((readb(p + i + 0) == 0x50) &&
3219		    (readb(p + i + 1) == 0x43) &&
3220		    (readb(p + i + 2) == 0x49) &&
3221		    (readb(p + i + 3) == 0x52)) {
3222			base = p + (readb(p + i + 8) |
3223				    (readb(p + i + 9) << 8));
3224			break;
3225		}
3226	}
3227
3228	if (!base || (readb(base) != 0x82))
3229		goto use_random_mac_addr;
3230
3231	i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3232	while (i < EXPANSION_ROM_SIZE) {
3233		if (readb(base + i) != 0x90) /* no vpd found */
3234			goto use_random_mac_addr;
3235
3236		/* found a vpd field */
3237		len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3238
3239		/* extract keywords */
3240		kstart = base + i + 3;
3241		p = kstart;
3242		while ((p - kstart) < len) {
3243			int klen = readb(p + 2);
3244			int j;
3245			char type;
3246
3247			p += 3;
3248
3249			/* look for the following things:
3250			 * -- correct length == 29
3251			 * 3 (type) + 2 (size) +
3252			 * 18 (strlen("local-mac-address") + 1) +
3253			 * 6 (mac addr)
3254			 * -- VPD Instance 'I'
3255			 * -- VPD Type Bytes 'B'
3256			 * -- VPD data length == 6
3257			 * -- property string == local-mac-address
3258			 *
3259			 * -- correct length == 24
3260			 * 3 (type) + 2 (size) +
3261			 * 12 (strlen("entropy-dev") + 1) +
3262			 * 7 (strlen("vms110") + 1)
3263			 * -- VPD Instance 'I'
3264			 * -- VPD Type String 'S'
3265			 * -- VPD data length == 7
3266			 * -- property string == entropy-dev
3267			 *
3268			 * -- correct length == 18
3269			 * 3 (type) + 2 (size) +
3270			 * 9 (strlen("phy-type") + 1) +
3271			 * 4 (strlen("pcs") + 1)
3272			 * -- VPD Instance 'I'
3273			 * -- VPD Type String 'S'
3274			 * -- VPD data length == 4
3275			 * -- property string == phy-type
3276			 *
3277			 * -- correct length == 23
3278			 * 3 (type) + 2 (size) +
3279			 * 14 (strlen("phy-interface") + 1) +
3280			 * 4 (strlen("pcs") + 1)
3281			 * -- VPD Instance 'I'
3282			 * -- VPD Type String 'S'
3283			 * -- VPD data length == 4
3284			 * -- property string == phy-interface
3285			 */
3286			if (readb(p) != 'I')
3287				goto next;
3288
3289			/* finally, check string and length */
3290			type = readb(p + 3);
3291			if (type == 'B') {
3292				if ((klen == 29) && readb(p + 4) == 6 &&
3293				    cas_vpd_match(p + 5,
3294						  "local-mac-address")) {
3295					if (mac_off++ > offset)
3296						goto next;
3297
3298					/* set mac address */
3299					for (j = 0; j < 6; j++)
3300						dev_addr[j] =
3301							readb(p + 23 + j);
3302					goto found_mac;
3303				}
3304			}
3305
3306			if (type != 'S')
3307				goto next;
3308
3309#ifdef USE_ENTROPY_DEV
3310			if ((klen == 24) &&
3311			    cas_vpd_match(p + 5, "entropy-dev") &&
3312			    cas_vpd_match(p + 17, "vms110")) {
3313				cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3314				goto next;
3315			}
3316#endif
3317
3318			if (found & VPD_FOUND_PHY)
3319				goto next;
3320
3321			if ((klen == 18) && readb(p + 4) == 4 &&
3322			    cas_vpd_match(p + 5, "phy-type")) {
3323				if (cas_vpd_match(p + 14, "pcs")) {
3324					phy_type = CAS_PHY_SERDES;
3325					goto found_phy;
3326				}
3327			}
3328
3329			if ((klen == 23) && readb(p + 4) == 4 &&
3330			    cas_vpd_match(p + 5, "phy-interface")) {
3331				if (cas_vpd_match(p + 19, "pcs")) {
3332					phy_type = CAS_PHY_SERDES;
3333					goto found_phy;
3334				}
3335			}
3336found_mac:
3337			found |= VPD_FOUND_MAC;
3338			goto next;
3339
3340found_phy:
3341			found |= VPD_FOUND_PHY;
3342
3343next:
3344			p += klen;
3345		}
3346		i += len + 3;
3347	}
3348
3349use_random_mac_addr:
3350	if (found & VPD_FOUND_MAC)
3351		goto done;
3352
3353#if defined(CONFIG_SPARC)
3354	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3355	if (addr != NULL) {
3356		memcpy(dev_addr, addr, ETH_ALEN);
3357		goto done;
3358	}
3359#endif
3360
3361	/* Sun MAC prefix then 3 random bytes. */
3362	pr_info("MAC address not found in ROM VPD\n");
3363	dev_addr[0] = 0x08;
3364	dev_addr[1] = 0x00;
3365	dev_addr[2] = 0x20;
3366	get_random_bytes(dev_addr + 3, 3);
3367
3368done:
3369	writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3370	return phy_type;
3371}
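
/* The ROM scan above hinges on finding the PCI data structure: the bytes
 * 'P' 'C' 'I' 'R' followed, at offset 8, by a little-endian pointer to
 * the VPD area. Condensed model over a plain buffer (find_pcir() is
 * hypothetical; the driver reads through readb() instead):
 */
#if 0
static int find_pcir(const u8 *rom, int size)
{
	int i;

	for (i = 2; i < size - 9; i++)
		if (!memcmp(rom + i, "PCIR", 4))
			return rom[i + 8] | (rom[i + 9] << 8);
	return -1;	/* no PCI data structure found */
}
#endif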
3372
3373/* check pci invariants */
3374static void cas_check_pci_invariants(struct cas *cp)
3375{
3376	struct pci_dev *pdev = cp->pdev;
3377
3378	cp->cas_flags = 0;
3379	if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3380	    (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3381		if (pdev->revision >= CAS_ID_REVPLUS)
3382			cp->cas_flags |= CAS_FLAG_REG_PLUS;
3383		if (pdev->revision < CAS_ID_REVPLUS02u)
3384			cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3385
3386		/* Original Cassini supports HW CSUM, but it's not
3387		 * enabled by default as it can trigger TX hangs.
3388		 */
3389		if (pdev->revision < CAS_ID_REV2)
3390			cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3391	} else {
3392		/* Only sun has original cassini chips.  */
3393		cp->cas_flags |= CAS_FLAG_REG_PLUS;
3394
3395		/* We use a flag because the same phy might be externally
3396		 * connected.
3397		 */
3398		if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3399		    (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3400			cp->cas_flags |= CAS_FLAG_SATURN;
3401	}
3402}
3403
3404
3405static int cas_check_invariants(struct cas *cp)
3406{
3407	struct pci_dev *pdev = cp->pdev;
 
3408	u32 cfg;
3409	int i;
3410
3411	/* get page size for rx buffers. */
3412	cp->page_order = 0;
3413#ifdef USE_PAGE_ORDER
3414	if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3415		/* see if we can allocate larger pages */
3416		struct page *page = alloc_pages(GFP_ATOMIC,
3417						CAS_JUMBO_PAGE_SHIFT -
3418						PAGE_SHIFT);
3419		if (page) {
3420			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3421			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3422		} else {
3423			pr_info("MTU limited to %d bytes\n", CAS_MAX_MTU);
3424		}
3425	}
3426#endif
3427	cp->page_size = (PAGE_SIZE << cp->page_order);
3428
3429	/* Fetch the FIFO configurations. */
3430	cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3431	cp->rx_fifo_size = RX_FIFO_SIZE;
3432
3433	/* finish phy determination. MDIO1 takes precedence over MDIO0 if
3434	 * they're both connected.
3435	 */
3436	cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3437					PCI_SLOT(pdev->devfn));
3438	if (cp->phy_type & CAS_PHY_SERDES) {
3439		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3440		return 0; /* no more checking needed */
3441	}
3442
3443	/* MII */
3444	cfg = readl(cp->regs + REG_MIF_CFG);
3445	if (cfg & MIF_CFG_MDIO_1) {
3446		cp->phy_type = CAS_PHY_MII_MDIO1;
3447	} else if (cfg & MIF_CFG_MDIO_0) {
3448		cp->phy_type = CAS_PHY_MII_MDIO0;
3449	}
3450
3451	cas_mif_poll(cp, 0);
3452	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3453
3454	for (i = 0; i < 32; i++) {
3455		u32 phy_id;
3456		int j;
3457
3458		for (j = 0; j < 3; j++) {
3459			cp->phy_addr = i;
3460			phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3461			phy_id |= cas_phy_read(cp, MII_PHYSID2);
3462			if (phy_id && (phy_id != 0xFFFFFFFF)) {
3463				cp->phy_id = phy_id;
3464				goto done;
3465			}
3466		}
3467	}
3468	pr_err("MII phy did not respond [%08x]\n",
3469	       readl(cp->regs + REG_MIF_STATE_MACHINE));
3470	return -1;
3471
3472done:
3473	/* see if we can do gigabit */
3474	cfg = cas_phy_read(cp, MII_BMSR);
3475	if ((cfg & CAS_BMSR_1000_EXTEND) &&
3476	    cas_phy_read(cp, CAS_MII_1000_EXTEND))
3477		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3478	return 0;
3479}
3480
3481/* Must be invoked under cp->lock. */
3482static inline void cas_start_dma(struct cas *cp)
3483{
3484	int i;
3485	u32 val;
3486	int txfailed = 0;
3487
3488	/* enable dma */
3489	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3490	writel(val, cp->regs + REG_TX_CFG);
3491	val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3492	writel(val, cp->regs + REG_RX_CFG);
3493
3494	/* enable the mac */
3495	val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3496	writel(val, cp->regs + REG_MAC_TX_CFG);
3497	val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3498	writel(val, cp->regs + REG_MAC_RX_CFG);
3499
3500	i = STOP_TRIES;
3501	while (i-- > 0) {
3502		val = readl(cp->regs + REG_MAC_TX_CFG);
3503		if ((val & MAC_TX_CFG_EN))
3504			break;
3505		udelay(10);
3506	}
3507	if (i < 0) txfailed = 1;
3508	i = STOP_TRIES;
3509	while (i-- > 0) {
3510		val = readl(cp->regs + REG_MAC_RX_CFG);
3511		if ((val & MAC_RX_CFG_EN)) {
3512			if (txfailed) {
3513				netdev_err(cp->dev,
3514					   "enabling mac failed [tx:%08x:%08x]\n",
3515					   readl(cp->regs + REG_MIF_STATE_MACHINE),
3516					   readl(cp->regs + REG_MAC_STATE_MACHINE));
3517			}
3518			goto enable_rx_done;
3519		}
3520		udelay(10);
3521	}
3522	netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3523		   (txfailed ? "tx,rx" : "rx"),
3524		   readl(cp->regs + REG_MIF_STATE_MACHINE),
3525		   readl(cp->regs + REG_MAC_STATE_MACHINE));
3526
3527enable_rx_done:
3528	cas_unmask_intr(cp); /* enable interrupts */
3529	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3530	writel(0, cp->regs + REG_RX_COMP_TAIL);
3531
3532	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3533		if (N_RX_DESC_RINGS > 1)
3534			writel(RX_DESC_RINGN_SIZE(1) - 4,
3535			       cp->regs + REG_PLUS_RX_KICK1);
3536
3537		for (i = 1; i < N_RX_COMP_RINGS; i++)
3538			writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3539	}
3540}
3541
3542/* Must be invoked under cp->lock. */
3543static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3544				   int *pause)
3545{
3546	u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3547	*fd     = (val & PCS_MII_LPA_FD) ? 1 : 0;
3548	*pause  = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3549	if (val & PCS_MII_LPA_ASYM_PAUSE)
3550		*pause |= 0x10;
3551	*spd = 1000;
3552}
3553
3554/* Must be invoked under cp->lock. */
3555static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3556				   int *pause)
3557{
3558	u32 val;
3559
3560	*fd = 0;
3561	*spd = 10;
3562	*pause = 0;
3563
3564	/* use GMII registers */
3565	val = cas_phy_read(cp, MII_LPA);
3566	if (val & CAS_LPA_PAUSE)
3567		*pause = 0x01;
3568
3569	if (val & CAS_LPA_ASYM_PAUSE)
3570		*pause |= 0x10;
3571
3572	if (val & LPA_DUPLEX)
3573		*fd = 1;
3574	if (val & LPA_100)
3575		*spd = 100;
3576
3577	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3578		val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3579		if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3580			*spd = 1000;
3581		if (val & CAS_LPA_1000FULL)
3582			*fd = 1;
3583	}
3584}
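
/* The pause word built above is a bitmask, not a count: bit 0 (0x01)
 * means the partner advertised symmetric pause, bit 4 (0x10) asymmetric.
 * Sketch of how a caller would read it (hypothetical helper):
 */
#if 0
static bool lpa_symmetric_pause(int pause)
{
	return pause & 0x01;
}
#endif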
3585
3586/* A link-up condition has occurred, initialize and enable the
3587 * rest of the chip.
3588 *
3589 * Must be invoked under cp->lock.
3590 */
3591static void cas_set_link_modes(struct cas *cp)
3592{
3593	u32 val;
3594	int full_duplex, speed, pause;
3595
3596	full_duplex = 0;
3597	speed = 10;
3598	pause = 0;
3599
3600	if (CAS_PHY_MII(cp->phy_type)) {
3601		cas_mif_poll(cp, 0);
3602		val = cas_phy_read(cp, MII_BMCR);
3603		if (val & BMCR_ANENABLE) {
3604			cas_read_mii_link_mode(cp, &full_duplex, &speed,
3605					       &pause);
3606		} else {
3607			if (val & BMCR_FULLDPLX)
3608				full_duplex = 1;
3609
3610			if (val & BMCR_SPEED100)
3611				speed = 100;
3612			else if (val & CAS_BMCR_SPEED1000)
3613				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3614					1000 : 100;
3615		}
3616		cas_mif_poll(cp, 1);
3617
3618	} else {
3619		val = readl(cp->regs + REG_PCS_MII_CTRL);
3620		cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3621		if ((val & PCS_MII_AUTONEG_EN) == 0) {
3622			if (val & PCS_MII_CTRL_DUPLEX)
3623				full_duplex = 1;
3624		}
3625	}
3626
3627	netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3628		   speed, full_duplex ? "full" : "half");
3629
3630	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3631	if (CAS_PHY_MII(cp->phy_type)) {
3632		val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3633		if (!full_duplex)
3634			val |= MAC_XIF_DISABLE_ECHO;
3635	}
3636	if (full_duplex)
3637		val |= MAC_XIF_FDPLX_LED;
3638	if (speed == 1000)
3639		val |= MAC_XIF_GMII_MODE;
3640	writel(val, cp->regs + REG_MAC_XIF_CFG);
3641
3642	/* deal with carrier and collision detect. */
3643	val = MAC_TX_CFG_IPG_EN;
3644	if (full_duplex) {
3645		val |= MAC_TX_CFG_IGNORE_CARRIER;
3646		val |= MAC_TX_CFG_IGNORE_COLL;
3647	} else {
3648#ifndef USE_CSMA_CD_PROTO
3649		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3650		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3651#endif
3652	}
3653	/* val now set up for REG_MAC_TX_CFG */
3654
3655	/* If gigabit and half-duplex, enable carrier extension
3656	 * mode.  increase slot time to 512 bytes as well.
3657	 * else, disable it and make sure slot time is 64 bytes.
3658	 * also activate checksum bug workaround
3659	 */
3660	if ((speed == 1000) && !full_duplex) {
3661		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3662		       cp->regs + REG_MAC_TX_CFG);
3663
3664		val = readl(cp->regs + REG_MAC_RX_CFG);
3665		val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
3666		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3667		       cp->regs + REG_MAC_RX_CFG);
3668
3669		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3670
3671		cp->crc_size = 4;
3672		/* minimum size gigabit frame at half duplex */
3673		cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3674
3675	} else {
3676		writel(val, cp->regs + REG_MAC_TX_CFG);
3677
3678		/* checksum bug workaround. don't strip FCS when in
3679		 * half-duplex mode
3680		 */
3681		val = readl(cp->regs + REG_MAC_RX_CFG);
3682		if (full_duplex) {
3683			val |= MAC_RX_CFG_STRIP_FCS;
3684			cp->crc_size = 0;
3685			cp->min_frame_size = CAS_MIN_MTU;
3686		} else {
3687			val &= ~MAC_RX_CFG_STRIP_FCS;
3688			cp->crc_size = 4;
3689			cp->min_frame_size = CAS_MIN_FRAME;
3690		}
3691		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3692		       cp->regs + REG_MAC_RX_CFG);
3693		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3694	}
3695
3696	if (netif_msg_link(cp)) {
3697		if (pause & 0x01) {
3698			netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3699				    cp->rx_fifo_size,
3700				    cp->rx_pause_off,
3701				    cp->rx_pause_on);
3702		} else if (pause & 0x10) {
3703			netdev_info(cp->dev, "TX pause enabled\n");
3704		} else {
3705			netdev_info(cp->dev, "Pause is disabled\n");
3706		}
3707	}
3708
3709	val = readl(cp->regs + REG_MAC_CTRL_CFG);
3710	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3711	if (pause) { /* symmetric or asymmetric pause */
3712		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3713		if (pause & 0x01) { /* symmetric pause */
3714			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3715		}
3716	}
3717	writel(val, cp->regs + REG_MAC_CTRL_CFG);
3718	cas_start_dma(cp);
3719}
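
/* The pause handling at the end of the function above maps that bitmask
 * onto the MAC control config: any advertised pause enables sending PAUSE
 * frames, while only symmetric pause also honors received ones. Condensed
 * form (pause_to_ctrl() is hypothetical):
 */
#if 0
static u32 pause_to_ctrl(int pause, u32 val)
{
	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
	if (pause)		/* symmetric or asymmetric */
		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
	if (pause & 0x01)	/* symmetric only */
		val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
	return val;
}
#endif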
3720
3721/* Must be invoked under cp->lock. */
3722static void cas_init_hw(struct cas *cp, int restart_link)
3723{
3724	if (restart_link)
3725		cas_phy_init(cp);
3726
3727	cas_init_pause_thresholds(cp);
3728	cas_init_mac(cp);
3729	cas_init_dma(cp);
3730
3731	if (restart_link) {
3732		/* Default aneg parameters */
3733		cp->timer_ticks = 0;
3734		cas_begin_auto_negotiation(cp, NULL);
3735	} else if (cp->lstate == link_up) {
3736		cas_set_link_modes(cp);
3737		netif_carrier_on(cp->dev);
3738	}
3739}
3740
3741/* Must be invoked under cp->lock. on earlier cassini boards,
3742 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
3743 * let it settle out, and then restore pci state.
3744 */
3745static void cas_hard_reset(struct cas *cp)
3746{
3747	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3748	udelay(20);
3749	pci_restore_state(cp->pdev);
3750}
3751
3752
3753static void cas_global_reset(struct cas *cp, int blkflag)
3754{
3755	int limit;
3756
3757	/* issue a global reset. don't use RSTOUT. */
3758	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3759		/* For PCS, when the blkflag is set, we should set the
3760		 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
3761		 * the last autonegotiation from being cleared.  We'll
3762		 * need some special handling if the chip is set into a
3763		 * loopback mode.
3764		 */
3765		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3766		       cp->regs + REG_SW_RESET);
3767	} else {
3768		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3769	}
3770
3771	/* need to wait at least 3ms before polling register */
3772	mdelay(3);
3773
3774	limit = STOP_TRIES;
3775	while (limit-- > 0) {
3776		u32 val = readl(cp->regs + REG_SW_RESET);
3777		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3778			goto done;
3779		udelay(10);
3780	}
3781	netdev_err(cp->dev, "sw reset failed\n");
3782
3783done:
3784	/* enable various BIM interrupts */
3785	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3786	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3787
3788	/* clear out pci error status mask for handled errors.
3789	 * we don't deal with DMA counter overflows as they happen
3790	 * all the time.
3791	 */
3792	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3793			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3794			       PCI_ERR_BIM_DMA_READ), cp->regs +
3795	       REG_PCI_ERR_STATUS_MASK);
3796
3797	/* set up for MII by default to address mac rx reset timeout
3798	 * issue
3799	 */
3800	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3801}
3802
3803static void cas_reset(struct cas *cp, int blkflag)
3804{
3805	u32 val;
3806
3807	cas_mask_intr(cp);
3808	cas_global_reset(cp, blkflag);
3809	cas_mac_reset(cp);
3810	cas_entropy_reset(cp);
3811
3812	/* disable dma engines. */
3813	val = readl(cp->regs + REG_TX_CFG);
3814	val &= ~TX_CFG_DMA_EN;
3815	writel(val, cp->regs + REG_TX_CFG);
3816
3817	val = readl(cp->regs + REG_RX_CFG);
3818	val &= ~RX_CFG_DMA_EN;
3819	writel(val, cp->regs + REG_RX_CFG);
3820
3821	/* program header parser */
3822	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3823	    (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3824		cas_load_firmware(cp, CAS_HP_FIRMWARE);
3825	} else {
3826		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3827	}
3828
3829	/* clear out error registers */
3830	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3831	cas_clear_mac_err(cp);
3832	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3833}
3834
3835/* Shut down the chip, must be called with pm_mutex held.  */
3836static void cas_shutdown(struct cas *cp)
3837{
3838	unsigned long flags;
3839
3840	/* Make us not-running to avoid timers respawning */
3841	cp->hw_running = 0;
3842
3843	del_timer_sync(&cp->link_timer);
3844
3845	/* Stop the reset task */
3846#if 0
3847	while (atomic_read(&cp->reset_task_pending_mtu) ||
3848	       atomic_read(&cp->reset_task_pending_spare) ||
3849	       atomic_read(&cp->reset_task_pending_all))
3850		schedule();
3851
3852#else
3853	while (atomic_read(&cp->reset_task_pending))
3854		schedule();
3855#endif
3856	/* Actually stop the chip */
3857	cas_lock_all_save(cp, flags);
3858	cas_reset(cp, 0);
3859	if (cp->cas_flags & CAS_FLAG_SATURN)
3860		cas_phy_powerdown(cp);
3861	cas_unlock_all_restore(cp, flags);
3862}
3863
3864static int cas_change_mtu(struct net_device *dev, int new_mtu)
3865{
3866	struct cas *cp = netdev_priv(dev);
3867
3868	dev->mtu = new_mtu;
3869	if (!netif_running(dev) || !netif_device_present(dev))
3870		return 0;
3871
3872	/* let the reset task handle it */
3873#if 1
3874	atomic_inc(&cp->reset_task_pending);
3875	if ((cp->phy_type & CAS_PHY_SERDES)) {
3876		atomic_inc(&cp->reset_task_pending_all);
3877	} else {
3878		atomic_inc(&cp->reset_task_pending_mtu);
3879	}
3880	schedule_work(&cp->reset_task);
3881#else
3882	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3883		   CAS_RESET_ALL : CAS_RESET_MTU);
3884	pr_err("reset called in cas_change_mtu\n");
3885	schedule_work(&cp->reset_task);
3886#endif
3887
3888	flush_work(&cp->reset_task);
3889	return 0;
3890}
3891
3892static void cas_clean_txd(struct cas *cp, int ring)
3893{
3894	struct cas_tx_desc *txd = cp->init_txds[ring];
3895	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3896	u64 daddr, dlen;
3897	int i, size;
3898
3899	size = TX_DESC_RINGN_SIZE(ring);
3900	for (i = 0; i < size; i++) {
3901		int frag;
3902
3903		if (skbs[i] == NULL)
3904			continue;
3905
3906		skb = skbs[i];
3907		skbs[i] = NULL;
3908
3909		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags;  frag++) {
3910			int ent = i & (size - 1);
3911
3912			/* first buffer is never a tiny buffer and so
3913			 * needs to be unmapped.
3914			 */
3915			daddr = le64_to_cpu(txd[ent].buffer);
3916			dlen  =  CAS_VAL(TX_DESC_BUFLEN,
3917					 le64_to_cpu(txd[ent].control));
3918			pci_unmap_page(cp->pdev, daddr, dlen,
3919				       PCI_DMA_TODEVICE);
3920
3921			if (frag != skb_shinfo(skb)->nr_frags) {
3922				i++;
3923
3924				/* next buffer might be a tiny buffer.
3925				 * skip past it.
3926				 */
3927				ent = i & (size - 1);
3928				if (cp->tx_tiny_use[ring][ent].used)
3929					i++;
3930			}
3931		}
3932		dev_kfree_skb_any(skb);
3933	}
3934
3935	/* zero out tiny buf usage */
3936	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3937}
3938
3939/* freed on close */
3940static inline void cas_free_rx_desc(struct cas *cp, int ring)
3941{
3942	cas_page_t **page = cp->rx_pages[ring];
3943	int i, size;
3944
3945	size = RX_DESC_RINGN_SIZE(ring);
3946	for (i = 0; i < size; i++) {
3947		if (page[i]) {
3948			cas_page_free(cp, page[i]);
3949			page[i] = NULL;
3950		}
3951	}
3952}
3953
3954static void cas_free_rxds(struct cas *cp)
3955{
3956	int i;
3957
3958	for (i = 0; i < N_RX_DESC_RINGS; i++)
3959		cas_free_rx_desc(cp, i);
3960}
3961
3962/* Must be invoked under cp->lock. */
3963static void cas_clean_rings(struct cas *cp)
3964{
3965	int i;
3966
3967	/* need to clean all tx rings */
3968	memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3969	memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3970	for (i = 0; i < N_TX_RINGS; i++)
3971		cas_clean_txd(cp, i);
3972
3973	/* zero out init block */
3974	memset(cp->init_block, 0, sizeof(struct cas_init_block));
3975	cas_clean_rxds(cp);
3976	cas_clean_rxcs(cp);
3977}
3978
3979/* allocated on open */
3980static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3981{
3982	cas_page_t **page = cp->rx_pages[ring];
3983	int size, i = 0;
3984
3985	size = RX_DESC_RINGN_SIZE(ring);
3986	for (i = 0; i < size; i++) {
		page[i] = cas_page_alloc(cp, GFP_KERNEL);
		if (!page[i])
			return -1;
3989	}
3990	return 0;
3991}
3992
3993static int cas_alloc_rxds(struct cas *cp)
3994{
3995	int i;
3996
3997	for (i = 0; i < N_RX_DESC_RINGS; i++) {
3998		if (cas_alloc_rx_desc(cp, i) < 0) {
3999			cas_free_rxds(cp);
4000			return -1;
4001		}
4002	}
4003	return 0;
4004}
4005
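/* Reset-task bookkeeping, summarized for clarity (this is the pattern
 * the callers in this file use to schedule the work item):
 *
 *	atomic_inc(&cp->reset_task_pending);
 *	atomic_inc(&cp->reset_task_pending_<reason>);   (all/spare/mtu)
 *	schedule_work(&cp->reset_task);
 *
 * cas_reset_task() samples the per-reason counters and subtracts
 * exactly what it sampled, so surplus work items fall through the
 * early return below without touching the hardware.
 */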
4006static void cas_reset_task(struct work_struct *work)
4007{
4008	struct cas *cp = container_of(work, struct cas, reset_task);
	int pending_all = atomic_read(&cp->reset_task_pending_all);
	int pending_spare = atomic_read(&cp->reset_task_pending_spare);
	int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);

	if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
		/* We can have more tasks scheduled than actually
		 * needed.
		 */
		atomic_dec(&cp->reset_task_pending);
		return;
	}
4024	/* The link went down, we reset the ring, but keep
4025	 * DMA stopped. Use this function for reset
4026	 * on error as well.
4027	 */
4028	if (cp->hw_running) {
4029		unsigned long flags;
4030
4031		/* Make sure we don't get interrupts or tx packets */
4032		netif_device_detach(cp->dev);
4033		cas_lock_all_save(cp, flags);
4034
4035		if (cp->opened) {
			/* We call cas_spare_recover when we call cas_open,
			 * but we do not initialize the lists it uses until
			 * cas_open has been called.
			 */
4040			cas_spare_recover(cp, GFP_ATOMIC);
4041		}
		/* if only a spare recovery was requested, we are done */
		if (!pending_all && !pending_mtu)
			goto done;
		/* when a full reset is pending, the call to cas_init_hw
		 * below will restart auto negotiation.  Passing
		 * !(pending_all > 0) as the second argument of cas_reset
		 * makes it 1 when auto negotiation is not being
		 * restarted, which avoids reinitializing the PHY in the
		 * normal PCS case.
		 */
		cas_reset(cp, !(pending_all > 0));
		if (cp->opened)
			cas_clean_rings(cp);
		cas_init_hw(cp, pending_all > 0);
4068
4069done:
4070		cas_unlock_all_restore(cp, flags);
4071		netif_device_attach(cp->dev);
4072	}
	atomic_sub(pending_all, &cp->reset_task_pending_all);
	atomic_sub(pending_spare, &cp->reset_task_pending_spare);
	atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
	atomic_dec(&cp->reset_task_pending);
4081}
4082
4083static void cas_link_timer(struct timer_list *t)
4084{
4085	struct cas *cp = from_timer(cp, t, link_timer);
4086	int mask, pending = 0, reset = 0;
4087	unsigned long flags;
4088
4089	if (link_transition_timeout != 0 &&
4090	    cp->link_transition_jiffies_valid &&
4091	    ((jiffies - cp->link_transition_jiffies) >
4092	      (link_transition_timeout))) {
4093		/* One-second counter so link-down workaround doesn't
4094		 * cause resets to occur so fast as to fool the switch
4095		 * into thinking the link is down.
4096		 */
4097		cp->link_transition_jiffies_valid = 0;
4098	}
4099
4100	if (!cp->hw_running)
4101		return;
4102
4103	spin_lock_irqsave(&cp->lock, flags);
4104	cas_lock_tx(cp);
4105	cas_entropy_gather(cp);
4106
4107	/* If the link task is still pending, we just
4108	 * reschedule the link timer
4109	 */
	if (atomic_read(&cp->reset_task_pending_all) ||
	    atomic_read(&cp->reset_task_pending_spare) ||
	    atomic_read(&cp->reset_task_pending_mtu))
		goto done;
4119
4120	/* check for rx cleaning */
	mask = cp->cas_flags & CAS_FLAG_RXD_POST_MASK;
	if (mask) {
4122		int i, rmask;
4123
4124		for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4125			rmask = CAS_FLAG_RXD_POST(i);
4126			if ((mask & rmask) == 0)
4127				continue;
4128
4129			/* post_rxds will do a mod_timer */
4130			if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4131				pending = 1;
4132				continue;
4133			}
4134			cp->cas_flags &= ~rmask;
4135		}
4136	}
4137
4138	if (CAS_PHY_MII(cp->phy_type)) {
4139		u16 bmsr;
4140		cas_mif_poll(cp, 0);
4141		bmsr = cas_phy_read(cp, MII_BMSR);
4142		/* WTZ: Solaris driver reads this twice, but that
4143		 * may be due to the PCS case and the use of a
4144		 * common implementation. Read it twice here to be
4145		 * safe.
4146		 */
4147		bmsr = cas_phy_read(cp, MII_BMSR);
4148		cas_mif_poll(cp, 1);
4149		readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
4150		reset = cas_mii_link_check(cp, bmsr);
4151	} else {
4152		reset = cas_pcs_link_check(cp);
4153	}
4154
4155	if (reset)
4156		goto done;
4157
4158	/* check for tx state machine confusion */
4159	if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4160		u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4161		u32 wptr, rptr;
4162		int tlm  = CAS_VAL(MAC_SM_TLM, val);
4163
4164		if (((tlm == 0x5) || (tlm == 0x3)) &&
4165		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4166			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4167				     "tx err: MAC_STATE[%08x]\n", val);
4168			reset = 1;
4169			goto done;
4170		}
4171
4172		val  = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4173		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4174		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4175		if ((val == 0) && (wptr != rptr)) {
4176			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4177				     "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4178				     val, wptr, rptr);
4179			reset = 1;
4180		}
4181
4182		if (reset)
4183			cas_hard_reset(cp);
4184	}
4185
4186done:
4187	if (reset) {
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
4197	}
4198
4199	if (!pending)
4200		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4201	cas_unlock_tx(cp);
4202	spin_unlock_irqrestore(&cp->lock, flags);
4203}
4204
/* tiny buffers are used to avoid target abort issues with
 * older cassini chips
 */
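/* (Layout sketch, assuming the TX_TINY_BUF_* definitions in cassini.h:
 * each TX ring owns one DMA-consistent block of TX_TINY_BUF_BLOCK
 * bytes, carved into fixed-size slots that the transmit path addresses
 * as tx_tiny_bufs[ring] + TX_TINY_BUF_LEN * entry, with
 * tx_tiny_dvma[ring] holding the matching bus address.)
 */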
4208static void cas_tx_tiny_free(struct cas *cp)
4209{
4210	struct pci_dev *pdev = cp->pdev;
4211	int i;
4212
4213	for (i = 0; i < N_TX_RINGS; i++) {
4214		if (!cp->tx_tiny_bufs[i])
4215			continue;
4216
4217		pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4218				    cp->tx_tiny_bufs[i],
4219				    cp->tx_tiny_dvma[i]);
4220		cp->tx_tiny_bufs[i] = NULL;
4221	}
4222}
4223
4224static int cas_tx_tiny_alloc(struct cas *cp)
4225{
4226	struct pci_dev *pdev = cp->pdev;
4227	int i;
4228
4229	for (i = 0; i < N_TX_RINGS; i++) {
4230		cp->tx_tiny_bufs[i] =
4231			pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4232					     &cp->tx_tiny_dvma[i]);
4233		if (!cp->tx_tiny_bufs[i]) {
4234			cas_tx_tiny_free(cp);
4235			return -1;
4236		}
4237	}
4238	return 0;
4239}
4240
4241
4242static int cas_open(struct net_device *dev)
4243{
4244	struct cas *cp = netdev_priv(dev);
4245	int hw_was_up, err;
4246	unsigned long flags;
4247
4248	mutex_lock(&cp->pm_mutex);
4249
4250	hw_was_up = cp->hw_running;
4251
4252	/* The power-management mutex protects the hw_running
4253	 * etc. state so it is safe to do this bit without cp->lock
4254	 */
4255	if (!cp->hw_running) {
4256		/* Reset the chip */
4257		cas_lock_all_save(cp, flags);
4258		/* We set the second arg to cas_reset to zero
4259		 * because cas_init_hw below will have its second
4260		 * argument set to non-zero, which will force
4261		 * autonegotiation to start.
4262		 */
4263		cas_reset(cp, 0);
4264		cp->hw_running = 1;
4265		cas_unlock_all_restore(cp, flags);
4266	}
4267
4268	err = -ENOMEM;
4269	if (cas_tx_tiny_alloc(cp) < 0)
4270		goto err_unlock;
4271
4272	/* alloc rx descriptors */
4273	if (cas_alloc_rxds(cp) < 0)
4274		goto err_tx_tiny;
4275
4276	/* allocate spares */
4277	cas_spare_init(cp);
4278	cas_spare_recover(cp, GFP_KERNEL);
4279
4280	/* We can now request the interrupt as we know it's masked
4281	 * on the controller. cassini+ has up to 4 interrupts
4282	 * that can be used, but you need to do explicit pci interrupt
4283	 * mapping to expose them
4284	 */
4285	if (request_irq(cp->pdev->irq, cas_interrupt,
4286			IRQF_SHARED, dev->name, (void *) dev)) {
		netdev_err(cp->dev, "failed to request irq!\n");
4288		err = -EAGAIN;
4289		goto err_spare;
4290	}
4291
4292#ifdef USE_NAPI
4293	napi_enable(&cp->napi);
4294#endif
4295	/* init hw */
4296	cas_lock_all_save(cp, flags);
4297	cas_clean_rings(cp);
4298	cas_init_hw(cp, !hw_was_up);
4299	cp->opened = 1;
4300	cas_unlock_all_restore(cp, flags);
4301
4302	netif_start_queue(dev);
4303	mutex_unlock(&cp->pm_mutex);
4304	return 0;
4305
4306err_spare:
4307	cas_spare_free(cp);
4308	cas_free_rxds(cp);
4309err_tx_tiny:
4310	cas_tx_tiny_free(cp);
4311err_unlock:
4312	mutex_unlock(&cp->pm_mutex);
4313	return err;
4314}
4315
4316static int cas_close(struct net_device *dev)
4317{
4318	unsigned long flags;
4319	struct cas *cp = netdev_priv(dev);
4320
4321#ifdef USE_NAPI
4322	napi_disable(&cp->napi);
4323#endif
4324	/* Make sure we don't get distracted by suspend/resume */
4325	mutex_lock(&cp->pm_mutex);
4326
4327	netif_stop_queue(dev);
4328
4329	/* Stop traffic, mark us closed */
4330	cas_lock_all_save(cp, flags);
4331	cp->opened = 0;
4332	cas_reset(cp, 0);
4333	cas_phy_init(cp);
4334	cas_begin_auto_negotiation(cp, NULL);
4335	cas_clean_rings(cp);
4336	cas_unlock_all_restore(cp, flags);
4337
4338	free_irq(cp->pdev->irq, (void *) dev);
4339	cas_spare_free(cp);
4340	cas_free_rxds(cp);
4341	cas_tx_tiny_free(cp);
4342	mutex_unlock(&cp->pm_mutex);
4343	return 0;
4344}
4345
4346static struct {
4347	const char name[ETH_GSTRING_LEN];
4348} ethtool_cassini_statnames[] = {
4349	{"collisions"},
4350	{"rx_bytes"},
4351	{"rx_crc_errors"},
4352	{"rx_dropped"},
4353	{"rx_errors"},
4354	{"rx_fifo_errors"},
4355	{"rx_frame_errors"},
4356	{"rx_length_errors"},
4357	{"rx_over_errors"},
4358	{"rx_packets"},
4359	{"tx_aborted_errors"},
4360	{"tx_bytes"},
4361	{"tx_dropped"},
4362	{"tx_errors"},
4363	{"tx_fifo_errors"},
4364	{"tx_packets"}
4365};
4366#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
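/* NB: keep these names in the same order as the assignments in
 * cas_get_ethtool_stats(); the BUG_ON there checks only the count.
 */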
4367
4368static struct {
	const int offsets;	/* neg. values for 2nd arg to cas_phy_read */
4370} ethtool_register_table[] = {
4371	{-MII_BMSR},
4372	{-MII_BMCR},
4373	{REG_CAWR},
4374	{REG_INF_BURST},
4375	{REG_BIM_CFG},
4376	{REG_RX_CFG},
4377	{REG_HP_CFG},
4378	{REG_MAC_TX_CFG},
4379	{REG_MAC_RX_CFG},
4380	{REG_MAC_CTRL_CFG},
4381	{REG_MAC_XIF_CFG},
4382	{REG_MIF_CFG},
4383	{REG_PCS_CFG},
4384	{REG_SATURN_PCFG},
4385	{REG_PCS_MII_STATUS},
4386	{REG_PCS_STATE_MACHINE},
4387	{REG_MAC_COLL_EXCESS},
4388	{REG_MAC_COLL_LATE}
4389};
4390#define CAS_REG_LEN 	ARRAY_SIZE(ethtool_register_table)
4391#define CAS_MAX_REGS 	(sizeof (u32)*CAS_REG_LEN)
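/* A negative table entry selects a PHY register: cas_read_regs()
 * below negates it and hands it to cas_phy_read(), so {-MII_BMSR},
 * for example, dumps the MII status register rather than a chip
 * register.
 */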
4392
4393static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4394{
4395	u8 *p;
4396	int i;
4397	unsigned long flags;
4398
4399	spin_lock_irqsave(&cp->lock, flags);
	for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
4401		u16 hval;
4402		u32 val;
4403		if (ethtool_register_table[i].offsets < 0) {
4404			hval = cas_phy_read(cp,
4405				    -ethtool_register_table[i].offsets);
4406			val = hval;
4407		} else {
			val = readl(cp->regs + ethtool_register_table[i].offsets);
4409		}
4410		memcpy(p, (u8 *)&val, sizeof(u32));
4411	}
4412	spin_unlock_irqrestore(&cp->lock, flags);
4413}
4414
4415static struct net_device_stats *cas_get_stats(struct net_device *dev)
4416{
4417	struct cas *cp = netdev_priv(dev);
4418	struct net_device_stats *stats = cp->net_stats;
4419	unsigned long flags;
4420	int i;
4421	unsigned long tmp;
4422
	/* we collate all of the stats into net_stats[N_TX_RINGS] */
4424	if (!cp->hw_running)
4425		return stats + N_TX_RINGS;
4426
4427	/* collect outstanding stats */
4428	/* WTZ: the Cassini spec gives these as 16 bit counters but
4429	 * stored in 32-bit words.  Added a mask of 0xffff to be safe,
4430	 * in case the chip somehow puts any garbage in the other bits.
	 * Also, counter usage didn't seem to match what Adrian did
4432	 * in the parts of the code that set these quantities. Made
4433	 * that consistent.
4434	 */
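	/* (Illustrative: a raw register value of 0x0001ffff is folded
	 * to 0xffff by the masking below.)
	 */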
4435	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
	stats[N_TX_RINGS].rx_crc_errors +=
		readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
	stats[N_TX_RINGS].rx_frame_errors +=
		readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
	stats[N_TX_RINGS].rx_length_errors +=
		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
		(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
	stats[N_TX_RINGS].tx_aborted_errors += tmp;
	stats[N_TX_RINGS].collisions +=
		tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4454	cas_clear_mac_err(cp);
4455
4456	/* saved bits that are unique to ring 0 */
4457	spin_lock(&cp->stat_lock[0]);
4458	stats[N_TX_RINGS].collisions        += stats[0].collisions;
4459	stats[N_TX_RINGS].rx_over_errors    += stats[0].rx_over_errors;
4460	stats[N_TX_RINGS].rx_frame_errors   += stats[0].rx_frame_errors;
4461	stats[N_TX_RINGS].rx_fifo_errors    += stats[0].rx_fifo_errors;
4462	stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4463	stats[N_TX_RINGS].tx_fifo_errors    += stats[0].tx_fifo_errors;
4464	spin_unlock(&cp->stat_lock[0]);
4465
4466	for (i = 0; i < N_TX_RINGS; i++) {
4467		spin_lock(&cp->stat_lock[i]);
4468		stats[N_TX_RINGS].rx_length_errors +=
4469			stats[i].rx_length_errors;
4470		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4471		stats[N_TX_RINGS].rx_packets    += stats[i].rx_packets;
4472		stats[N_TX_RINGS].tx_packets    += stats[i].tx_packets;
4473		stats[N_TX_RINGS].rx_bytes      += stats[i].rx_bytes;
4474		stats[N_TX_RINGS].tx_bytes      += stats[i].tx_bytes;
4475		stats[N_TX_RINGS].rx_errors     += stats[i].rx_errors;
4476		stats[N_TX_RINGS].tx_errors     += stats[i].tx_errors;
4477		stats[N_TX_RINGS].rx_dropped    += stats[i].rx_dropped;
4478		stats[N_TX_RINGS].tx_dropped    += stats[i].tx_dropped;
4479		memset(stats + i, 0, sizeof(struct net_device_stats));
4480		spin_unlock(&cp->stat_lock[i]);
4481	}
4482	spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4483	return stats + N_TX_RINGS;
4484}
4485
4486
4487static void cas_set_multicast(struct net_device *dev)
4488{
4489	struct cas *cp = netdev_priv(dev);
4490	u32 rxcfg, rxcfg_new;
4491	unsigned long flags;
4492	int limit = STOP_TRIES;
4493
4494	if (!cp->hw_running)
4495		return;
4496
4497	spin_lock_irqsave(&cp->lock, flags);
4498	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4499
4500	/* disable RX MAC and wait for completion */
4501	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4502	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4503		if (!limit--)
4504			break;
4505		udelay(10);
4506	}
4507
4508	/* disable hash filter and wait for completion */
4509	limit = STOP_TRIES;
4510	rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4511	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4512	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4513		if (!limit--)
4514			break;
4515		udelay(10);
4516	}
4517
4518	/* program hash filters */
4519	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4520	rxcfg |= rxcfg_new;
4521	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4522	spin_unlock_irqrestore(&cp->lock, flags);
4523}
4524
4525static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4526{
4527	struct cas *cp = netdev_priv(dev);
4528	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4529	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4530	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4531}
4532
4533static int cas_get_link_ksettings(struct net_device *dev,
4534				  struct ethtool_link_ksettings *cmd)
4535{
4536	struct cas *cp = netdev_priv(dev);
4537	u16 bmcr;
4538	int full_duplex, speed, pause;
4539	unsigned long flags;
4540	enum link_state linkstate = link_up;
4541	u32 supported, advertising;
4542
4543	advertising = 0;
4544	supported = SUPPORTED_Autoneg;
4545	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4546		supported |= SUPPORTED_1000baseT_Full;
4547		advertising |= ADVERTISED_1000baseT_Full;
4548	}
4549
4550	/* Record PHY settings if HW is on. */
4551	spin_lock_irqsave(&cp->lock, flags);
4552	bmcr = 0;
4553	linkstate = cp->lstate;
4554	if (CAS_PHY_MII(cp->phy_type)) {
4555		cmd->base.port = PORT_MII;
4556		cmd->base.phy_address = cp->phy_addr;
4557		advertising |= ADVERTISED_TP | ADVERTISED_MII |
4558			ADVERTISED_10baseT_Half |
4559			ADVERTISED_10baseT_Full |
4560			ADVERTISED_100baseT_Half |
4561			ADVERTISED_100baseT_Full;
4562
4563		supported |=
4564			(SUPPORTED_10baseT_Half |
4565			 SUPPORTED_10baseT_Full |
4566			 SUPPORTED_100baseT_Half |
4567			 SUPPORTED_100baseT_Full |
4568			 SUPPORTED_TP | SUPPORTED_MII);
4569
4570		if (cp->hw_running) {
4571			cas_mif_poll(cp, 0);
4572			bmcr = cas_phy_read(cp, MII_BMCR);
4573			cas_read_mii_link_mode(cp, &full_duplex,
4574					       &speed, &pause);
4575			cas_mif_poll(cp, 1);
4576		}
4577
4578	} else {
4579		cmd->base.port = PORT_FIBRE;
4580		cmd->base.phy_address = 0;
4581		supported   |= SUPPORTED_FIBRE;
4582		advertising |= ADVERTISED_FIBRE;
4583
4584		if (cp->hw_running) {
4585			/* pcs uses the same bits as mii */
4586			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4587			cas_read_pcs_link_mode(cp, &full_duplex,
4588					       &speed, &pause);
4589		}
4590	}
4591	spin_unlock_irqrestore(&cp->lock, flags);
4592
4593	if (bmcr & BMCR_ANENABLE) {
4594		advertising |= ADVERTISED_Autoneg;
4595		cmd->base.autoneg = AUTONEG_ENABLE;
4596		cmd->base.speed =  ((speed == 10) ?
4597					    SPEED_10 :
4598					    ((speed == 1000) ?
4599					     SPEED_1000 : SPEED_100));
4600		cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4601	} else {
4602		cmd->base.autoneg = AUTONEG_DISABLE;
4603		cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
4604					    SPEED_1000 :
4605					    ((bmcr & BMCR_SPEED100) ?
4606					     SPEED_100 : SPEED_10));
4607		cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
4608			DUPLEX_FULL : DUPLEX_HALF;
4609	}
4610	if (linkstate != link_up) {
4611		/* Force these to "unknown" if the link is not up and
		 * autonegotiation is enabled. We can set the link
4613		 * speed to 0, but not cmd->duplex,
4614		 * because its legal values are 0 and 1.  Ethtool will
4615		 * print the value reported in parentheses after the
4616		 * word "Unknown" for unrecognized values.
4617		 *
4618		 * If in forced mode, we report the speed and duplex
4619		 * settings that we configured.
4620		 */
4621		if (cp->link_cntl & BMCR_ANENABLE) {
4622			cmd->base.speed = 0;
4623			cmd->base.duplex = 0xff;
4624		} else {
4625			cmd->base.speed = SPEED_10;
4626			if (cp->link_cntl & BMCR_SPEED100) {
4627				cmd->base.speed = SPEED_100;
4628			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4629				cmd->base.speed = SPEED_1000;
4630			}
4631			cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
4632				DUPLEX_FULL : DUPLEX_HALF;
4633		}
4634	}
4635
4636	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4637						supported);
4638	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4639						advertising);
4640
4641	return 0;
4642}
4643
4644static int cas_set_link_ksettings(struct net_device *dev,
4645				  const struct ethtool_link_ksettings *cmd)
4646{
4647	struct cas *cp = netdev_priv(dev);
4648	unsigned long flags;
4649	u32 speed = cmd->base.speed;
4650
4651	/* Verify the settings we care about. */
4652	if (cmd->base.autoneg != AUTONEG_ENABLE &&
4653	    cmd->base.autoneg != AUTONEG_DISABLE)
4654		return -EINVAL;
4655
4656	if (cmd->base.autoneg == AUTONEG_DISABLE &&
4657	    ((speed != SPEED_1000 &&
4658	      speed != SPEED_100 &&
4659	      speed != SPEED_10) ||
4660	     (cmd->base.duplex != DUPLEX_HALF &&
4661	      cmd->base.duplex != DUPLEX_FULL)))
4662		return -EINVAL;
4663
4664	/* Apply settings and restart link process. */
4665	spin_lock_irqsave(&cp->lock, flags);
4666	cas_begin_auto_negotiation(cp, cmd);
4667	spin_unlock_irqrestore(&cp->lock, flags);
4668	return 0;
4669}
4670
4671static int cas_nway_reset(struct net_device *dev)
4672{
4673	struct cas *cp = netdev_priv(dev);
4674	unsigned long flags;
4675
4676	if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4677		return -EINVAL;
4678
4679	/* Restart link process. */
4680	spin_lock_irqsave(&cp->lock, flags);
4681	cas_begin_auto_negotiation(cp, NULL);
4682	spin_unlock_irqrestore(&cp->lock, flags);
4683
4684	return 0;
4685}
4686
4687static u32 cas_get_link(struct net_device *dev)
4688{
4689	struct cas *cp = netdev_priv(dev);
4690	return cp->lstate == link_up;
4691}
4692
4693static u32 cas_get_msglevel(struct net_device *dev)
4694{
4695	struct cas *cp = netdev_priv(dev);
4696	return cp->msg_enable;
4697}
4698
4699static void cas_set_msglevel(struct net_device *dev, u32 value)
4700{
4701	struct cas *cp = netdev_priv(dev);
4702	cp->msg_enable = value;
4703}
4704
4705static int cas_get_regs_len(struct net_device *dev)
4706{
4707	struct cas *cp = netdev_priv(dev);
	return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
4709}
4710
4711static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4712			     void *p)
4713{
4714	struct cas *cp = netdev_priv(dev);
4715	regs->version = 0;
4716	/* cas_read_regs handles locks (cp->lock).  */
4717	cas_read_regs(cp, p, regs->len / sizeof(u32));
4718}
4719
4720static int cas_get_sset_count(struct net_device *dev, int sset)
4721{
4722	switch (sset) {
4723	case ETH_SS_STATS:
4724		return CAS_NUM_STAT_KEYS;
4725	default:
4726		return -EOPNOTSUPP;
4727	}
4728}
4729
4730static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4731{
	memcpy(data, &ethtool_cassini_statnames,
	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4734}
4735
4736static void cas_get_ethtool_stats(struct net_device *dev,
4737				      struct ethtool_stats *estats, u64 *data)
4738{
4739	struct cas *cp = netdev_priv(dev);
4740	struct net_device_stats *stats = cas_get_stats(cp->dev);
4741	int i = 0;
4742	data[i++] = stats->collisions;
4743	data[i++] = stats->rx_bytes;
4744	data[i++] = stats->rx_crc_errors;
4745	data[i++] = stats->rx_dropped;
4746	data[i++] = stats->rx_errors;
4747	data[i++] = stats->rx_fifo_errors;
4748	data[i++] = stats->rx_frame_errors;
4749	data[i++] = stats->rx_length_errors;
4750	data[i++] = stats->rx_over_errors;
4751	data[i++] = stats->rx_packets;
4752	data[i++] = stats->tx_aborted_errors;
4753	data[i++] = stats->tx_bytes;
4754	data[i++] = stats->tx_dropped;
4755	data[i++] = stats->tx_errors;
4756	data[i++] = stats->tx_fifo_errors;
4757	data[i++] = stats->tx_packets;
4758	BUG_ON(i != CAS_NUM_STAT_KEYS);
4759}
4760
4761static const struct ethtool_ops cas_ethtool_ops = {
4762	.get_drvinfo		= cas_get_drvinfo,
4763	.nway_reset		= cas_nway_reset,
4764	.get_link		= cas_get_link,
4765	.get_msglevel		= cas_get_msglevel,
4766	.set_msglevel		= cas_set_msglevel,
4767	.get_regs_len		= cas_get_regs_len,
4768	.get_regs		= cas_get_regs,
4769	.get_sset_count		= cas_get_sset_count,
4770	.get_strings		= cas_get_strings,
4771	.get_ethtool_stats	= cas_get_ethtool_stats,
4772	.get_link_ksettings	= cas_get_link_ksettings,
4773	.set_link_ksettings	= cas_set_link_ksettings,
4774};
4775
4776static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4777{
4778	struct cas *cp = netdev_priv(dev);
4779	struct mii_ioctl_data *data = if_mii(ifr);
4780	unsigned long flags;
4781	int rc = -EOPNOTSUPP;
4782
4783	/* Hold the PM mutex while doing ioctl's or we may collide
4784	 * with open/close and power management and oops.
4785	 */
4786	mutex_lock(&cp->pm_mutex);
4787	switch (cmd) {
4788	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
4789		data->phy_id = cp->phy_addr;
4790		/* Fallthrough... */
4791
4792	case SIOCGMIIREG:		/* Read MII PHY register. */
4793		spin_lock_irqsave(&cp->lock, flags);
4794		cas_mif_poll(cp, 0);
4795		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4796		cas_mif_poll(cp, 1);
4797		spin_unlock_irqrestore(&cp->lock, flags);
4798		rc = 0;
4799		break;
4800
4801	case SIOCSMIIREG:		/* Write MII PHY register. */
4802		spin_lock_irqsave(&cp->lock, flags);
4803		cas_mif_poll(cp, 0);
4804		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4805		cas_mif_poll(cp, 1);
4806		spin_unlock_irqrestore(&cp->lock, flags);
4807		break;
4808	default:
4809		break;
4810	}
4811
4812	mutex_unlock(&cp->pm_mutex);
4813	return rc;
4814}
4815
4816/* When this chip sits underneath an Intel 31154 bridge, it is the
4817 * only subordinate device and we can tweak the bridge settings to
4818 * reflect that fact.
4819 */
4820static void cas_program_bridge(struct pci_dev *cas_pdev)
4821{
4822	struct pci_dev *pdev = cas_pdev->bus->self;
4823	u32 val;
4824
4825	if (!pdev)
4826		return;
4827
4828	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4829		return;
4830
4831	/* Clear bit 10 (Bus Parking Control) in the Secondary
4832	 * Arbiter Control/Status Register which lives at offset
4833	 * 0x41.  Using a 32-bit word read/modify/write at 0x40
4834	 * is much simpler so that's how we do this.
4835	 */
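	/* (Worked out: bit 10 of the 16-bit register at offset 0x41 is
	 * bit 10 + 8 = 18 of the dword at 0x40, hence mask 0x00040000.)
	 */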
4836	pci_read_config_dword(pdev, 0x40, &val);
4837	val &= ~0x00040000;
4838	pci_write_config_dword(pdev, 0x40, val);
4839
4840	/* Max out the Multi-Transaction Timer settings since
4841	 * Cassini is the only device present.
4842	 *
4843	 * The register is 16-bit and lives at 0x50.  When the
4844	 * settings are enabled, it extends the GRANT# signal
4845	 * for a requestor after a transaction is complete.  This
4846	 * allows the next request to run without first needing
4847	 * to negotiate the GRANT# signal back.
4848	 *
4849	 * Bits 12:10 define the grant duration:
4850	 *
4851	 *	1	--	16 clocks
4852	 *	2	--	32 clocks
4853	 *	3	--	64 clocks
4854	 *	4	--	128 clocks
4855	 *	5	--	256 clocks
4856	 *
4857	 * All other values are illegal.
4858	 *
4859	 * Bits 09:00 define which REQ/GNT signal pairs get the
4860	 * GRANT# signal treatment.  We set them all.
	 * GRANT# signal treatment.  We set them all.
	 */
4862	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
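	/* (That is: grant duration code 5 = 256 clocks in bits 12:10,
	 * with all ten REQ/GNT pairs enabled:
	 * (5 << 10) | 0x3ff == 0x17ff.)
	 */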
4863
	/* The Read Prefetch Policy register is 16-bit and sits at
4865	 * offset 0x52.  It enables a "smart" pre-fetch policy.  We
4866	 * enable it and max out all of the settings since only one
4867	 * device is sitting underneath and thus bandwidth sharing is
4868	 * not an issue.
4869	 *
	 * The register has several 3-bit fields, each indicating a
	 * multiplier applied to the base amount of prefetching the
	 * chip would do.  These fields are at:
4873	 *
4874	 *	15:13	---	ReRead Primary Bus
4875	 *	12:10	---	FirstRead Primary Bus
4876	 *	09:07	---	ReRead Secondary Bus
4877	 *	06:04	---	FirstRead Secondary Bus
4878	 *
4879	 * Bits 03:00 control which REQ/GNT pairs the prefetch settings
4880	 * get enabled on.  Bit 3 is a grouped enabler which controls
4881	 * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
4882	 * the individual REQ/GNT pairs [2:0].
4883	 */
4884	pci_write_config_word(pdev, 0x52,
4885			      (0x7 << 13) |
4886			      (0x7 << 10) |
4887			      (0x7 <<  7) |
4888			      (0x7 <<  4) |
4889			      (0xf <<  0));
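	/* (The fields above OR together to 0xffff: the maximum 0x7
	 * multiplier in all four fields plus every enable bit set.)
	 */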
4890
4891	/* Force cacheline size to 0x8 */
4892	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4893
4894	/* Force latency timer to maximum setting so Cassini can
4895	 * sit on the bus as long as it likes.
4896	 */
4897	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4898}
4899
4900static const struct net_device_ops cas_netdev_ops = {
4901	.ndo_open		= cas_open,
4902	.ndo_stop		= cas_close,
4903	.ndo_start_xmit		= cas_start_xmit,
4904	.ndo_get_stats 		= cas_get_stats,
4905	.ndo_set_rx_mode	= cas_set_multicast,
4906	.ndo_do_ioctl		= cas_ioctl,
4907	.ndo_tx_timeout		= cas_tx_timeout,
4908	.ndo_change_mtu		= cas_change_mtu,
4909	.ndo_set_mac_address	= eth_mac_addr,
4910	.ndo_validate_addr	= eth_validate_addr,
4911#ifdef CONFIG_NET_POLL_CONTROLLER
4912	.ndo_poll_controller	= cas_netpoll,
4913#endif
4914};
4915
4916static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4917{
	static int cas_version_printed;
4919	unsigned long casreg_len;
4920	struct net_device *dev;
4921	struct cas *cp;
4922	int i, err, pci_using_dac;
4923	u16 pci_cmd;
4924	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4925
4926	if (cas_version_printed++ == 0)
4927		pr_info("%s", version);
4928
4929	err = pci_enable_device(pdev);
4930	if (err) {
4931		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4932		return err;
4933	}
4934
4935	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4936		dev_err(&pdev->dev, "Cannot find proper PCI device "
4937		       "base address, aborting\n");
4938		err = -ENODEV;
4939		goto err_out_disable_pdev;
4940	}
4941
4942	dev = alloc_etherdev(sizeof(*cp));
4943	if (!dev) {
4944		err = -ENOMEM;
4945		goto err_out_disable_pdev;
4946	}
4947	SET_NETDEV_DEV(dev, &pdev->dev);
4948
4949	err = pci_request_regions(pdev, dev->name);
4950	if (err) {
4951		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4952		goto err_out_free_netdev;
4953	}
4954	pci_set_master(pdev);
4955
4956	/* we must always turn on parity response or else parity
4957	 * doesn't get generated properly. disable SERR/PERR as well.
4958	 * in addition, we want to turn MWI on.
4959	 */
4960	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4961	pci_cmd &= ~PCI_COMMAND_SERR;
4962	pci_cmd |= PCI_COMMAND_PARITY;
4963	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4964	if (pci_try_set_mwi(pdev))
4965		pr_warn("Could not enable MWI for %s\n", pci_name(pdev));
4966
4967	cas_program_bridge(pdev);
4968
	/*
	 * On some architectures, the default cache line size set
	 * by pci_try_set_mwi reduces performance, so we have to
	 * increase it for this case.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
			     &orig_cacheline_size);
	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
		cas_cacheline_size =
			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
		if (pci_write_config_byte(pdev,
					  PCI_CACHE_LINE_SIZE,
					  cas_cacheline_size)) {
			dev_err(&pdev->dev, "Could not set PCI cache "
				"line size\n");
			goto err_write_cacheline;
		}
	}
4991
4992
4993	/* Configure DMA attributes. */
4994	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4995		pci_using_dac = 1;
4996		err = pci_set_consistent_dma_mask(pdev,
4997						  DMA_BIT_MASK(64));
4998		if (err < 0) {
4999			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
5000			       "for consistent allocations\n");
5001			goto err_out_free_res;
5002		}
5003
5004	} else {
5005		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5006		if (err) {
5007			dev_err(&pdev->dev, "No usable DMA configuration, "
5008			       "aborting\n");
5009			goto err_out_free_res;
5010		}
5011		pci_using_dac = 0;
5012	}
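
	/* pci_using_dac is consulted again below: with a 64-bit DMA
	 * mask in place the driver can advertise NETIF_F_HIGHDMA when
	 * the net_device features are set up.
	 */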
5013
5014	casreg_len = pci_resource_len(pdev, 0);
5015
5016	cp = netdev_priv(dev);
5017	cp->pdev = pdev;
	/* A value of 0 indicates we never explicitly set it */
	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
5022	cp->dev = dev;
5023	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5024	  cassini_debug;
5025
5026#if defined(CONFIG_SPARC)
5027	cp->of_node = pci_device_to_OF_node(pdev);
5028#endif
5029
5030	cp->link_transition = LINK_TRANSITION_UNKNOWN;
5031	cp->link_transition_jiffies_valid = 0;
5032
5033	spin_lock_init(&cp->lock);
5034	spin_lock_init(&cp->rx_inuse_lock);
5035	spin_lock_init(&cp->rx_spare_lock);
5036	for (i = 0; i < N_TX_RINGS; i++) {
5037		spin_lock_init(&cp->stat_lock[i]);
5038		spin_lock_init(&cp->tx_lock[i]);
5039	}
5040	spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
5041	mutex_init(&cp->pm_mutex);
5042
5043	timer_setup(&cp->link_timer, cas_link_timer, 0);
5044
	/* Just in case the implementation of atomic operations
	 * changes so that an explicit initialization is necessary.
	 */
	atomic_set(&cp->reset_task_pending, 0);
	atomic_set(&cp->reset_task_pending_all, 0);
	atomic_set(&cp->reset_task_pending_spare, 0);
	atomic_set(&cp->reset_task_pending_mtu, 0);
5054	INIT_WORK(&cp->reset_task, cas_reset_task);
5055
5056	/* Default link parameters */
5057	if (link_mode >= 0 && link_mode < 6)
5058		cp->link_cntl = link_modes[link_mode];
5059	else
5060		cp->link_cntl = BMCR_ANENABLE;
5061	cp->lstate = link_down;
5062	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
5063	netif_carrier_off(cp->dev);
5064	cp->timer_ticks = 0;
5065
5066	/* give us access to cassini registers */
5067	cp->regs = pci_iomap(pdev, 0, casreg_len);
5068	if (!cp->regs) {
5069		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5070		goto err_out_free_res;
5071	}
5072	cp->casreg_len = casreg_len;
5073
5074	pci_save_state(pdev);
5075	cas_check_pci_invariants(cp);
5076	cas_hard_reset(cp);
5077	cas_reset(cp, 0);
5078	if (cas_check_invariants(cp))
5079		goto err_out_iounmap;
5080	if (cp->cas_flags & CAS_FLAG_SATURN)
5081		cas_saturn_firmware_init(cp);
5082
5083	cp->init_block = (struct cas_init_block *)
5084		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5085				     &cp->block_dvma);
5086	if (!cp->init_block) {
5087		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5088		goto err_out_iounmap;
5089	}
5090
5091	for (i = 0; i < N_TX_RINGS; i++)
5092		cp->init_txds[i] = cp->init_block->txds[i];
5093
5094	for (i = 0; i < N_RX_DESC_RINGS; i++)
5095		cp->init_rxds[i] = cp->init_block->rxds[i];
5096
5097	for (i = 0; i < N_RX_COMP_RINGS; i++)
5098		cp->init_rxcs[i] = cp->init_block->rxcs[i];
5099
5100	for (i = 0; i < N_RX_FLOWS; i++)
5101		skb_queue_head_init(&cp->rx_flows[i]);
5102
5103	dev->netdev_ops = &cas_netdev_ops;
5104	dev->ethtool_ops = &cas_ethtool_ops;
5105	dev->watchdog_timeo = CAS_TX_TIMEOUT;
5106
5107#ifdef USE_NAPI
5108	netif_napi_add(dev, &cp->napi, cas_poll, 64);
5109#endif
5110	dev->irq = pdev->irq;
5111	dev->dma = 0;
5112
5113	/* Cassini features. */
5114	if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5115		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5116
5117	if (pci_using_dac)
5118		dev->features |= NETIF_F_HIGHDMA;
5119
	/* MTU range: 60 to a device-dependent maximum of at most 9000 */
5121	dev->min_mtu = CAS_MIN_MTU;
5122	dev->max_mtu = CAS_MAX_MTU;
5123
5124	if (register_netdev(dev)) {
5125		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5126		goto err_out_free_consistent;
5127	}
5128
5129	i = readl(cp->regs + REG_BIM_CFG);
5130	netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5131		    (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5132		    (i & BIM_CFG_32BIT) ? "32" : "64",
5133		    (i & BIM_CFG_66MHZ) ? "66" : "33",
5134		    (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5135		    dev->dev_addr);
5136
5137	pci_set_drvdata(pdev, dev);
5138	cp->hw_running = 1;
5139	cas_entropy_reset(cp);
5140	cas_phy_init(cp);
5141	cas_begin_auto_negotiation(cp, NULL);
5142	return 0;
5143
5144err_out_free_consistent:
5145	pci_free_consistent(pdev, sizeof(struct cas_init_block),
5146			    cp->init_block, cp->block_dvma);
5147
5148err_out_iounmap:
5149	mutex_lock(&cp->pm_mutex);
5150	if (cp->hw_running)
5151		cas_shutdown(cp);
5152	mutex_unlock(&cp->pm_mutex);
5153
5154	pci_iounmap(pdev, cp->regs);
5155
5156
5157err_out_free_res:
5158	pci_release_regions(pdev);
5159
5160err_write_cacheline:
5161	/* Try to restore it in case the error occurred after we
5162	 * set it.
5163	 */
5164	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5165
5166err_out_free_netdev:
5167	free_netdev(dev);
5168
5169err_out_disable_pdev:
5170	pci_disable_device(pdev);
5171	return -ENODEV;
5172}
5173
5174static void cas_remove_one(struct pci_dev *pdev)
5175{
5176	struct net_device *dev = pci_get_drvdata(pdev);
5177	struct cas *cp;
5178	if (!dev)
5179		return;
5180
5181	cp = netdev_priv(dev);
5182	unregister_netdev(dev);
5183
5184	vfree(cp->fw_data);
5185
5186	mutex_lock(&cp->pm_mutex);
5187	cancel_work_sync(&cp->reset_task);
5188	if (cp->hw_running)
5189		cas_shutdown(cp);
5190	mutex_unlock(&cp->pm_mutex);
5191
	if (cp->orig_cacheline_size) {
		/* Restore the cache line size if we had modified
		 * it.
		 */
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				      cp->orig_cacheline_size);
	}
5201	pci_free_consistent(pdev, sizeof(struct cas_init_block),
5202			    cp->init_block, cp->block_dvma);
5203	pci_iounmap(pdev, cp->regs);
5204	free_netdev(dev);
5205	pci_release_regions(pdev);
5206	pci_disable_device(pdev);
5207}
5208
5209#ifdef CONFIG_PM
5210static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5211{
5212	struct net_device *dev = pci_get_drvdata(pdev);
5213	struct cas *cp = netdev_priv(dev);
5214	unsigned long flags;
5215
5216	mutex_lock(&cp->pm_mutex);
5217
5218	/* If the driver is opened, we stop the DMA */
5219	if (cp->opened) {
5220		netif_device_detach(dev);
5221
5222		cas_lock_all_save(cp, flags);
5223
5224		/* We can set the second arg of cas_reset to 0
5225		 * because on resume, we'll call cas_init_hw with
5226		 * its second arg set so that autonegotiation is
5227		 * restarted.
5228		 */
5229		cas_reset(cp, 0);
5230		cas_clean_rings(cp);
5231		cas_unlock_all_restore(cp, flags);
5232	}
5233
5234	if (cp->hw_running)
5235		cas_shutdown(cp);
5236	mutex_unlock(&cp->pm_mutex);
5237
5238	return 0;
5239}
5240
5241static int cas_resume(struct pci_dev *pdev)
5242{
5243	struct net_device *dev = pci_get_drvdata(pdev);
5244	struct cas *cp = netdev_priv(dev);
5245
5246	netdev_info(dev, "resuming\n");
5247
5248	mutex_lock(&cp->pm_mutex);
5249	cas_hard_reset(cp);
5250	if (cp->opened) {
5251		unsigned long flags;
5252		cas_lock_all_save(cp, flags);
5253		cas_reset(cp, 0);
5254		cp->hw_running = 1;
5255		cas_clean_rings(cp);
5256		cas_init_hw(cp, 1);
5257		cas_unlock_all_restore(cp, flags);
5258
5259		netif_device_attach(dev);
5260	}
5261	mutex_unlock(&cp->pm_mutex);
5262	return 0;
5263}
5264#endif /* CONFIG_PM */
5265
5266static struct pci_driver cas_driver = {
5267	.name		= DRV_MODULE_NAME,
5268	.id_table	= cas_pci_tbl,
5269	.probe		= cas_init_one,
5270	.remove		= cas_remove_one,
5271#ifdef CONFIG_PM
5272	.suspend	= cas_suspend,
5273	.resume		= cas_resume
5274#endif
5275};
5276
5277static int __init cas_init(void)
5278{
5279	if (linkdown_timeout > 0)
5280		link_transition_timeout = linkdown_timeout * HZ;
5281	else
5282		link_transition_timeout = 0;
5283
5284	return pci_register_driver(&cas_driver);
5285}
5286
5287static void __exit cas_cleanup(void)
5288{
5289	pci_unregister_driver(&cas_driver);
5290}
5291
5292module_init(cas_init);
5293module_exit(cas_cleanup);