   1// SPDX-License-Identifier: GPL-2.0+
   2/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
   3 *
   4 * Copyright (C) 2004 Sun Microsystems Inc.
   5 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
   6 *
   7 * This driver uses the sungem driver (c) David Miller
   8 * (davem@redhat.com) as its basis.
   9 *
  10 * The cassini chip has a number of features that distinguish it from
  11 * the gem chip:
  12 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
  13 *      load balancing (non-VLAN mode)
  14 *  batching of multiple packets
  15 *  multiple CPU dispatching
  16 *  page-based RX descriptor engine with separate completion rings
  17 *  Gigabit support (GMII and PCS interface)
  18 *  MIF link up/down detection works
  19 *
  20 * RX is handled by page sized buffers that are attached as fragments to
  21 * the skb. here's what's done:
  22 *  -- driver allocates a page at a time and keeps a reference
  23 *     count on each page.
  24 *  -- the upper protocol layers assume that the header is in the skb
  25 *     itself. as a result, cassini will copy a small amount (64 bytes)
  26 *     to make them happy.
  27 *  -- driver appends the rest of the data pages as frags to skbuffs
  28 *     and increments the reference count
  29 *  -- on page reclamation, the driver swaps the page with a spare page.
  30 *     if that page is still in use, it frees its reference to that page,
  31 *     and allocates a new page for use. otherwise, it just recycles the
  32 *     page.
  33 *
  34 * NOTE: cassini can parse the header. however, it's not worth it
  35 *       as long as the network stack requires a header copy.
  36 *
  37 * TX has 4 queues. currently these queues are used in a round-robin
  38 * fashion for load balancing. They can also be used for QoS. for that
  39 * to work, however, QoS information needs to be exposed down to the driver
  40 * level so that subqueues get targeted to particular transmit rings.
  41 * alternatively, the queues can be configured via use of the all-purpose
  42 * ioctl.
  43 *
  44 * RX DATA: the rx completion ring has all the info, but the rx desc
  45 * ring has all of the data. RX can conceivably come in under multiple
  46 * interrupts, but the INT# assignment needs to be set up properly by
  47 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
  48 * that. also, the two descriptor rings are designed to distinguish between
  49 * encrypted and non-encrypted packets, but we use them for buffering
  50 * instead.
  51 *
  52 * by default, the selective clear mask is set up to process rx packets.
  53 */
  54
  55#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  56
  57#include <linux/module.h>
  58#include <linux/kernel.h>
  59#include <linux/types.h>
  60#include <linux/compiler.h>
  61#include <linux/slab.h>
  62#include <linux/delay.h>
  63#include <linux/init.h>
  64#include <linux/interrupt.h>
  65#include <linux/vmalloc.h>
  66#include <linux/ioport.h>
  67#include <linux/pci.h>
  68#include <linux/mm.h>
  69#include <linux/highmem.h>
  70#include <linux/list.h>
  71#include <linux/dma-mapping.h>
  72
  73#include <linux/netdevice.h>
  74#include <linux/etherdevice.h>
  75#include <linux/skbuff.h>
  76#include <linux/ethtool.h>
  77#include <linux/crc32.h>
  78#include <linux/random.h>
  79#include <linux/mii.h>
  80#include <linux/ip.h>
  81#include <linux/tcp.h>
  82#include <linux/mutex.h>
  83#include <linux/firmware.h>
  84
  85#include <net/checksum.h>
  86
  87#include <linux/atomic.h>
  88#include <asm/io.h>
  89#include <asm/byteorder.h>
  90#include <linux/uaccess.h>
  91#include <linux/jiffies.h>
  92
  93#define CAS_NCPUS            num_online_cpus()
  94
  95#define cas_skb_release(x)  netif_rx(x)
  96
  97/* select which firmware to use */
  98#define USE_HP_WORKAROUND
  99#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
 100#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */
 101
 102#include "cassini.h"
 103
 104#define USE_TX_COMPWB      /* use completion writeback registers */
 105#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
 106#define USE_RX_BLANK       /* hw interrupt mitigation */
 107#undef USE_ENTROPY_DEV     /* don't test for entropy device */
 108
 109/* NOTE: these aren't usable unless PCI interrupts can be assigned.
 110 * also, we need to make cp->lock finer-grained.
 111 */
 112#undef  USE_PCI_INTB
 113#undef  USE_PCI_INTC
 114#undef  USE_PCI_INTD
 115#undef  USE_QOS
 116
 117#undef  USE_VPD_DEBUG       /* debug vpd information if defined */
 118
 119/* rx processing options */
 120#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
 121#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
 122#define RX_COPY_ALWAYS 0    /* if 0, use frags */
 123#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
 124#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
 125
 126#define DRV_MODULE_NAME		"cassini"
 127#define DRV_MODULE_VERSION	"1.6"
 128#define DRV_MODULE_RELDATE	"21 May 2008"
 129
 130#define CAS_DEF_MSG_ENABLE	  \
 131	(NETIF_MSG_DRV		| \
 132	 NETIF_MSG_PROBE	| \
 133	 NETIF_MSG_LINK		| \
 134	 NETIF_MSG_TIMER	| \
 135	 NETIF_MSG_IFDOWN	| \
 136	 NETIF_MSG_IFUP		| \
 137	 NETIF_MSG_RX_ERR	| \
 138	 NETIF_MSG_TX_ERR)
 139
 140/* length of time before we decide the hardware is borked,
 141 * and dev->tx_timeout() should be called to fix the problem
 142 */
 143#define CAS_TX_TIMEOUT			(HZ)
 144#define CAS_LINK_TIMEOUT                (22*HZ/10)
 145#define CAS_LINK_FAST_TIMEOUT           (1)
 146
 147/* timeout values for state changing. these specify the number
 148 * of 10us delays to be used before giving up.
 149 */
 150#define STOP_TRIES_PHY 1000
 151#define STOP_TRIES     5000
 152
 153/* specify a minimum frame size to deal with some fifo issues
 154 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 155 *            2 * page_size - 0x50
 156 */
 157#define CAS_MIN_FRAME			97
 158#define CAS_1000MB_MIN_FRAME            255
 159#define CAS_MIN_MTU                     60
 160#define CAS_MAX_MTU                     min(((cp->page_size << 1) - 0x50), 9000)
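
/* Worked example: with 4K pages (cp->page_size == 0x1000) the cap is
 * min((0x1000 << 1) - 0x50, 9000) = min(8112, 9000) = 8112 bytes, so
 * the 9000-byte jumbo ceiling only binds once 8K pages are in use
 * (2 * 0x2000 - 0x50 = 16304).
 */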
 161
 162#if 1
 163/*
 164 * Eliminate these and use separate atomic counters for each, to
 165 * avoid a race condition.
 166 */
 167#else
 168#define CAS_RESET_MTU                   1
 169#define CAS_RESET_ALL                   2
 170#define CAS_RESET_SPARE                 3
 171#endif
 172
 173static char version[] =
 174	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 175
 176static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
 177static int link_mode;
 178
 179MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
 180MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
 181MODULE_LICENSE("GPL");
 182MODULE_FIRMWARE("sun/cassini.bin");
 183module_param(cassini_debug, int, 0);
 184MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
 185module_param(link_mode, int, 0);
 186MODULE_PARM_DESC(link_mode, "default link mode");
 187
 188/*
 189 * Work around for a PCS bug in which the link goes down due to the chip
 190 * being confused and never showing a link status of "up."
 191 */
 192#define DEFAULT_LINKDOWN_TIMEOUT 5
 193/*
 194 * Value in seconds, for user input.
 195 */
 196static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
 197module_param(linkdown_timeout, int, 0);
 198MODULE_PARM_DESC(linkdown_timeout,
 199"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
 200
 201/*
 202 * value in 'ticks' (units used by jiffies). Set when we init the
 203 * module because 'HZ' is actually a function call on some flavors of
 204 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 205 */
 206static int link_transition_timeout;
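
/* A minimal sketch of the seconds-to-jiffies conversion described in
 * the comment above, assuming the module-init path (later in this
 * file, outside this excerpt) does roughly the following; it is
 * illustrative only and not built:
 */
#if 0
	if (linkdown_timeout > 0)
		link_transition_timeout = linkdown_timeout * HZ;
	else
		link_transition_timeout = 0;	/* disabled if not positive */
#endif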
 207
 208
 209
 210static u16 link_modes[] = {
 211	BMCR_ANENABLE,			 /* 0 : autoneg */
 212	0,				 /* 1 : 10bt half duplex */
 213	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
 214	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
 215	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
 216	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
 217};
 218
 219static const struct pci_device_id cas_pci_tbl[] = {
 220	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
 221	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 222	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
 223	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 224	{ 0, }
 225};
 226
 227MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
 228
 229static void cas_set_link_modes(struct cas *cp);
 230
 231static inline void cas_lock_tx(struct cas *cp)
 232{
 233	int i;
 234
 235	for (i = 0; i < N_TX_RINGS; i++)
 236		spin_lock_nested(&cp->tx_lock[i], i);
 237}
 238
 239/* WTZ: QA was finding deadlock problems with the previous
 240 * versions after long test runs with multiple cards per machine.
 241 * See if replacing cas_lock_all with safer versions helps. The
 242 * symptoms QA is reporting match those we'd expect if interrupts
 243 * aren't being properly restored, and we fixed a previous deadlock
 244 * with similar symptoms by using save/restore versions in other
 245 * places.
 246 */
 247#define cas_lock_all_save(cp, flags) \
 248do { \
 249	struct cas *xxxcp = (cp); \
 250	spin_lock_irqsave(&xxxcp->lock, flags); \
 251	cas_lock_tx(xxxcp); \
 252} while (0)
 253
 254static inline void cas_unlock_tx(struct cas *cp)
 255{
 256	int i;
 257
 258	for (i = N_TX_RINGS; i > 0; i--)
 259		spin_unlock(&cp->tx_lock[i - 1]);
 260}
 261
 262#define cas_unlock_all_restore(cp, flags) \
 263do { \
 264	struct cas *xxxcp = (cp); \
 265	cas_unlock_tx(xxxcp); \
 266	spin_unlock_irqrestore(&xxxcp->lock, flags); \
 267} while (0)
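
/* Usage sketch for the pair above (illustrative only, not built):
 * callers bracket whole-chip state changes with the save/restore
 * versions so interrupt state is preserved across the nested locks.
 */
#if 0
	{
		unsigned long flags;

		cas_lock_all_save(cp, flags);
		/* ... touch state guarded by cp->lock and all tx_lock[i] ... */
		cas_unlock_all_restore(cp, flags);
	}
#endif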
 268
 269static void cas_disable_irq(struct cas *cp, const int ring)
 270{
 271	/* Make sure we won't get any more interrupts */
 272	if (ring == 0) {
 273		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
 274		return;
 275	}
 276
 277	/* disable completion interrupts and selectively mask */
 278	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 279		switch (ring) {
 280#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 281#ifdef USE_PCI_INTB
 282		case 1:
 283#endif
 284#ifdef USE_PCI_INTC
 285		case 2:
 286#endif
 287#ifdef USE_PCI_INTD
 288		case 3:
 289#endif
 290			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
 291			       cp->regs + REG_PLUS_INTRN_MASK(ring));
 292			break;
 293#endif
 294		default:
 295			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
 296			       REG_PLUS_INTRN_MASK(ring));
 297			break;
 298		}
 299	}
 300}
 301
 302static inline void cas_mask_intr(struct cas *cp)
 303{
 304	int i;
 305
 306	for (i = 0; i < N_RX_COMP_RINGS; i++)
 307		cas_disable_irq(cp, i);
 308}
 309
 310static void cas_enable_irq(struct cas *cp, const int ring)
 311{
 312	if (ring == 0) { /* all but TX_DONE */
 313		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
 314		return;
 315	}
 316
 317	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 318		switch (ring) {
 319#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 320#ifdef USE_PCI_INTB
 321		case 1:
 322#endif
 323#ifdef USE_PCI_INTC
 324		case 2:
 325#endif
 326#ifdef USE_PCI_INTD
 327		case 3:
 328#endif
 329			writel(INTRN_MASK_RX_EN, cp->regs +
 330			       REG_PLUS_INTRN_MASK(ring));
 331			break;
 332#endif
 333		default:
 334			break;
 335		}
 336	}
 337}
 338
 339static inline void cas_unmask_intr(struct cas *cp)
 340{
 341	int i;
 342
 343	for (i = 0; i < N_RX_COMP_RINGS; i++)
 344		cas_enable_irq(cp, i);
 345}
 346
 347static inline void cas_entropy_gather(struct cas *cp)
 348{
 349#ifdef USE_ENTROPY_DEV
 350	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
 351		return;
 352
 353	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
 354			    readl(cp->regs + REG_ENTROPY_IV),
 355			    sizeof(uint64_t)*8);
 356#endif
 357}
 358
 359static inline void cas_entropy_reset(struct cas *cp)
 360{
 361#ifdef USE_ENTROPY_DEV
 362	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
 363		return;
 364
 365	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
 366	       cp->regs + REG_BIM_LOCAL_DEV_EN);
 367	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
 368	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
 369
 370	/* if we read back 0x0, we don't have an entropy device */
 371	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
 372		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
 373#endif
 374}
 375
 376/* access to the phy. the following assumes that we've initialized the MIF to
 377 * be in frame rather than bit-bang mode
 378 */
 379static u16 cas_phy_read(struct cas *cp, int reg)
 380{
 381	u32 cmd;
 382	int limit = STOP_TRIES_PHY;
 383
 384	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
 385	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
 386	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
 387	cmd |= MIF_FRAME_TURN_AROUND_MSB;
 388	writel(cmd, cp->regs + REG_MIF_FRAME);
 389
 390	/* poll for completion */
 391	while (limit-- > 0) {
 392		udelay(10);
 393		cmd = readl(cp->regs + REG_MIF_FRAME);
 394		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
 395			return cmd & MIF_FRAME_DATA_MASK;
 396	}
 397	return 0xFFFF; /* -1 */
 398}
 399
 400static int cas_phy_write(struct cas *cp, int reg, u16 val)
 401{
 402	int limit = STOP_TRIES_PHY;
 403	u32 cmd;
 404
 405	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
 406	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
 407	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
 408	cmd |= MIF_FRAME_TURN_AROUND_MSB;
 409	cmd |= val & MIF_FRAME_DATA_MASK;
 410	writel(cmd, cp->regs + REG_MIF_FRAME);
 411
 412	/* poll for completion */
 413	while (limit-- > 0) {
 414		udelay(10);
 415		cmd = readl(cp->regs + REG_MIF_FRAME);
 416		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
 417			return 0;
 418	}
 419	return -1;
 420}
 421
 422static void cas_phy_powerup(struct cas *cp)
 423{
 424	u16 ctl = cas_phy_read(cp, MII_BMCR);
 425
 426	if ((ctl & BMCR_PDOWN) == 0)
 427		return;
 428	ctl &= ~BMCR_PDOWN;
 429	cas_phy_write(cp, MII_BMCR, ctl);
 430}
 431
 432static void cas_phy_powerdown(struct cas *cp)
 433{
 434	u16 ctl = cas_phy_read(cp, MII_BMCR);
 435
 436	if (ctl & BMCR_PDOWN)
 437		return;
 438	ctl |= BMCR_PDOWN;
 439	cas_phy_write(cp, MII_BMCR, ctl);
 440}
 441
 442/* cp->lock held. note: the last put_page will free the buffer */
 443static int cas_page_free(struct cas *cp, cas_page_t *page)
 444{
 445	dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
 446		       DMA_FROM_DEVICE);
 447	__free_pages(page->buffer, cp->page_order);
 448	kfree(page);
 449	return 0;
 450}
 451
 452#ifdef RX_COUNT_BUFFERS
 453#define RX_USED_ADD(x, y)       ((x)->used += (y))
 454#define RX_USED_SET(x, y)       ((x)->used  = (y))
 455#else
 456#define RX_USED_ADD(x, y) do { } while(0)
 457#define RX_USED_SET(x, y) do { } while(0)
 458#endif
 459
 460/* local page allocation routines for the receive buffers. jumbo pages
 461 * require at least 8K contiguous and 8K aligned buffers.
 462 */
 463static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
 464{
 465	cas_page_t *page;
 466
 467	page = kmalloc(sizeof(cas_page_t), flags);
 468	if (!page)
 469		return NULL;
 470
 471	INIT_LIST_HEAD(&page->list);
 472	RX_USED_SET(page, 0);
 473	page->buffer = alloc_pages(flags, cp->page_order);
 474	if (!page->buffer)
 475		goto page_err;
 476	page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
 477				      cp->page_size, DMA_FROM_DEVICE);
 478	return page;
 479
 480page_err:
 481	kfree(page);
 482	return NULL;
 483}
 484
 485/* initialize spare pool of rx buffers, but allocate during the open */
 486static void cas_spare_init(struct cas *cp)
 487{
 488	spin_lock(&cp->rx_inuse_lock);
 489	INIT_LIST_HEAD(&cp->rx_inuse_list);
 490	spin_unlock(&cp->rx_inuse_lock);
 491
 492	spin_lock(&cp->rx_spare_lock);
 493	INIT_LIST_HEAD(&cp->rx_spare_list);
 494	cp->rx_spares_needed = RX_SPARE_COUNT;
 495	spin_unlock(&cp->rx_spare_lock);
 496}
 497
 498/* used on close. free all the spare buffers. */
 499static void cas_spare_free(struct cas *cp)
 500{
 501	struct list_head list, *elem, *tmp;
 502
 503	/* free spare buffers */
 504	INIT_LIST_HEAD(&list);
 505	spin_lock(&cp->rx_spare_lock);
 506	list_splice_init(&cp->rx_spare_list, &list);
 507	spin_unlock(&cp->rx_spare_lock);
 508	list_for_each_safe(elem, tmp, &list) {
 509		cas_page_free(cp, list_entry(elem, cas_page_t, list));
 510	}
 511
 512	INIT_LIST_HEAD(&list);
 513#if 1
 514	/*
 515	 * Looks like Adrian had protected this with a different
 516	 * lock than used everywhere else to manipulate this list.
 517	 */
 518	spin_lock(&cp->rx_inuse_lock);
 519	list_splice_init(&cp->rx_inuse_list, &list);
 520	spin_unlock(&cp->rx_inuse_lock);
 521#else
 522	spin_lock(&cp->rx_spare_lock);
 523	list_splice_init(&cp->rx_inuse_list, &list);
 524	spin_unlock(&cp->rx_spare_lock);
 525#endif
 526	list_for_each_safe(elem, tmp, &list) {
 527		cas_page_free(cp, list_entry(elem, cas_page_t, list));
 528	}
 529}
 530
 531/* replenish spares if needed */
 532static void cas_spare_recover(struct cas *cp, const gfp_t flags)
 533{
 534	struct list_head list, *elem, *tmp;
 535	int needed, i;
 536
 537	/* check inuse list. if we don't need any more free buffers,
 538	 * just free them
 539	 */
 540
 541	/* make a local copy of the list */
 542	INIT_LIST_HEAD(&list);
 543	spin_lock(&cp->rx_inuse_lock);
 544	list_splice_init(&cp->rx_inuse_list, &list);
 545	spin_unlock(&cp->rx_inuse_lock);
 546
 547	list_for_each_safe(elem, tmp, &list) {
 548		cas_page_t *page = list_entry(elem, cas_page_t, list);
 549
 550		/*
 551		 * With the lockless pagecache, cassini buffering scheme gets
 552		 * slightly less accurate: we might find that a page has an
 553		 * elevated reference count here, due to a speculative ref,
 554		 * and skip it as in-use. Ideally we would be able to reclaim
 555		 * it. However this would be such a rare case, it doesn't
 556		 * matter too much as we should pick it up the next time round.
 557		 *
 558		 * Importantly, if we find that the page has a refcount of 1
 559		 * here (our refcount), then we know it is definitely not inuse
 560		 * so we can reuse it.
 561		 */
 562		if (page_count(page->buffer) > 1)
 563			continue;
 564
 565		list_del(elem);
 566		spin_lock(&cp->rx_spare_lock);
 567		if (cp->rx_spares_needed > 0) {
 568			list_add(elem, &cp->rx_spare_list);
 569			cp->rx_spares_needed--;
 570			spin_unlock(&cp->rx_spare_lock);
 571		} else {
 572			spin_unlock(&cp->rx_spare_lock);
 573			cas_page_free(cp, page);
 574		}
 575	}
 576
 577	/* put any inuse buffers back on the list */
 578	if (!list_empty(&list)) {
 579		spin_lock(&cp->rx_inuse_lock);
 580		list_splice(&list, &cp->rx_inuse_list);
 581		spin_unlock(&cp->rx_inuse_lock);
 582	}
 583
 584	spin_lock(&cp->rx_spare_lock);
 585	needed = cp->rx_spares_needed;
 586	spin_unlock(&cp->rx_spare_lock);
 587	if (!needed)
 588		return;
 589
 590	/* we still need spares, so try to allocate some */
 591	INIT_LIST_HEAD(&list);
 592	i = 0;
 593	while (i < needed) {
 594		cas_page_t *spare = cas_page_alloc(cp, flags);
 595		if (!spare)
 596			break;
 597		list_add(&spare->list, &list);
 598		i++;
 599	}
 600
 601	spin_lock(&cp->rx_spare_lock);
 602	list_splice(&list, &cp->rx_spare_list);
 603	cp->rx_spares_needed -= i;
 604	spin_unlock(&cp->rx_spare_lock);
 605}
 606
 607/* pull a page from the list. */
 608static cas_page_t *cas_page_dequeue(struct cas *cp)
 609{
 610	struct list_head *entry;
 611	int recover;
 612
 613	spin_lock(&cp->rx_spare_lock);
 614	if (list_empty(&cp->rx_spare_list)) {
 615		/* try to do a quick recovery */
 616		spin_unlock(&cp->rx_spare_lock);
 617		cas_spare_recover(cp, GFP_ATOMIC);
 618		spin_lock(&cp->rx_spare_lock);
 619		if (list_empty(&cp->rx_spare_list)) {
 620			netif_err(cp, rx_err, cp->dev,
 621				  "no spare buffers available\n");
 622			spin_unlock(&cp->rx_spare_lock);
 623			return NULL;
 624		}
 625	}
 626
 627	entry = cp->rx_spare_list.next;
 628	list_del(entry);
 629	recover = ++cp->rx_spares_needed;
 630	spin_unlock(&cp->rx_spare_lock);
 631
 632	/* trigger the timer to do the recovery */
 633	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
 634#if 1
 635		atomic_inc(&cp->reset_task_pending);
 636		atomic_inc(&cp->reset_task_pending_spare);
 637		schedule_work(&cp->reset_task);
 638#else
 639		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
 640		schedule_work(&cp->reset_task);
 641#endif
 642	}
 643	return list_entry(entry, cas_page_t, list);
 644}
 645
 646
 647static void cas_mif_poll(struct cas *cp, const int enable)
 648{
 649	u32 cfg;
 650
 651	cfg  = readl(cp->regs + REG_MIF_CFG);
 652	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
 653
 654	if (cp->phy_type & CAS_PHY_MII_MDIO1)
 655		cfg |= MIF_CFG_PHY_SELECT;
 656
 657	/* poll and interrupt on link status change. */
 658	if (enable) {
 659		cfg |= MIF_CFG_POLL_EN;
 660		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
 661		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
 662	}
 663	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
 664	       cp->regs + REG_MIF_MASK);
 665	writel(cfg, cp->regs + REG_MIF_CFG);
 666}
 667
 668/* Must be invoked under cp->lock */
 669static void cas_begin_auto_negotiation(struct cas *cp,
 670				       const struct ethtool_link_ksettings *ep)
 671{
 672	u16 ctl;
 673#if 1
 674	int lcntl;
 675	int changed = 0;
 676	int oldstate = cp->lstate;
 677	int link_was_not_down = !(oldstate == link_down);
 678#endif
 679	/* Setup link parameters */
 680	if (!ep)
 681		goto start_aneg;
 682	lcntl = cp->link_cntl;
 683	if (ep->base.autoneg == AUTONEG_ENABLE) {
 684		cp->link_cntl = BMCR_ANENABLE;
 685	} else {
 686		u32 speed = ep->base.speed;
 687		cp->link_cntl = 0;
 688		if (speed == SPEED_100)
 689			cp->link_cntl |= BMCR_SPEED100;
 690		else if (speed == SPEED_1000)
 691			cp->link_cntl |= CAS_BMCR_SPEED1000;
 692		if (ep->base.duplex == DUPLEX_FULL)
 693			cp->link_cntl |= BMCR_FULLDPLX;
 694	}
 695#if 1
 696	changed = (lcntl != cp->link_cntl);
 697#endif
 698start_aneg:
 699	if (cp->lstate == link_up) {
 700		netdev_info(cp->dev, "PCS link down\n");
 701	} else {
 702		if (changed) {
 703			netdev_info(cp->dev, "link configuration changed\n");
 704		}
 705	}
 706	cp->lstate = link_down;
 707	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
 708	if (!cp->hw_running)
 709		return;
 710#if 1
 711	/*
 712	 * WTZ: If the old state was link_up, we turn off the carrier
 713	 * to replicate everything we do elsewhere on a link-down
 714	 * event when we were already in a link-up state..
 715	 */
 716	if (oldstate == link_up)
 717		netif_carrier_off(cp->dev);
 718	if (changed  && link_was_not_down) {
 719		/*
 720		 * WTZ: This branch will simply schedule a full reset after
 721		 * we explicitly changed link modes in an ioctl. See if this
 722		 * fixes the link-problems we were having for forced mode.
 723		 */
 724		atomic_inc(&cp->reset_task_pending);
 725		atomic_inc(&cp->reset_task_pending_all);
 726		schedule_work(&cp->reset_task);
 727		cp->timer_ticks = 0;
 728		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 729		return;
 730	}
 731#endif
 732	if (cp->phy_type & CAS_PHY_SERDES) {
 733		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
 734
 735		if (cp->link_cntl & BMCR_ANENABLE) {
 736			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
 737			cp->lstate = link_aneg;
 738		} else {
 739			if (cp->link_cntl & BMCR_FULLDPLX)
 740				val |= PCS_MII_CTRL_DUPLEX;
 741			val &= ~PCS_MII_AUTONEG_EN;
 742			cp->lstate = link_force_ok;
 743		}
 744		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 745		writel(val, cp->regs + REG_PCS_MII_CTRL);
 746
 747	} else {
 748		cas_mif_poll(cp, 0);
 749		ctl = cas_phy_read(cp, MII_BMCR);
 750		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
 751			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
 752		ctl |= cp->link_cntl;
 753		if (ctl & BMCR_ANENABLE) {
 754			ctl |= BMCR_ANRESTART;
 755			cp->lstate = link_aneg;
 756		} else {
 757			cp->lstate = link_force_ok;
 758		}
 759		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 760		cas_phy_write(cp, MII_BMCR, ctl);
 761		cas_mif_poll(cp, 1);
 762	}
 763
 764	cp->timer_ticks = 0;
 765	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 766}
 767
 768/* Must be invoked under cp->lock. */
 769static int cas_reset_mii_phy(struct cas *cp)
 770{
 771	int limit = STOP_TRIES_PHY;
 772	u16 val;
 773
 774	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
 775	udelay(100);
 776	while (--limit) {
 777		val = cas_phy_read(cp, MII_BMCR);
 778		if ((val & BMCR_RESET) == 0)
 779			break;
 780		udelay(10);
 781	}
 782	return limit <= 0;
 783}
 784
 785static void cas_saturn_firmware_init(struct cas *cp)
 786{
 787	const struct firmware *fw;
 788	const char fw_name[] = "sun/cassini.bin";
 789	int err;
 790
 791	if (PHY_NS_DP83065 != cp->phy_id)
 792		return;
 793
 794	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
 795	if (err) {
 796		pr_err("Failed to load firmware \"%s\"\n",
 797		       fw_name);
 798		return;
 799	}
 800	if (fw->size < 2) {
 801		pr_err("bogus length %zu in \"%s\"\n",
 802		       fw->size, fw_name);
 803		goto out;
 804	}
 805	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
 806	cp->fw_size = fw->size - 2;
 807	cp->fw_data = vmalloc(cp->fw_size);
 808	if (!cp->fw_data)
 809		goto out;
 810	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
 811out:
 812	release_firmware(fw);
 813}
 814
 815static void cas_saturn_firmware_load(struct cas *cp)
 816{
 817	int i;
 818
 819	if (!cp->fw_data)
 820		return;
 821
 822	cas_phy_powerdown(cp);
 823
 824	/* expanded memory access mode */
 825	cas_phy_write(cp, DP83065_MII_MEM, 0x0);
 826
 827	/* pointer configuration for new firmware */
 828	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
 829	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
 830	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
 831	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
 832	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
 833	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
 834	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
 835	cas_phy_write(cp, DP83065_MII_REGD, 0x39);
 836
 837	/* download new firmware */
 838	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
 839	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
 840	for (i = 0; i < cp->fw_size; i++)
 841		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
 842
 843	/* enable firmware */
 844	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
 845	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
 846}
 847
 848
 849/* phy initialization */
 850static void cas_phy_init(struct cas *cp)
 851{
 852	u16 val;
 853
 854	/* if we're in MII/GMII mode, set up phy */
 855	if (CAS_PHY_MII(cp->phy_type)) {
 856		writel(PCS_DATAPATH_MODE_MII,
 857		       cp->regs + REG_PCS_DATAPATH_MODE);
 858
 859		cas_mif_poll(cp, 0);
 860		cas_reset_mii_phy(cp); /* take out of isolate mode */
 861
 862		if (PHY_LUCENT_B0 == cp->phy_id) {
 863			/* workaround link up/down issue with lucent */
 864			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
 865			cas_phy_write(cp, MII_BMCR, 0x00f1);
 866			cas_phy_write(cp, LUCENT_MII_REG, 0x0);
 867
 868		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
 869			/* workarounds for broadcom phy */
 870			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
 871			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
 872			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
 873			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
 874			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
 875			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
 876			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
 877			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
 878			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
 879			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
 880			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
 881
 882		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
 883			val = cas_phy_read(cp, BROADCOM_MII_REG4);
 884			val = cas_phy_read(cp, BROADCOM_MII_REG4);
 885			if (val & 0x0080) {
 886				/* link workaround */
 887				cas_phy_write(cp, BROADCOM_MII_REG4,
 888					      val & ~0x0080);
 889			}
 890
 891		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
 892			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
 893			       SATURN_PCFG_FSI : 0x0,
 894			       cp->regs + REG_SATURN_PCFG);
 895
 896			/* load firmware to address 10Mbps auto-negotiation
 897			 * issue. NOTE: this will need to be changed if the
 898			 * default firmware gets fixed.
 899			 */
 900			if (PHY_NS_DP83065 == cp->phy_id) {
 901				cas_saturn_firmware_load(cp);
 902			}
 903			cas_phy_powerup(cp);
 904		}
 905
 906		/* advertise capabilities */
 907		val = cas_phy_read(cp, MII_BMCR);
 908		val &= ~BMCR_ANENABLE;
 909		cas_phy_write(cp, MII_BMCR, val);
 910		udelay(10);
 911
 912		cas_phy_write(cp, MII_ADVERTISE,
 913			      cas_phy_read(cp, MII_ADVERTISE) |
 914			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
 915			       ADVERTISE_100HALF | ADVERTISE_100FULL |
 916			       CAS_ADVERTISE_PAUSE |
 917			       CAS_ADVERTISE_ASYM_PAUSE));
 918
 919		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
 920			/* make sure that we don't advertise half
 921			 * duplex to avoid a chip issue
 922			 */
 923			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
 924			val &= ~CAS_ADVERTISE_1000HALF;
 925			val |= CAS_ADVERTISE_1000FULL;
 926			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
 927		}
 928
 929	} else {
 930		/* reset pcs for serdes */
 931		u32 val;
 932		int limit;
 933
 934		writel(PCS_DATAPATH_MODE_SERDES,
 935		       cp->regs + REG_PCS_DATAPATH_MODE);
 936
 937		/* enable serdes pins on saturn */
 938		if (cp->cas_flags & CAS_FLAG_SATURN)
 939			writel(0, cp->regs + REG_SATURN_PCFG);
 940
 941		/* Reset PCS unit. */
 942		val = readl(cp->regs + REG_PCS_MII_CTRL);
 943		val |= PCS_MII_RESET;
 944		writel(val, cp->regs + REG_PCS_MII_CTRL);
 945
 946		limit = STOP_TRIES;
 947		while (--limit > 0) {
 948			udelay(10);
 949			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
 950			     PCS_MII_RESET) == 0)
 951				break;
 952		}
 953		if (limit <= 0)
 954			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
 955				    readl(cp->regs + REG_PCS_STATE_MACHINE));
 956
 957		/* Make sure PCS is disabled while changing advertisement
 958		 * configuration.
 959		 */
 960		writel(0x0, cp->regs + REG_PCS_CFG);
 961
 962		/* Advertise all capabilities except half-duplex. */
 963		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
 964		val &= ~PCS_MII_ADVERT_HD;
 965		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
 966			PCS_MII_ADVERT_ASYM_PAUSE);
 967		writel(val, cp->regs + REG_PCS_MII_ADVERT);
 968
 969		/* enable PCS */
 970		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
 971
 972		/* pcs workaround: enable sync detect */
 973		writel(PCS_SERDES_CTRL_SYNCD_EN,
 974		       cp->regs + REG_PCS_SERDES_CTRL);
 975	}
 976}
 977
 978
 979static int cas_pcs_link_check(struct cas *cp)
 980{
 981	u32 stat, state_machine;
 982	int retval = 0;
 983
 984	/* The link status bit latches on zero, so you must
 985	 * read it twice in such a case to see a transition
 986	 * to the link being up.
 987	 */
 988	stat = readl(cp->regs + REG_PCS_MII_STATUS);
 989	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
 990		stat = readl(cp->regs + REG_PCS_MII_STATUS);
 991
 992	/* The remote-fault indication is only valid
 993	 * when autoneg has completed.
 994	 */
 995	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
 996		     PCS_MII_STATUS_REMOTE_FAULT)) ==
 997	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
 998		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
 999
1000	/* work around link detection issue by querying the PCS state
1001	 * machine directly.
1002	 */
1003	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
1004	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1005		stat &= ~PCS_MII_STATUS_LINK_STATUS;
1006	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1007		stat |= PCS_MII_STATUS_LINK_STATUS;
1008	}
1009
1010	if (stat & PCS_MII_STATUS_LINK_STATUS) {
1011		if (cp->lstate != link_up) {
1012			if (cp->opened) {
1013				cp->lstate = link_up;
1014				cp->link_transition = LINK_TRANSITION_LINK_UP;
1015
1016				cas_set_link_modes(cp);
1017				netif_carrier_on(cp->dev);
1018			}
1019		}
1020	} else if (cp->lstate == link_up) {
1021		cp->lstate = link_down;
1022		if (link_transition_timeout != 0 &&
1023		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1024		    !cp->link_transition_jiffies_valid) {
1025			/*
1026			 * force a reset, as a workaround for the
1027			 * link-failure problem. May want to move this to a
1028			 * point a bit earlier in the sequence. If we had
1029			 * generated a reset a short time ago, we'll wait for
1030			 * the link timer to check the status until a
1031			 * timer expires (link_transition_jiffies_valid is
1032			 * true when the timer is running.)  Instead of using
1033			 * a system timer, we just do a check whenever the
1034			 * link timer is running - this clears the flag after
1035			 * a suitable delay.
1036			 */
1037			retval = 1;
1038			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1039			cp->link_transition_jiffies = jiffies;
1040			cp->link_transition_jiffies_valid = 1;
1041		} else {
1042			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1043		}
1044		netif_carrier_off(cp->dev);
1045		if (cp->opened)
1046			netif_info(cp, link, cp->dev, "PCS link down\n");
1047
1048		/* Cassini only: if you force a mode, there can be
1049		 * sync problems on link down. to fix that, the following
1050		 * things need to be checked:
1051		 * 1) read serialink state register
1052		 * 2) read pcs status register to verify link down.
1053		 * 3) if link down and serial link == 0x03, then you need
1054		 *    to global reset the chip.
1055		 */
1056		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1057			/* should check to see if we're in a forced mode */
1058			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1059			if (stat == 0x03)
1060				return 1;
1061		}
1062	} else if (cp->lstate == link_down) {
1063		if (link_transition_timeout != 0 &&
1064		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1065		    !cp->link_transition_jiffies_valid) {
1066			/* force a reset, as a workaround for the
1067			 * link-failure problem.  May want to move
1068			 * this to a point a bit earlier in the
1069			 * sequence.
1070			 */
1071			retval = 1;
1072			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1073			cp->link_transition_jiffies = jiffies;
1074			cp->link_transition_jiffies_valid = 1;
1075		} else {
1076			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1077		}
1078	}
1079
1080	return retval;
1081}
1082
1083static int cas_pcs_interrupt(struct net_device *dev,
1084			     struct cas *cp, u32 status)
1085{
1086	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1087
1088	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1089		return 0;
1090	return cas_pcs_link_check(cp);
1091}
1092
1093static int cas_txmac_interrupt(struct net_device *dev,
1094			       struct cas *cp, u32 status)
1095{
1096	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1097
1098	if (!txmac_stat)
1099		return 0;
1100
1101	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1102		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
1103
1104	/* Defer timer expiration is quite normal,
1105	 * don't even log the event.
1106	 */
1107	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1108	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1109		return 0;
1110
1111	spin_lock(&cp->stat_lock[0]);
1112	if (txmac_stat & MAC_TX_UNDERRUN) {
1113		netdev_err(dev, "TX MAC xmit underrun\n");
1114		cp->net_stats[0].tx_fifo_errors++;
1115	}
1116
1117	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1118		netdev_err(dev, "TX MAC max packet size error\n");
1119		cp->net_stats[0].tx_errors++;
1120	}
1121
1122	/* The rest are all cases of one of the 16-bit TX
1123	 * counters expiring.
1124	 */
1125	if (txmac_stat & MAC_TX_COLL_NORMAL)
1126		cp->net_stats[0].collisions += 0x10000;
1127
1128	if (txmac_stat & MAC_TX_COLL_EXCESS) {
1129		cp->net_stats[0].tx_aborted_errors += 0x10000;
1130		cp->net_stats[0].collisions += 0x10000;
1131	}
1132
1133	if (txmac_stat & MAC_TX_COLL_LATE) {
1134		cp->net_stats[0].tx_aborted_errors += 0x10000;
1135		cp->net_stats[0].collisions += 0x10000;
1136	}
1137	spin_unlock(&cp->stat_lock[0]);
1138
1139	/* We do not keep track of MAC_TX_COLL_FIRST and
1140	 * MAC_TX_PEAK_ATTEMPTS events.
1141	 */
1142	return 0;
1143}
1144
1145static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1146{
1147	cas_hp_inst_t *inst;
1148	u32 val;
1149	int i;
1150
1151	i = 0;
1152	while ((inst = firmware) && inst->note) {
1153		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1154
1155		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1156		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1157		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1158
1159		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1160		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1161		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1162		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1163		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1164		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1165		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1166		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1167
1168		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1169		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1170		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1171		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1172		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1173		++firmware;
1174		++i;
1175	}
1176}
1177
1178static void cas_init_rx_dma(struct cas *cp)
1179{
1180	u64 desc_dma = cp->block_dvma;
1181	u32 val;
1182	int i, size;
1183
1184	/* rx free descriptors */
1185	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1186	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1187	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1188	if ((N_RX_DESC_RINGS > 1) &&
1189	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
1190		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1191	writel(val, cp->regs + REG_RX_CFG);
1192
1193	val = (unsigned long) cp->init_rxds[0] -
1194		(unsigned long) cp->init_block;
1195	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1196	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1197	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1198
1199	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1200		/* rx desc 2 is for IPSEC packets. however,
1201		 * we don't use it for that purpose.
1202		 */
1203		val = (unsigned long) cp->init_rxds[1] -
1204			(unsigned long) cp->init_block;
1205		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1206		writel((desc_dma + val) & 0xffffffff, cp->regs +
1207		       REG_PLUS_RX_DB1_LOW);
1208		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1209		       REG_PLUS_RX_KICK1);
1210	}
1211
1212	/* rx completion registers */
1213	val = (unsigned long) cp->init_rxcs[0] -
1214		(unsigned long) cp->init_block;
1215	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1216	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1217
1218	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1219		/* rx comp 2-4 */
1220		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1221			val = (unsigned long) cp->init_rxcs[i] -
1222				(unsigned long) cp->init_block;
1223			writel((desc_dma + val) >> 32, cp->regs +
1224			       REG_PLUS_RX_CBN_HI(i));
1225			writel((desc_dma + val) & 0xffffffff, cp->regs +
1226			       REG_PLUS_RX_CBN_LOW(i));
1227		}
1228	}
1229
1230	/* read selective clear regs to prevent spurious interrupts
1231	 * on reset because complete == kick.
1232	 * selective clear set up to prevent interrupts on resets
1233	 */
1234	readl(cp->regs + REG_INTR_STATUS_ALIAS);
1235	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1236
1237	/* set up pause thresholds */
1238	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
1239			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1240	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1241			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1242	writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1243
1244	/* zero out dma reassembly buffers */
1245	for (i = 0; i < 64; i++) {
1246		writel(i, cp->regs + REG_RX_TABLE_ADDR);
1247		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1248		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1249		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1250	}
1251
1252	/* make sure address register is 0 for normal operation */
1253	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1254	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1255
1256	/* interrupt mitigation */
1257#ifdef USE_RX_BLANK
1258	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1259	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1260	writel(val, cp->regs + REG_RX_BLANK);
1261#else
1262	writel(0x0, cp->regs + REG_RX_BLANK);
1263#endif
1264
1265	/* interrupt generation as a function of low water marks for
1266	 * free desc and completion entries. these are used to trigger
1267	 * housekeeping for rx descs. we don't use the free interrupt
1268	 * as it's not very useful
1269	 */
1270	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
1271	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1272	writel(val, cp->regs + REG_RX_AE_THRESH);
1273	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1274		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1275		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1276	}
1277
1278	/* Random early detect registers. useful for congestion avoidance.
1279	 * this should be tunable.
1280	 */
1281	writel(0x0, cp->regs + REG_RX_RED);
1282
1283	/* receive page sizes. default == 2K (0x800) */
1284	val = 0;
1285	if (cp->page_size == 0x1000)
1286		val = 0x1;
1287	else if (cp->page_size == 0x2000)
1288		val = 0x2;
1289	else if (cp->page_size == 0x4000)
1290		val = 0x3;
1291
1292	/* round mtu + offset. constrain to page size. */
1293	size = cp->dev->mtu + 64;
1294	if (size > cp->page_size)
1295		size = cp->page_size;
1296
1297	if (size <= 0x400)
1298		i = 0x0;
1299	else if (size <= 0x800)
1300		i = 0x1;
1301	else if (size <= 0x1000)
1302		i = 0x2;
1303	else
1304		i = 0x3;
1305
1306	cp->mtu_stride = 1 << (i + 10);
1307	val  = CAS_BASE(RX_PAGE_SIZE, val);
1308	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1309	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1310	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1311	writel(val, cp->regs + REG_RX_PAGE_SIZE);
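
	/* Worked example: a standard 1500-byte MTU gives size = 1564,
	 * which falls in the 0x401..0x800 bucket, so i = 0x1,
	 * mtu_stride = 1 << 11 = 2048, and a 4K page holds
	 * page_size >> 11 = 2 MTU strides.
	 */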
1312
1313	/* enable the header parser if desired */
1314	if (&CAS_HP_FIRMWARE[0] == &cas_prog_null[0])
1315		return;
1316
1317	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1318	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1319	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1320	writel(val, cp->regs + REG_HP_CFG);
1321}
1322
1323static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1324{
1325	memset(rxc, 0, sizeof(*rxc));
1326	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1327}
1328
1329/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1330 * flipping is protected by the fact that the chip will not
1331 * hand back the same page index while it's being processed.
1332 */
1333static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1334{
1335	cas_page_t *page = cp->rx_pages[1][index];
1336	cas_page_t *new;
1337
1338	if (page_count(page->buffer) == 1)
1339		return page;
1340
1341	new = cas_page_dequeue(cp);
1342	if (new) {
1343		spin_lock(&cp->rx_inuse_lock);
1344		list_add(&page->list, &cp->rx_inuse_list);
1345		spin_unlock(&cp->rx_inuse_lock);
1346	}
1347	return new;
1348}
1349
1350/* this needs to be changed if we actually use the ENC RX DESC ring */
1351static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1352				 const int index)
1353{
1354	cas_page_t **page0 = cp->rx_pages[0];
1355	cas_page_t **page1 = cp->rx_pages[1];
1356
1357	/* swap if buffer is in use */
1358	if (page_count(page0[index]->buffer) > 1) {
1359		cas_page_t *new = cas_page_spare(cp, index);
1360		if (new) {
1361			page1[index] = page0[index];
1362			page0[index] = new;
1363		}
1364	}
1365	RX_USED_SET(page0[index], 0);
1366	return page0[index];
1367}
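
/* Illustrative sketch (not built) of the receive-side scheme described
 * in the header comment at the top of this file: copy RX_COPY_MIN
 * bytes so the upper layers find the header in the skb itself, then
 * attach the rest of the page as a fragment holding its own page
 * reference.  The real receive path (later in this file) additionally
 * handles swivel offsets, split packets, flow batching and DMA syncs;
 * the function name here is hypothetical.
 */
#if 0
static struct sk_buff *cas_rx_frag_sketch(struct cas *cp, cas_page_t *page,
					  int off, int len)
{
	struct sk_buff *skb = netdev_alloc_skb(cp->dev, RX_COPY_MIN + 2);

	if (!skb)
		return NULL;
	skb_reserve(skb, 2);	/* align the IP header */
	skb_put_data(skb, page_address(page->buffer) + off, RX_COPY_MIN);
	get_page(page->buffer);	/* the frag keeps its own reference */
	skb_add_rx_frag(skb, 0, page->buffer, off + RX_COPY_MIN,
			len - RX_COPY_MIN, cp->page_size);
	return skb;
}
#endif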
1368
1369static void cas_clean_rxds(struct cas *cp)
1370{
1371	/* only clean ring 0 as ring 1 is used for spare buffers */
1372	struct cas_rx_desc *rxd = cp->init_rxds[0];
1373	int i, size;
1374
1375	/* release all rx flows */
1376	for (i = 0; i < N_RX_FLOWS; i++) {
1377		struct sk_buff *skb;
1378		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1379			cas_skb_release(skb);
1380		}
1381	}
1382
1383	/* initialize descriptors */
1384	size = RX_DESC_RINGN_SIZE(0);
1385	for (i = 0; i < size; i++) {
1386		cas_page_t *page = cas_page_swap(cp, 0, i);
1387		rxd[i].buffer = cpu_to_le64(page->dma_addr);
1388		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1389					    CAS_BASE(RX_INDEX_RING, 0));
1390	}
1391
1392	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
1393	cp->rx_last[0] = 0;
1394	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1395}
1396
1397static void cas_clean_rxcs(struct cas *cp)
1398{
1399	int i, j;
1400
1401	/* take ownership of rx comp descriptors */
1402	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1403	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1404	for (i = 0; i < N_RX_COMP_RINGS; i++) {
1405		struct cas_rx_comp *rxc = cp->init_rxcs[i];
1406		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1407			cas_rxc_init(rxc + j);
1408		}
1409	}
1410}
1411
1412#if 0
1413/* When we get a RX fifo overflow, the RX unit is probably hung
1414 * so we do the following.
1415 *
1416 * If any part of the reset goes wrong, we return 1 and that causes the
1417 * whole chip to be reset.
1418 */
1419static int cas_rxmac_reset(struct cas *cp)
1420{
1421	struct net_device *dev = cp->dev;
1422	int limit;
1423	u32 val;
1424
1425	/* First, reset MAC RX. */
1426	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1427	for (limit = 0; limit < STOP_TRIES; limit++) {
1428		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1429			break;
1430		udelay(10);
1431	}
1432	if (limit == STOP_TRIES) {
1433		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1434		return 1;
1435	}
1436
1437	/* Second, disable RX DMA. */
1438	writel(0, cp->regs + REG_RX_CFG);
1439	for (limit = 0; limit < STOP_TRIES; limit++) {
1440		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1441			break;
1442		udelay(10);
1443	}
1444	if (limit == STOP_TRIES) {
1445		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1446		return 1;
1447	}
1448
1449	mdelay(5);
1450
1451	/* Execute RX reset command. */
1452	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1453	for (limit = 0; limit < STOP_TRIES; limit++) {
1454		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1455			break;
1456		udelay(10);
1457	}
1458	if (limit == STOP_TRIES) {
1459		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1460		return 1;
1461	}
1462
1463	/* reset driver rx state */
1464	cas_clean_rxds(cp);
1465	cas_clean_rxcs(cp);
1466
1467	/* Now, reprogram the rest of RX unit. */
1468	cas_init_rx_dma(cp);
1469
1470	/* re-enable */
1471	val = readl(cp->regs + REG_RX_CFG);
1472	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1473	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1474	val = readl(cp->regs + REG_MAC_RX_CFG);
1475	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1476	return 0;
1477}
1478#endif
1479
1480static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1481			       u32 status)
1482{
1483	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1484
1485	if (!stat)
1486		return 0;
1487
1488	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1489
1490	/* these are all rollovers */
1491	spin_lock(&cp->stat_lock[0]);
1492	if (stat & MAC_RX_ALIGN_ERR)
1493		cp->net_stats[0].rx_frame_errors += 0x10000;
1494
1495	if (stat & MAC_RX_CRC_ERR)
1496		cp->net_stats[0].rx_crc_errors += 0x10000;
1497
1498	if (stat & MAC_RX_LEN_ERR)
1499		cp->net_stats[0].rx_length_errors += 0x10000;
1500
1501	if (stat & MAC_RX_OVERFLOW) {
1502		cp->net_stats[0].rx_over_errors++;
1503		cp->net_stats[0].rx_fifo_errors++;
1504	}
1505
1506	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
1507	 * events.
1508	 */
1509	spin_unlock(&cp->stat_lock[0]);
1510	return 0;
1511}
1512
1513static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1514			     u32 status)
1515{
1516	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1517
1518	if (!stat)
1519		return 0;
1520
1521	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1522		     "mac interrupt, stat: 0x%x\n", stat);
1523
1524	/* This interrupt is just for pause frame and pause
1525	 * tracking.  It is useful for diagnostics and debug
1526	 * but probably by default we will mask these events.
1527	 */
1528	if (stat & MAC_CTRL_PAUSE_STATE)
1529		cp->pause_entered++;
1530
1531	if (stat & MAC_CTRL_PAUSE_RECEIVED)
1532		cp->pause_last_time_recvd = (stat >> 16);
1533
1534	return 0;
1535}
1536
1537
1538/* Must be invoked under cp->lock. */
1539static inline int cas_mdio_link_not_up(struct cas *cp)
1540{
1541	u16 val;
1542
1543	switch (cp->lstate) {
1544	case link_force_ret:
1545		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1546		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1547		cp->timer_ticks = 5;
1548		cp->lstate = link_force_ok;
1549		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1550		break;
1551
1552	case link_aneg:
1553		val = cas_phy_read(cp, MII_BMCR);
1554
1555		/* Try forced modes. we try things in the following order:
1556		 * 1000 full -> 100 full/half -> 10 half
1557		 */
1558		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1559		val |= BMCR_FULLDPLX;
1560		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1561			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1562		cas_phy_write(cp, MII_BMCR, val);
1563		cp->timer_ticks = 5;
1564		cp->lstate = link_force_try;
1565		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1566		break;
1567
1568	case link_force_try:
1569		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
1570		val = cas_phy_read(cp, MII_BMCR);
1571		cp->timer_ticks = 5;
1572		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
1573			val &= ~CAS_BMCR_SPEED1000;
1574			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1575			cas_phy_write(cp, MII_BMCR, val);
1576			break;
1577		}
1578
1579		if (val & BMCR_SPEED100) {
1580			if (val & BMCR_FULLDPLX) /* fd failed */
1581				val &= ~BMCR_FULLDPLX;
1582			else { /* 100Mbps failed */
1583				val &= ~BMCR_SPEED100;
1584			}
1585			cas_phy_write(cp, MII_BMCR, val);
1586			break;
1587		}
1588		break;
1589	default:
1590		break;
1591	}
1592	return 0;
1593}
1594
1595
1596/* must be invoked with cp->lock held */
1597static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1598{
1599	int restart;
1600
1601	if (bmsr & BMSR_LSTATUS) {
1602		/* Ok, here we got a link. If we had it due to a forced
1603		 * fallback, and we were configured for autoneg, we
1604		 * retry a short autoneg pass. If you know your hub is
1605		 * broken, use ethtool ;)
1606		 */
1607		if ((cp->lstate == link_force_try) &&
1608		    (cp->link_cntl & BMCR_ANENABLE)) {
1609			cp->lstate = link_force_ret;
1610			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1611			cas_mif_poll(cp, 0);
1612			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1613			cp->timer_ticks = 5;
1614			if (cp->opened)
1615				netif_info(cp, link, cp->dev,
1616					   "Got link after fallback, retrying autoneg once...\n");
1617			cas_phy_write(cp, MII_BMCR,
1618				      cp->link_fcntl | BMCR_ANENABLE |
1619				      BMCR_ANRESTART);
1620			cas_mif_poll(cp, 1);
1621
1622		} else if (cp->lstate != link_up) {
1623			cp->lstate = link_up;
1624			cp->link_transition = LINK_TRANSITION_LINK_UP;
1625
1626			if (cp->opened) {
1627				cas_set_link_modes(cp);
1628				netif_carrier_on(cp->dev);
1629			}
1630		}
1631		return 0;
1632	}
1633
1634	/* link not up. if the link was previously up, we restart the
1635	 * whole process
1636	 */
1637	restart = 0;
1638	if (cp->lstate == link_up) {
1639		cp->lstate = link_down;
1640		cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1641
1642		netif_carrier_off(cp->dev);
1643		if (cp->opened)
1644			netif_info(cp, link, cp->dev, "Link down\n");
1645		restart = 1;
1646
1647	} else if (++cp->timer_ticks > 10)
1648		cas_mdio_link_not_up(cp);
1649
1650	return restart;
1651}
1652
1653static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1654			     u32 status)
1655{
1656	u32 stat = readl(cp->regs + REG_MIF_STATUS);
1657	u16 bmsr;
1658
1659	/* check for a link change */
1660	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1661		return 0;
1662
1663	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1664	return cas_mii_link_check(cp, bmsr);
1665}
1666
1667static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1668			     u32 status)
1669{
1670	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1671
1672	if (!stat)
1673		return 0;
1674
1675	netdev_err(dev, "PCI error [%04x:%04x]",
1676		   stat, readl(cp->regs + REG_BIM_DIAG));
1677
1678	/* cassini+ has this reserved */
1679	if ((stat & PCI_ERR_BADACK) &&
1680	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1681		pr_cont(" <No ACK64# during ABS64 cycle>");
1682
1683	if (stat & PCI_ERR_DTRTO)
1684		pr_cont(" <Delayed transaction timeout>");
1685	if (stat & PCI_ERR_OTHER)
1686		pr_cont(" <other>");
1687	if (stat & PCI_ERR_BIM_DMA_WRITE)
1688		pr_cont(" <BIM DMA 0 write req>");
1689	if (stat & PCI_ERR_BIM_DMA_READ)
1690		pr_cont(" <BIM DMA 0 read req>");
1691	pr_cont("\n");
1692
1693	if (stat & PCI_ERR_OTHER) {
1694		int pci_errs;
1695
1696		/* Interrogate PCI config space for the
1697		 * true cause.
1698		 */
1699		pci_errs = pci_status_get_and_clear_errors(cp->pdev);
1700
1701		netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
1702		if (pci_errs & PCI_STATUS_PARITY)
1703			netdev_err(dev, "PCI parity error detected\n");
1704		if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
1705			netdev_err(dev, "PCI target abort\n");
1706		if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
1707			netdev_err(dev, "PCI master acks target abort\n");
1708		if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
1709			netdev_err(dev, "PCI master abort\n");
1710		if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
1711			netdev_err(dev, "PCI system error SERR#\n");
1712		if (pci_errs & PCI_STATUS_DETECTED_PARITY)
1713			netdev_err(dev, "PCI parity error\n");
1714	}
1715
1716	/* For all PCI errors, we should reset the chip. */
1717	return 1;
1718}
1719
1720/* All non-normal interrupt conditions get serviced here.
1721 * Returns non-zero if we should just exit the interrupt
1722 * handler right now (ie. if we reset the card which invalidates
1723 * all of the other original irq status bits).
1724 */
1725static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1726			    u32 status)
1727{
1728	if (status & INTR_RX_TAG_ERROR) {
1729		/* corrupt RX tag framing */
1730		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1731			     "corrupt rx tag framing\n");
1732		spin_lock(&cp->stat_lock[0]);
1733		cp->net_stats[0].rx_errors++;
1734		spin_unlock(&cp->stat_lock[0]);
1735		goto do_reset;
1736	}
1737
1738	if (status & INTR_RX_LEN_MISMATCH) {
1739		/* length mismatch. */
1740		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1741			     "length mismatch for rx frame\n");
1742		spin_lock(&cp->stat_lock[0]);
1743		cp->net_stats[0].rx_errors++;
1744		spin_unlock(&cp->stat_lock[0]);
1745		goto do_reset;
1746	}
1747
1748	if (status & INTR_PCS_STATUS) {
1749		if (cas_pcs_interrupt(dev, cp, status))
1750			goto do_reset;
1751	}
1752
1753	if (status & INTR_TX_MAC_STATUS) {
1754		if (cas_txmac_interrupt(dev, cp, status))
1755			goto do_reset;
1756	}
1757
1758	if (status & INTR_RX_MAC_STATUS) {
1759		if (cas_rxmac_interrupt(dev, cp, status))
1760			goto do_reset;
1761	}
1762
1763	if (status & INTR_MAC_CTRL_STATUS) {
1764		if (cas_mac_interrupt(dev, cp, status))
1765			goto do_reset;
1766	}
1767
1768	if (status & INTR_MIF_STATUS) {
1769		if (cas_mif_interrupt(dev, cp, status))
1770			goto do_reset;
1771	}
1772
1773	if (status & INTR_PCI_ERROR_STATUS) {
1774		if (cas_pci_interrupt(dev, cp, status))
1775			goto do_reset;
1776	}
1777	return 0;
1778
1779do_reset:
1780#if 1
1781	atomic_inc(&cp->reset_task_pending);
1782	atomic_inc(&cp->reset_task_pending_all);
1783	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1784	schedule_work(&cp->reset_task);
1785#else
1786	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1787	netdev_err(dev, "reset called in cas_abnormal_irq\n");
1788	schedule_work(&cp->reset_task);
1789#endif
1790	return 1;
1791}
1792
1793/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
1794 *       determining whether to do a netif_stop/wakeup
1795 */
1796#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1797#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1798static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1799				  const int len)
1800{
1801	unsigned long off = addr + len;
1802
1803	if (CAS_TABORT(cp) == 1)
1804		return 0;
1805	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1806		return 0;
1807	return TX_TARGET_ABORT_LEN;
1808}
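
/* Illustration of the workaround above, assuming 4 KB pages: for a
 * buffer with addr + len == 0x12ff8, CAS_ROUND_PAGE() gives 0x13000,
 * so the DMA would end 8 bytes short of a page boundary.  If that gap
 * is within TX_TARGET_ABORT_LEN, cas_calc_tabort() returns
 * TX_TARGET_ABORT_LEN and the caller bounces that many trailing bytes
 * through a tiny buffer (see cas_xmit_tx_ringN()), so a DMA on the
 * affected chip revisions never ends too close to a page boundary.
 */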
1809
1810static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1811{
1812	struct cas_tx_desc *txds;
1813	struct sk_buff **skbs;
1814	struct net_device *dev = cp->dev;
1815	int entry, count;
1816
1817	spin_lock(&cp->tx_lock[ring]);
1818	txds = cp->init_txds[ring];
1819	skbs = cp->tx_skbs[ring];
1820	entry = cp->tx_old[ring];
1821
1822	count = TX_BUFF_COUNT(ring, entry, limit);
1823	while (entry != limit) {
1824		struct sk_buff *skb = skbs[entry];
1825		dma_addr_t daddr;
1826		u32 dlen;
1827		int frag;
1828
1829		if (!skb) {
1830			/* this should never occur */
1831			entry = TX_DESC_NEXT(ring, entry);
1832			continue;
1833		}
1834
1835		/* however, we might get only a partial skb release. */
1836		count -= skb_shinfo(skb)->nr_frags +
1837			cp->tx_tiny_use[ring][entry].nbufs + 1;
1838		if (count < 0)
1839			break;
1840
1841		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1842			     "tx[%d] done, slot %d\n", ring, entry);
1843
1844		skbs[entry] = NULL;
1845		cp->tx_tiny_use[ring][entry].nbufs = 0;
1846
1847		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1848			struct cas_tx_desc *txd = txds + entry;
1849
1850			daddr = le64_to_cpu(txd->buffer);
1851			dlen = CAS_VAL(TX_DESC_BUFLEN,
1852				       le64_to_cpu(txd->control));
1853			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
1854				       DMA_TO_DEVICE);
1855			entry = TX_DESC_NEXT(ring, entry);
1856
1857			/* tiny buffer may follow */
1858			if (cp->tx_tiny_use[ring][entry].used) {
1859				cp->tx_tiny_use[ring][entry].used = 0;
1860				entry = TX_DESC_NEXT(ring, entry);
1861			}
1862		}
1863
1864		spin_lock(&cp->stat_lock[ring]);
1865		cp->net_stats[ring].tx_packets++;
1866		cp->net_stats[ring].tx_bytes += skb->len;
1867		spin_unlock(&cp->stat_lock[ring]);
1868		dev_consume_skb_irq(skb);
1869	}
1870	cp->tx_old[ring] = entry;
1871
1872	/* this is wrong for multiple tx rings. the net device needs
1873	 * multiple queues for this to do the right thing.  we wait
1874	 * for 2*packets to be available when using tiny buffers
1875	 */
1876	if (netif_queue_stopped(dev) &&
1877	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1878		netif_wake_queue(dev);
1879	spin_unlock(&cp->tx_lock[ring]);
1880}
1881
1882static void cas_tx(struct net_device *dev, struct cas *cp,
1883		   u32 status)
1884{
1885	int limit, ring;
1886#ifdef USE_TX_COMPWB
1887	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#else
	u64 compwb = 0;	/* referenced unconditionally by the printk below */
1888#endif
1889	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1890		     "tx interrupt, status: 0x%x, %llx\n",
1891		     status, (unsigned long long)compwb);
1892	/* process all the rings */
1893	for (ring = 0; ring < N_TX_RINGS; ring++) {
1894#ifdef USE_TX_COMPWB
1895		/* use the completion writeback registers */
1896		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1897			CAS_VAL(TX_COMPWB_LSB, compwb);
1898		compwb = TX_COMPWB_NEXT(compwb);
1899#else
1900		limit = readl(cp->regs + REG_TX_COMPN(ring));
1901#endif
1902		if (cp->tx_old[ring] != limit)
1903			cas_tx_ringN(cp, ring, limit);
1904	}
1905}
1906
1907
1908static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1909			      int entry, const u64 *words,
1910			      struct sk_buff **skbref)
1911{
1912	int dlen, hlen, len, i, alloclen;
1913	int off, swivel = RX_SWIVEL_OFF_VAL;
1914	struct cas_page *page;
1915	struct sk_buff *skb;
1916	void *crcaddr;
1917	__sum16 csum;
1918	char *p;
1919
1920	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1921	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1922	len  = hlen + dlen;
1923
1924	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1925		alloclen = len;
1926	else
1927		alloclen = max(hlen, RX_COPY_MIN);
1928
1929	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1930	if (skb == NULL)
1931		return -1;
1932
1933	*skbref = skb;
1934	skb_reserve(skb, swivel);
1935
1936	p = skb->data;
1937	crcaddr = NULL;
1938	if (hlen) { /* always copy header pages */
1939		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1940		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1941		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1942			swivel;
1943
1944		i = hlen;
1945		if (!dlen) /* attach FCS */
1946			i += cp->crc_size;
1947		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1948					i, DMA_FROM_DEVICE);
1949		memcpy(p, page_address(page->buffer) + off, i);
1950		dma_sync_single_for_device(&cp->pdev->dev,
1951					   page->dma_addr + off, i,
1952					   DMA_FROM_DEVICE);
1953		RX_USED_ADD(page, 0x100);
1954		p += hlen;
1955		swivel = 0;
1956	}
1957
1958
1959	if (alloclen < (hlen + dlen)) {
1960		skb_frag_t *frag = skb_shinfo(skb)->frags;
1961
1962		/* normal or jumbo packets. we use frags */
1963		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
1964		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1965		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
1966
1967		hlen = min(cp->page_size - off, dlen);
1968		if (hlen < 0) {
1969			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1970				     "rx page overflow: %d\n", hlen);
1971			dev_kfree_skb_irq(skb);
1972			return -1;
1973		}
1974		i = hlen;
1975		if (i == dlen)  /* attach FCS */
1976			i += cp->crc_size;
1977		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1978					i, DMA_FROM_DEVICE);
1979
1980		/* make sure we always copy a header */
1981		swivel = 0;
1982		if (p == (char *) skb->data) { /* not split */
1983			memcpy(p, page_address(page->buffer) + off,
1984			       RX_COPY_MIN);
1985			dma_sync_single_for_device(&cp->pdev->dev,
1986						   page->dma_addr + off, i,
1987						   DMA_FROM_DEVICE);
1988			off += RX_COPY_MIN;
1989			swivel = RX_COPY_MIN;
1990			RX_USED_ADD(page, cp->mtu_stride);
1991		} else {
1992			RX_USED_ADD(page, hlen);
1993		}
1994		skb_put(skb, alloclen);
1995
1996		skb_shinfo(skb)->nr_frags++;
1997		skb->data_len += hlen - swivel;
1998		skb->truesize += hlen - swivel;
1999		skb->len      += hlen - swivel;
2000
2001		skb_frag_fill_page_desc(frag, page->buffer, off, hlen - swivel);
2002		__skb_frag_ref(frag);
2003
2004		/* any more data? */
2005		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2006			hlen = dlen;
2007			off = 0;
2008
2009			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2010			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2011			dma_sync_single_for_cpu(&cp->pdev->dev,
2012						page->dma_addr,
2013						hlen + cp->crc_size,
2014						DMA_FROM_DEVICE);
2015			dma_sync_single_for_device(&cp->pdev->dev,
2016						   page->dma_addr,
2017						   hlen + cp->crc_size,
2018						   DMA_FROM_DEVICE);
2019
2020			skb_shinfo(skb)->nr_frags++;
2021			skb->data_len += hlen;
2022			skb->len      += hlen;
2023			frag++;
2024
2025			skb_frag_fill_page_desc(frag, page->buffer, 0, hlen);
2026			__skb_frag_ref(frag);
2027			RX_USED_ADD(page, hlen + cp->crc_size);
2028		}
2029
2030		if (cp->crc_size)
2031			crcaddr = page_address(page->buffer) + off + hlen;
2032
2033	} else {
2034		/* copying packet */
2035		if (!dlen)
2036			goto end_copy_pkt;
2037
2038		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2039		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2040		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2041		hlen = min(cp->page_size - off, dlen);
2042		if (hlen < 0) {
2043			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2044				     "rx page overflow: %d\n", hlen);
2045			dev_kfree_skb_irq(skb);
2046			return -1;
2047		}
2048		i = hlen;
2049		if (i == dlen) /* attach FCS */
2050			i += cp->crc_size;
2051		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
2052					i, DMA_FROM_DEVICE);
2053		memcpy(p, page_address(page->buffer) + off, i);
2054		dma_sync_single_for_device(&cp->pdev->dev,
2055					   page->dma_addr + off, i,
2056					   DMA_FROM_DEVICE);
2057		if (p == (char *) skb->data) /* not split */
2058			RX_USED_ADD(page, cp->mtu_stride);
2059		else
2060			RX_USED_ADD(page, i);
2061
2062		/* any more data? */
2063		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2064			p += hlen;
2065			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2066			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2067			dma_sync_single_for_cpu(&cp->pdev->dev,
2068						page->dma_addr,
2069						dlen + cp->crc_size,
2070						DMA_FROM_DEVICE);
2071			memcpy(p, page_address(page->buffer), dlen + cp->crc_size);
2072			dma_sync_single_for_device(&cp->pdev->dev,
2073						   page->dma_addr,
2074						   dlen + cp->crc_size,
2075						   DMA_FROM_DEVICE);
2076			RX_USED_ADD(page, dlen + cp->crc_size);
2077		}
2078end_copy_pkt:
2079		if (cp->crc_size)
2080			crcaddr = skb->data + alloclen;
2081
2082		skb_put(skb, alloclen);
2083	}
2084
2085	csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2086	if (cp->crc_size) {
2087		/* checksum includes FCS. strip it out. */
2088		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2089					      csum_unfold(csum)));
2090	}
2091	skb->protocol = eth_type_trans(skb, cp->dev);
2092	if (skb->protocol == htons(ETH_P_IP)) {
2093		skb->csum = csum_unfold(~csum);
2094		skb->ip_summed = CHECKSUM_COMPLETE;
2095	} else
2096		skb_checksum_none_assert(skb);
2097	return len;
2098}
2099
2100
2101/* we can handle up to 64 rx flows at a time. we do the same thing
2102 * as nonreassm except that we batch up the buffers.
2103 * NOTE: we currently just treat each flow as a bunch of packets that
2104 *       we pass up. a better way would be to coalesce the packets
2105 *       into a jumbo packet. to do that, we need to do the following:
2106 *       1) the first packet will have a clean split between header and
2107 *          data. save both.
2108 *       2) each time the next flow packet comes in, extend the
2109 *          data length and merge the checksums.
2110 *       3) on flow release, fix up the header.
2111 *       4) make sure the higher layer doesn't care.
2112 * because packets get coalesced, we shouldn't run into fragment count
2113 * issues.
2114 */
2115static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2116				   struct sk_buff *skb)
2117{
2118	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2119	struct sk_buff_head *flow = &cp->rx_flows[flowid];
2120
2121	/* this is protected at a higher layer, so no need to
2122	 * do any additional locking here. stick the buffer
2123	 * at the end.
2124	 */
2125	__skb_queue_tail(flow, skb);
2126	if (words[0] & RX_COMP1_RELEASE_FLOW) {
2127		while ((skb = __skb_dequeue(flow))) {
2128			cas_skb_release(skb);
2129		}
2130	}
2131}
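
/* Example of the batching above: a three-segment flow (flowid assigned
 * by the chip) arrives as completions A, B, C on flowid 7.  A and B
 * are only queued on cp->rx_flows[7]; when C carries
 * RX_COMP1_RELEASE_FLOW, the dequeue loop hands A, B and C to the
 * stack back-to-back, preserving intra-flow ordering.
 */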
2132
2133/* put rx descriptor back on ring. if a buffer is in use by a higher
2134 * layer, this will need to put in a replacement.
2135 */
2136static void cas_post_page(struct cas *cp, const int ring, const int index)
2137{
2138	cas_page_t *new;
2139	int entry;
2140
2141	entry = cp->rx_old[ring];
2142
2143	new = cas_page_swap(cp, ring, index);
2144	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2145	cp->init_rxds[ring][entry].index  =
2146		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2147			    CAS_BASE(RX_INDEX_RING, ring));
2148
2149	entry = RX_DESC_ENTRY(ring, entry + 1);
2150	cp->rx_old[ring] = entry;
2151
2152	if (entry % 4)
2153		return;
2154
2155	if (ring == 0)
2156		writel(entry, cp->regs + REG_RX_KICK);
2157	else if ((N_RX_DESC_RINGS > 1) &&
2158		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2159		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2160}
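
/* Note on the "entry % 4" early return above: the kick register is
 * only written once four descriptors have been replenished.  Each RX
 * descriptor is 16 bytes, so posting in groups of four keeps the kick
 * aligned to a whole 64-byte cache line of descriptors, which appears
 * to be the alignment the kick register expects (cas_start_dma() also
 * primes the kick at ring size minus 4).
 */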
2161
2162
2163/* only when things are bad */
2164static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2165{
2166	unsigned int entry, last, count, released;
2167	int cluster;
2168	cas_page_t **page = cp->rx_pages[ring];
2169
2170	entry = cp->rx_old[ring];
2171
2172	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2173		     "rxd[%d] interrupt, done: %d\n", ring, entry);
2174
2175	cluster = -1;
2176	count = entry & 0x3;
2177	last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
2178	released = 0;
2179	while (entry != last) {
2180		/* make a new buffer if it's still in use */
2181		if (page_count(page[entry]->buffer) > 1) {
2182			cas_page_t *new = cas_page_dequeue(cp);
2183			if (!new) {
2184				/* let the timer know that we need to
2185				 * do this again
2186				 */
2187				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2188				if (!timer_pending(&cp->link_timer))
2189					mod_timer(&cp->link_timer, jiffies +
2190						  CAS_LINK_FAST_TIMEOUT);
2191				cp->rx_old[ring]  = entry;
2192				cp->rx_last[ring] = num ? num - released : 0;
2193				return -ENOMEM;
2194			}
2195			spin_lock(&cp->rx_inuse_lock);
2196			list_add(&page[entry]->list, &cp->rx_inuse_list);
2197			spin_unlock(&cp->rx_inuse_lock);
2198			cp->init_rxds[ring][entry].buffer =
2199				cpu_to_le64(new->dma_addr);
2200			page[entry] = new;
2201
2202		}
2203
2204		if (++count == 4) {
2205			cluster = entry;
2206			count = 0;
2207		}
2208		released++;
2209		entry = RX_DESC_ENTRY(ring, entry + 1);
2210	}
2211	cp->rx_old[ring] = entry;
2212
2213	if (cluster < 0)
2214		return 0;
2215
2216	if (ring == 0)
2217		writel(cluster, cp->regs + REG_RX_KICK);
2218	else if ((N_RX_DESC_RINGS > 1) &&
2219		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2220		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2221	return 0;
2222}
2223
2224
2225/* process a completion ring. packets are set up in three basic ways:
2226 * small packets: should be copied header + data in single buffer.
2227 * large packets: header and data in a single buffer.
2228 * split packets: header in a separate buffer from data.
2229 *                data may be in multiple pages. data may be > 256
2230 *                bytes but in a single page.
2231 *
2232 * NOTE: RX page posting is done in this routine as well. while there's
2233 *       the capability of using multiple RX completion rings, it isn't
2234 *       really worthwhile due to the fact that the page posting will
2235 *       force serialization on the single descriptor ring.
2236 */
2237static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2238{
2239	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2240	int entry, drops;
2241	int npackets = 0;
2242
2243	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2244		     "rx[%d] interrupt, done: %d/%d\n",
2245		     ring,
2246		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2247
2248	entry = cp->rx_new[ring];
2249	drops = 0;
2250	while (1) {
2251		struct cas_rx_comp *rxc = rxcs + entry;
2252		struct sk_buff *skb;
2253		int type, len;
2254		u64 words[4];
2255		int i, dring;
2256
2257		words[0] = le64_to_cpu(rxc->word1);
2258		words[1] = le64_to_cpu(rxc->word2);
2259		words[2] = le64_to_cpu(rxc->word3);
2260		words[3] = le64_to_cpu(rxc->word4);
2261
2262		/* don't touch if still owned by hw */
2263		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2264		if (type == 0)
2265			break;
2266
2267		/* hw hasn't cleared the zero bit yet */
2268		if (words[3] & RX_COMP4_ZERO) {
2269			break;
2270		}
2271
2272		/* get info on the packet */
2273		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2274			spin_lock(&cp->stat_lock[ring]);
2275			cp->net_stats[ring].rx_errors++;
2276			if (words[3] & RX_COMP4_LEN_MISMATCH)
2277				cp->net_stats[ring].rx_length_errors++;
2278			if (words[3] & RX_COMP4_BAD)
2279				cp->net_stats[ring].rx_crc_errors++;
2280			spin_unlock(&cp->stat_lock[ring]);
2281
2282			/* We'll just return it to Cassini. */
2283		drop_it:
2284			spin_lock(&cp->stat_lock[ring]);
2285			++cp->net_stats[ring].rx_dropped;
2286			spin_unlock(&cp->stat_lock[ring]);
2287			goto next;
2288		}
2289
2290		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2291		if (len < 0) {
2292			++drops;
2293			goto drop_it;
2294		}
2295
2296		/* see if it's a flow re-assembly or not. the driver
2297		 * itself handles release back up.
2298		 */
2299		if (RX_DONT_BATCH || (type == 0x2)) {
2300			/* non-reassm: these always get released */
2301			cas_skb_release(skb);
2302		} else {
2303			cas_rx_flow_pkt(cp, words, skb);
2304		}
2305
2306		spin_lock(&cp->stat_lock[ring]);
2307		cp->net_stats[ring].rx_packets++;
2308		cp->net_stats[ring].rx_bytes += len;
2309		spin_unlock(&cp->stat_lock[ring]);
2310
2311	next:
2312		npackets++;
2313
2314		/* should it be released? */
2315		if (words[0] & RX_COMP1_RELEASE_HDR) {
2316			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2317			dring = CAS_VAL(RX_INDEX_RING, i);
2318			i = CAS_VAL(RX_INDEX_NUM, i);
2319			cas_post_page(cp, dring, i);
2320		}
2321
2322		if (words[0] & RX_COMP1_RELEASE_DATA) {
2323			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2324			dring = CAS_VAL(RX_INDEX_RING, i);
2325			i = CAS_VAL(RX_INDEX_NUM, i);
2326			cas_post_page(cp, dring, i);
2327		}
2328
2329		if (words[0] & RX_COMP1_RELEASE_NEXT) {
2330			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2331			dring = CAS_VAL(RX_INDEX_RING, i);
2332			i = CAS_VAL(RX_INDEX_NUM, i);
2333			cas_post_page(cp, dring, i);
2334		}
2335
2336		/* skip to the next entry */
2337		entry = RX_COMP_ENTRY(ring, entry + 1 +
2338				      CAS_VAL(RX_COMP1_SKIP, words[0]));
2339#ifdef USE_NAPI
2340		if (budget && (npackets >= budget))
2341			break;
2342#endif
2343	}
2344	cp->rx_new[ring] = entry;
2345
2346	if (drops)
2347		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2348	return npackets;
2349}
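
/* A sketch of the bitfield convention used throughout the RX path
 * above.  The real macros live in cassini.h and follow this shape,
 * with each field described by a _MASK/_SHIFT pair:
 *
 *	#define CAS_VAL(name, w)  (((w) & (name##_MASK)) >> (name##_SHIFT))
 *	#define CAS_BASE(name, v) (((v) << (name##_SHIFT)) & (name##_MASK))
 *
 * so, e.g., CAS_VAL(RX_COMP1_TYPE, words[0]) extracts the completion
 * type field that doubles as the descriptor ownership marker.
 */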
2350
2351
2352/* put completion entries back on the ring */
2353static void cas_post_rxcs_ringN(struct net_device *dev,
2354				struct cas *cp, int ring)
2355{
2356	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2357	int last, entry;
2358
2359	last = cp->rx_cur[ring];
2360	entry = cp->rx_new[ring];
2361	netif_printk(cp, intr, KERN_DEBUG, dev,
2362		     "rxc[%d] interrupt, done: %d/%d\n",
2363		     ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2364
2365	/* zero and re-mark descriptors */
2366	while (last != entry) {
2367		cas_rxc_init(rxc + last);
2368		last = RX_COMP_ENTRY(ring, last + 1);
2369	}
2370	cp->rx_cur[ring] = last;
2371
2372	if (ring == 0)
2373		writel(last, cp->regs + REG_RX_COMP_TAIL);
2374	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2375		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2376}
2377
2378
2379
2380/* cassini can use all four PCI interrupts for the completion ring.
2381 * rings 3 and 4 are identical
2382 */
2383#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2384static inline void cas_handle_irqN(struct net_device *dev,
2385				   struct cas *cp, const u32 status,
2386				   const int ring)
2387{
2388	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2389		cas_post_rxcs_ringN(dev, cp, ring);
2390}
2391
2392static irqreturn_t cas_interruptN(int irq, void *dev_id)
2393{
2394	struct net_device *dev = dev_id;
2395	struct cas *cp = netdev_priv(dev);
2396	unsigned long flags;
2397	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2398	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2399
2400	/* check for shared irq */
2401	if (status == 0)
2402		return IRQ_NONE;
2403
2404	spin_lock_irqsave(&cp->lock, flags);
2405	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2406#ifdef USE_NAPI
2407		cas_mask_intr(cp);
2408		napi_schedule(&cp->napi);
2409#else
2410		cas_rx_ringN(cp, ring, 0);
2411#endif
2412		status &= ~INTR_RX_DONE_ALT;
2413	}
2414
2415	if (status)
2416		cas_handle_irqN(dev, cp, status, ring);
2417	spin_unlock_irqrestore(&cp->lock, flags);
2418	return IRQ_HANDLED;
2419}
2420#endif
2421
2422#ifdef USE_PCI_INTB
2423/* everything but rx packets */
2424static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2425{
2426	if (status & INTR_RX_BUF_UNAVAIL_1) {
2427		/* Frame arrived, no free RX buffers available.
2428		 * NOTE: we can get this on a link transition.
		 */
2429		cas_post_rxds_ringN(cp, 1, 0);
2430		spin_lock(&cp->stat_lock[1]);
2431		cp->net_stats[1].rx_dropped++;
2432		spin_unlock(&cp->stat_lock[1]);
2433	}
2434
2435	if (status & INTR_RX_BUF_AE_1)
2436		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2437				    RX_AE_FREEN_VAL(1));
2438
2439	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2440		cas_post_rxcs_ringN(cp, 1);
2441}
2442
2443/* ring 2 handles a few more events than 3 and 4 */
2444static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2445{
2446	struct net_device *dev = dev_id;
2447	struct cas *cp = netdev_priv(dev);
2448	unsigned long flags;
2449	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2450
2451	/* check for shared interrupt */
2452	if (status == 0)
2453		return IRQ_NONE;
2454
2455	spin_lock_irqsave(&cp->lock, flags);
2456	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2457#ifdef USE_NAPI
2458		cas_mask_intr(cp);
2459		napi_schedule(&cp->napi);
2460#else
2461		cas_rx_ringN(cp, 1, 0);
2462#endif
2463		status &= ~INTR_RX_DONE_ALT;
2464	}
2465	if (status)
2466		cas_handle_irq1(cp, status);
2467	spin_unlock_irqrestore(&cp->lock, flags);
2468	return IRQ_HANDLED;
2469}
2470#endif
2471
2472static inline void cas_handle_irq(struct net_device *dev,
2473				  struct cas *cp, const u32 status)
2474{
2475	/* housekeeping interrupts */
2476	if (status & INTR_ERROR_MASK)
2477		cas_abnormal_irq(dev, cp, status);
2478
2479	if (status & INTR_RX_BUF_UNAVAIL) {
2480		/* Frame arrived, no free RX buffers available.
2481		 * NOTE: we can get this on a link transition.
2482		 */
2483		cas_post_rxds_ringN(cp, 0, 0);
2484		spin_lock(&cp->stat_lock[0]);
2485		cp->net_stats[0].rx_dropped++;
2486		spin_unlock(&cp->stat_lock[0]);
2487	} else if (status & INTR_RX_BUF_AE) {
2488		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2489				    RX_AE_FREEN_VAL(0));
2490	}
2491
2492	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2493		cas_post_rxcs_ringN(dev, cp, 0);
2494}
2495
2496static irqreturn_t cas_interrupt(int irq, void *dev_id)
2497{
2498	struct net_device *dev = dev_id;
2499	struct cas *cp = netdev_priv(dev);
2500	unsigned long flags;
2501	u32 status = readl(cp->regs + REG_INTR_STATUS);
2502
2503	if (status == 0)
2504		return IRQ_NONE;
2505
2506	spin_lock_irqsave(&cp->lock, flags);
2507	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2508		cas_tx(dev, cp, status);
2509		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2510	}
2511
2512	if (status & INTR_RX_DONE) {
2513#ifdef USE_NAPI
2514		cas_mask_intr(cp);
2515		napi_schedule(&cp->napi);
2516#else
2517		cas_rx_ringN(cp, 0, 0);
2518#endif
2519		status &= ~INTR_RX_DONE;
2520	}
2521
2522	if (status)
2523		cas_handle_irq(dev, cp, status);
2524	spin_unlock_irqrestore(&cp->lock, flags);
2525	return IRQ_HANDLED;
2526}
2527
2528
2529#ifdef USE_NAPI
2530static int cas_poll(struct napi_struct *napi, int budget)
2531{
2532	struct cas *cp = container_of(napi, struct cas, napi);
2533	struct net_device *dev = cp->dev;
2534	int i, enable_intr, credits;
2535	u32 status = readl(cp->regs + REG_INTR_STATUS);
2536	unsigned long flags;
2537
2538	spin_lock_irqsave(&cp->lock, flags);
2539	cas_tx(dev, cp, status);
2540	spin_unlock_irqrestore(&cp->lock, flags);
2541
2542	/* NAPI rx packets. we spread the credits across all of the
2543	 * rxc rings
2544	 *
2545	 * to make sure we're fair with the work we loop through each
2546	 * ring N_RX_COMP_RING times with a request of
2547	 * budget / N_RX_COMP_RINGS
2548	 */
2549	enable_intr = 1;
2550	credits = 0;
2551	for (i = 0; i < N_RX_COMP_RINGS; i++) {
2552		int j;
2553		for (j = 0; j < N_RX_COMP_RINGS; j++) {
2554			credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2555			if (credits >= budget) {
2556				enable_intr = 0;
2557				goto rx_comp;
2558			}
2559		}
2560	}
2561
2562rx_comp:
2563	/* final rx completion */
2564	spin_lock_irqsave(&cp->lock, flags);
2565	if (status)
2566		cas_handle_irq(dev, cp, status);
2567
2568#ifdef USE_PCI_INTB
2569	if (N_RX_COMP_RINGS > 1) {
2570		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2571		if (status)
2572			cas_handle_irq1(cp, status);
2573	}
2574#endif
2575
2576#ifdef USE_PCI_INTC
2577	if (N_RX_COMP_RINGS > 2) {
2578		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2579		if (status)
2580			cas_handle_irqN(dev, cp, status, 2);
2581	}
2582#endif
2583
2584#ifdef USE_PCI_INTD
2585	if (N_RX_COMP_RINGS > 3) {
2586		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2587		if (status)
2588			cas_handle_irqN(dev, cp, status, 3);
2589	}
2590#endif
2591	spin_unlock_irqrestore(&cp->lock, flags);
2592	if (enable_intr) {
2593		napi_complete(napi);
2594		cas_unmask_intr(cp);
2595	}
2596	return credits;
2597}
2598#endif
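
/* Budget arithmetic for cas_poll(), as a worked example: with a NAPI
 * budget of 64 and N_RX_COMP_RINGS == 4, each cas_rx_ringN() call may
 * process up to 64 / 4 = 16 packets, and the nested loops give every
 * ring up to four such passes.  Once the summed credits reach the
 * budget, polling stops with interrupts still masked so NAPI polls
 * again; otherwise napi_complete() re-enables interrupts.
 */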
2599
2600#ifdef CONFIG_NET_POLL_CONTROLLER
2601static void cas_netpoll(struct net_device *dev)
2602{
2603	struct cas *cp = netdev_priv(dev);
2604
2605	cas_disable_irq(cp, 0);
2606	cas_interrupt(cp->pdev->irq, dev);
2607	cas_enable_irq(cp, 0);
2608
2609#ifdef USE_PCI_INTB
2610	if (N_RX_COMP_RINGS > 1) {
2611		/* cas_interrupt1(); */
2612	}
2613#endif
2614#ifdef USE_PCI_INTC
2615	if (N_RX_COMP_RINGS > 2) {
2616		/* cas_interruptN(); */
2617	}
2618#endif
2619#ifdef USE_PCI_INTD
2620	if (N_RX_COMP_RINGS > 3) {
2621		/* cas_interruptN(); */
2622	}
2623#endif
2624}
2625#endif
2626
2627static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
2628{
2629	struct cas *cp = netdev_priv(dev);
2630
2631	netdev_err(dev, "transmit timed out, resetting\n");
2632	if (!cp->hw_running) {
2633		netdev_err(dev, "hrm.. hw not running!\n");
2634		return;
2635	}
2636
2637	netdev_err(dev, "MIF_STATE[%08x]\n",
2638		   readl(cp->regs + REG_MIF_STATE_MACHINE));
2639
2640	netdev_err(dev, "MAC_STATE[%08x]\n",
2641		   readl(cp->regs + REG_MAC_STATE_MACHINE));
2642
2643	netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2644		   readl(cp->regs + REG_TX_CFG),
2645		   readl(cp->regs + REG_MAC_TX_STATUS),
2646		   readl(cp->regs + REG_MAC_TX_CFG),
2647		   readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2648		   readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2649		   readl(cp->regs + REG_TX_FIFO_READ_PTR),
2650		   readl(cp->regs + REG_TX_SM_1),
2651		   readl(cp->regs + REG_TX_SM_2));
2652
2653	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2654		   readl(cp->regs + REG_RX_CFG),
2655		   readl(cp->regs + REG_MAC_RX_STATUS),
2656		   readl(cp->regs + REG_MAC_RX_CFG));
2657
2658	netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2659		   readl(cp->regs + REG_HP_STATE_MACHINE),
2660		   readl(cp->regs + REG_HP_STATUS0),
2661		   readl(cp->regs + REG_HP_STATUS1),
2662		   readl(cp->regs + REG_HP_STATUS2));
2663
2664#if 1
2665	atomic_inc(&cp->reset_task_pending);
2666	atomic_inc(&cp->reset_task_pending_all);
2667	schedule_work(&cp->reset_task);
2668#else
2669	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2670	schedule_work(&cp->reset_task);
2671#endif
2672}
2673
2674static inline int cas_intme(int ring, int entry)
2675{
2676	/* Algorithm: IRQ every 1/2 of descriptors. */
2677	if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2678		return 1;
2679	return 0;
2680}
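
/* e.g. for a 128-entry ring (ring sizes here are illustrative; the
 * real values come from cassini.h), (TX_DESC_RINGN_SIZE(ring) >> 1) - 1
 * is a mask of 63, so cas_intme() is non-zero only for entries 0 and
 * 64: two TX completion interrupts per trip around the ring.
 */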
2681
2682
2683static void cas_write_txd(struct cas *cp, int ring, int entry,
2684			  dma_addr_t mapping, int len, u64 ctrl, int last)
2685{
2686	struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2687
2688	ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2689	if (cas_intme(ring, entry))
2690		ctrl |= TX_DESC_INTME;
2691	if (last)
2692		ctrl |= TX_DESC_EOF;
2693	txd->control = cpu_to_le64(ctrl);
2694	txd->buffer = cpu_to_le64(mapping);
2695}
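
/* Descriptor control-word sketch: a packet that fits one descriptor
 * is written with both frame markers plus the buffer length, e.g.
 *
 *	cas_write_txd(cp, ring, entry, mapping, len,
 *		      ctrl | TX_DESC_SOF, 1);
 *
 * sets TX_DESC_SOF directly and TX_DESC_EOF via last == 1, while a
 * multi-descriptor packet sets SOF on the first segment only and EOF
 * on the final one.
 */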
2696
2697static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2698				const int entry)
2699{
2700	return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2701}
2702
2703static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2704				     const int entry, const int tentry)
2705{
2706	cp->tx_tiny_use[ring][tentry].nbufs++;
2707	cp->tx_tiny_use[ring][entry].used = 1;
2708	return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2709}
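
/* The tiny-buffer bookkeeping above is split across two slots on
 * purpose: nbufs is incremented on tentry (the skb's first descriptor)
 * so cas_tx_ringN() can account for the extra descriptors when it
 * walks completed skbs, while .used is set on the tiny buffer's own
 * entry so the unmap loop knows to skip over it.
 */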
2710
2711static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2712				    struct sk_buff *skb)
2713{
2714	struct net_device *dev = cp->dev;
2715	int entry, nr_frags, frag, tabort, tentry;
2716	dma_addr_t mapping;
2717	unsigned long flags;
2718	u64 ctrl;
2719	u32 len;
2720
2721	spin_lock_irqsave(&cp->tx_lock[ring], flags);
2722
2723	/* This is a hard error, log it. */
2724	if (TX_BUFFS_AVAIL(cp, ring) <=
2725	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2726		netif_stop_queue(dev);
2727		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2728		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2729		return 1;
2730	}
2731
2732	ctrl = 0;
2733	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2734		const u64 csum_start_off = skb_checksum_start_offset(skb);
2735		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2736
2737		ctrl =  TX_DESC_CSUM_EN |
2738			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2739			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2740	}
2741
2742	entry = cp->tx_new[ring];
2743	cp->tx_skbs[ring][entry] = skb;
2744
2745	nr_frags = skb_shinfo(skb)->nr_frags;
2746	len = skb_headlen(skb);
2747	mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
2748			       offset_in_page(skb->data), len, DMA_TO_DEVICE);
2749
2750	tentry = entry;
2751	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2752	if (unlikely(tabort)) {
2753		/* NOTE: len is always >  tabort */
2754		cas_write_txd(cp, ring, entry, mapping, len - tabort,
2755			      ctrl | TX_DESC_SOF, 0);
2756		entry = TX_DESC_NEXT(ring, entry);
2757
2758		skb_copy_from_linear_data_offset(skb, len - tabort,
2759			      tx_tiny_buf(cp, ring, entry), tabort);
2760		mapping = tx_tiny_map(cp, ring, entry, tentry);
2761		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2762			      (nr_frags == 0));
2763	} else {
2764		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2765			      TX_DESC_SOF, (nr_frags == 0));
2766	}
2767	entry = TX_DESC_NEXT(ring, entry);
2768
2769	for (frag = 0; frag < nr_frags; frag++) {
2770		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2771
2772		len = skb_frag_size(fragp);
2773		mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2774					   DMA_TO_DEVICE);
2775
2776		tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
2777		if (unlikely(tabort)) {
2778			/* NOTE: len is always > tabort */
2779			cas_write_txd(cp, ring, entry, mapping, len - tabort,
2780				      ctrl, 0);
2781			entry = TX_DESC_NEXT(ring, entry);
2782			memcpy_from_page(tx_tiny_buf(cp, ring, entry),
2783					 skb_frag_page(fragp),
2784					 skb_frag_off(fragp) + len - tabort,
2785					 tabort);
2786			mapping = tx_tiny_map(cp, ring, entry, tentry);
2787			len     = tabort;
2788		}
2789
2790		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2791			      (frag + 1 == nr_frags));
2792		entry = TX_DESC_NEXT(ring, entry);
2793	}
2794
2795	cp->tx_new[ring] = entry;
2796	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2797		netif_stop_queue(dev);
2798
2799	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2800		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2801		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2802	writel(entry, cp->regs + REG_TX_KICKN(ring));
2803	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2804	return 0;
2805}
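
/* Checksum-offload example for the CHECKSUM_PARTIAL case above: for a
 * TCP/IPv4 frame with no IP options, skb_checksum_start_offset() is
 * 14 (Ethernet) + 20 (IP) = 34 and skb->csum_offset is 16 (the offset
 * of tcphdr->check), so the chip is told to checksum from byte 34 and
 * stuff the result at byte 50.
 */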
2806
2807static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2808{
2809	struct cas *cp = netdev_priv(dev);
2810
2811	/* this is only used as a load-balancing hint, so it doesn't
2812	 * need to be SMP safe
2813	 */
2814	static int ring;
2815
2816	if (skb_padto(skb, cp->min_frame_size))
2817		return NETDEV_TX_OK;
2818
2819	/* XXX: we need some higher-level QoS hooks to steer packets to
2820	 *      individual queues.
2821	 */
2822	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2823		return NETDEV_TX_BUSY;
2824	return NETDEV_TX_OK;
2825}
2826
2827static void cas_init_tx_dma(struct cas *cp)
2828{
2829	u64 desc_dma = cp->block_dvma;
2830	unsigned long off;
2831	u32 val;
2832	int i;
2833
2834	/* set up tx completion writeback registers. must be 8-byte aligned */
2835#ifdef USE_TX_COMPWB
2836	off = offsetof(struct cas_init_block, tx_compwb);
2837	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2838	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2839#endif
2840
2841	/* enable completion writebacks, enable paced mode,
2842	 * disable read pipe, and disable pre-interrupt compwbs
2843	 */
2844	val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2845		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2846		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2847		TX_CFG_INTR_COMPWB_DIS;
2848
2849	/* write out tx ring info and tx desc bases */
2850	for (i = 0; i < MAX_TX_RINGS; i++) {
2851		off = (unsigned long) cp->init_txds[i] -
2852			(unsigned long) cp->init_block;
2853
2854		val |= CAS_TX_RINGN_BASE(i);
2855		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2856		writel((desc_dma + off) & 0xffffffff, cp->regs +
2857		       REG_TX_DBN_LOW(i));
2858		/* don't zero out the kick register here as the system
2859		 * will wedge
2860		 */
2861	}
2862	writel(val, cp->regs + REG_TX_CFG);
2863
2864	/* program max burst sizes. these numbers should be different
2865	 * if doing QoS.
2866	 */
2867#ifdef USE_QOS
2868	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2869	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2870	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2871	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2872#else
2873	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2874	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2875	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2876	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2877#endif
2878}
2879
2880/* Must be invoked under cp->lock. */
2881static inline void cas_init_dma(struct cas *cp)
2882{
2883	cas_init_tx_dma(cp);
2884	cas_init_rx_dma(cp);
2885}
2886
2887static void cas_process_mc_list(struct cas *cp)
2888{
2889	u16 hash_table[16];
2890	u32 crc;
2891	struct netdev_hw_addr *ha;
2892	int i = 1;
2893
2894	memset(hash_table, 0, sizeof(hash_table));
2895	netdev_for_each_mc_addr(ha, cp->dev) {
2896		if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2897			/* use the alternate mac address registers for the
2898			 * first 15 multicast addresses
2899			 */
2900			writel((ha->addr[4] << 8) | ha->addr[5],
2901			       cp->regs + REG_MAC_ADDRN(i*3 + 0));
2902			writel((ha->addr[2] << 8) | ha->addr[3],
2903			       cp->regs + REG_MAC_ADDRN(i*3 + 1));
2904			writel((ha->addr[0] << 8) | ha->addr[1],
2905			       cp->regs + REG_MAC_ADDRN(i*3 + 2));
2906			i++;
2907		}
2908		else {
2909			/* use hw hash table for the next series of
2910			 * multicast addresses
2911			 */
2912			crc = ether_crc_le(ETH_ALEN, ha->addr);
2913			crc >>= 24;
2914			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2915		}
2916	}
2917	for (i = 0; i < 16; i++)
2918		writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2919}
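
/* Hash example for the "else" branch above: ether_crc_le() yields a
 * 32-bit CRC whose top 8 bits (after crc >>= 24) index a 256-bit
 * table spread across sixteen 16-bit registers.  A crc byte of 0xa7
 * lands in hash_table[0xa] with bit (15 - 0x7) == 8 set.
 */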
2920
2921/* Must be invoked under cp->lock. */
2922static u32 cas_setup_multicast(struct cas *cp)
2923{
2924	u32 rxcfg = 0;
2925	int i;
2926
2927	if (cp->dev->flags & IFF_PROMISC) {
2928		rxcfg |= MAC_RX_CFG_PROMISC_EN;
2929
2930	} else if (cp->dev->flags & IFF_ALLMULTI) {
2931		for (i = 0; i < 16; i++)
2932			writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2933		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2934
2935	} else {
2936		cas_process_mc_list(cp);
2937		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2938	}
2939
2940	return rxcfg;
2941}
2942
2943/* must be invoked under cp->stat_lock[N_TX_RINGS] */
2944static void cas_clear_mac_err(struct cas *cp)
2945{
2946	writel(0, cp->regs + REG_MAC_COLL_NORMAL);
2947	writel(0, cp->regs + REG_MAC_COLL_FIRST);
2948	writel(0, cp->regs + REG_MAC_COLL_EXCESS);
2949	writel(0, cp->regs + REG_MAC_COLL_LATE);
2950	writel(0, cp->regs + REG_MAC_TIMER_DEFER);
2951	writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
2952	writel(0, cp->regs + REG_MAC_RECV_FRAME);
2953	writel(0, cp->regs + REG_MAC_LEN_ERR);
2954	writel(0, cp->regs + REG_MAC_ALIGN_ERR);
2955	writel(0, cp->regs + REG_MAC_FCS_ERR);
2956	writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
2957}
2958
2959
2960static void cas_mac_reset(struct cas *cp)
2961{
2962	int i;
2963
2964	/* do both TX and RX reset */
2965	writel(0x1, cp->regs + REG_MAC_TX_RESET);
2966	writel(0x1, cp->regs + REG_MAC_RX_RESET);
2967
2968	/* wait for TX */
2969	i = STOP_TRIES;
2970	while (i-- > 0) {
2971		if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
2972			break;
2973		udelay(10);
2974	}
2975
2976	/* wait for RX */
2977	i = STOP_TRIES;
2978	while (i-- > 0) {
2979		if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
2980			break;
2981		udelay(10);
2982	}
2983
2984	if (readl(cp->regs + REG_MAC_TX_RESET) |
2985	    readl(cp->regs + REG_MAC_RX_RESET))
2986		netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
2987			   readl(cp->regs + REG_MAC_TX_RESET),
2988			   readl(cp->regs + REG_MAC_RX_RESET),
2989			   readl(cp->regs + REG_MAC_STATE_MACHINE));
2990}
2991
2992
2993/* Must be invoked under cp->lock. */
2994static void cas_init_mac(struct cas *cp)
2995{
2996	const unsigned char *e = &cp->dev->dev_addr[0];
2997	int i;
2998	cas_mac_reset(cp);
2999
3000	/* setup core arbitration weight register */
3001	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3002
3003#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3004	/* set the infinite burst register for chips that don't have
3005	 * pci issues.
3006	 */
3007	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3008		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3009#endif
3010
3011	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3012
3013	writel(0x00, cp->regs + REG_MAC_IPG0);
3014	writel(0x08, cp->regs + REG_MAC_IPG1);
3015	writel(0x04, cp->regs + REG_MAC_IPG2);
3016
3017	/* change later for 802.3z */
3018	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3019
3020	/* min frame + FCS */
3021	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3022
3023	/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
3024	 * specify the maximum frame size to prevent RX tag errors on
3025	 * oversized frames.
3026	 */
3027	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3028	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3029			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3030	       cp->regs + REG_MAC_FRAMESIZE_MAX);
3031
3032	/* NOTE: crc_size is used as a surrogate for half-duplex.
3033	 * workaround saturn half-duplex issue by increasing preamble
3034	 * size to 65 bytes.
3035	 */
3036	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3037		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3038	else
3039		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3040	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3041	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3042	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3043
3044	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3045
3046	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3047	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3048	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3049	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3050	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3051
3052	/* setup mac address in perfect filter array */
3053	for (i = 0; i < 45; i++)
3054		writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3055
3056	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3057	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3058	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3059
3060	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3061	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3062	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3063
3064	cp->mac_rx_cfg = cas_setup_multicast(cp);
3065
3066	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3067	cas_clear_mac_err(cp);
3068	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3069
3070	/* Setup MAC interrupts.  We want to get all of the interesting
3071	 * counter expiration events, but we do not want to hear about
3072	 * normal rx/tx as the DMA engine tells us that.
3073	 */
3074	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3075	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3076
3077	/* Don't enable even the PAUSE interrupts for now, we
3078	 * make no use of those events other than to record them.
3079	 */
3080	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3081}
3082
3083/* Must be invoked under cp->lock. */
3084static void cas_init_pause_thresholds(struct cas *cp)
3085{
3086	/* Calculate pause thresholds.  Setting the OFF threshold to the
3087	 * full RX fifo size effectively disables PAUSE generation
3088	 */
3089	if (cp->rx_fifo_size <= (2 * 1024)) {
3090		cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3091	} else {
3092		int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3093		if (max_frame * 3 > cp->rx_fifo_size) {
3094			cp->rx_pause_off = 7104;
3095			cp->rx_pause_on  = 960;
3096		} else {
3097			int off = (cp->rx_fifo_size - (max_frame * 2));
3098			int on = off - max_frame;
3099			cp->rx_pause_off = off;
3100			cp->rx_pause_on = on;
3101		}
3102	}
3103}
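
/* Worked example, assuming a 1500-byte MTU and the usual 16 KB RX
 * FIFO: max_frame = (1500 + 14 + 4 + 4 + 64) & ~63 = 1536, and since
 * 3 * 1536 fits in the FIFO, off = 16384 - 2 * 1536 = 13312 and
 * on = 13312 - 1536 = 11776.  XOFF goes out once less than two maximum
 * frames of FIFO space remain; XON once a third frame's worth frees up.
 */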
3104
3105static int cas_vpd_match(const void __iomem *p, const char *str)
3106{
3107	int len = strlen(str) + 1;
3108	int i;
3109
3110	for (i = 0; i < len; i++) {
3111		if (readb(p + i) != str[i])
3112			return 0;
3113	}
3114	return 1;
3115}
3116
3117
3118/* get the mac address by reading the vpd information in the rom.
3119 * also get the phy type and determine if there's an entropy generator.
3120 * NOTE: this is a bit convoluted for the following reasons:
3121 *  1) vpd info has order-dependent mac addresses for multinic cards
3122 *  2) the only way to determine the nic order is to use the slot
3123 *     number.
3124 *  3) fiber cards don't have bridges, so their slot numbers don't
3125 *     mean anything.
3126 *  4) we don't actually know we have a fiber card until after
3127 *     the mac addresses are parsed.
3128 */
3129static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3130			    const int offset)
3131{
3132	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3133	void __iomem *base, *kstart;
3134	int i, len;
3135	int found = 0;
3136#define VPD_FOUND_MAC        0x01
3137#define VPD_FOUND_PHY        0x02
3138
3139	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3140	int mac_off  = 0;
3141
3142#if defined(CONFIG_SPARC)
3143	const unsigned char *addr;
3144#endif
3145
3146	/* give us access to the PROM */
3147	writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3148	       cp->regs + REG_BIM_LOCAL_DEV_EN);
3149
3150	/* check for an expansion rom */
3151	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3152		goto use_random_mac_addr;
3153
3154	/* search for beginning of vpd */
3155	base = NULL;
3156	for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3157		/* check for PCIR */
3158		if ((readb(p + i + 0) == 0x50) &&
3159		    (readb(p + i + 1) == 0x43) &&
3160		    (readb(p + i + 2) == 0x49) &&
3161		    (readb(p + i + 3) == 0x52)) {
3162			base = p + (readb(p + i + 8) |
3163				    (readb(p + i + 9) << 8));
3164			break;
3165		}
3166	}
3167
3168	if (!base || (readb(base) != 0x82))
3169		goto use_random_mac_addr;
3170
3171	i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3172	while (i < EXPANSION_ROM_SIZE) {
3173		if (readb(base + i) != 0x90) /* no vpd found */
3174			goto use_random_mac_addr;
3175
3176		/* found a vpd field */
3177		len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3178
3179		/* extract keywords */
3180		kstart = base + i + 3;
3181		p = kstart;
3182		while ((p - kstart) < len) {
3183			int klen = readb(p + 2);
3184			int j;
3185			char type;
3186
3187			p += 3;
3188
3189			/* look for the following things:
3190			 * -- correct length == 29
3191			 * 3 (type) + 2 (size) +
3192			 * 18 (strlen("local-mac-address") + 1) +
3193			 * 6 (mac addr)
3194			 * -- VPD Instance 'I'
3195			 * -- VPD Type Bytes 'B'
3196			 * -- VPD data length == 6
3197			 * -- property string == local-mac-address
3198			 *
3199			 * -- correct length == 24
3200			 * 3 (type) + 2 (size) +
3201			 * 12 (strlen("entropy-dev") + 1) +
3202			 * 7 (strlen("vms110") + 1)
3203			 * -- VPD Instance 'I'
3204			 * -- VPD Type String 'B'
3205			 * -- VPD data length == 7
3206			 * -- property string == entropy-dev
3207			 *
3208			 * -- correct length == 18
3209			 * 3 (type) + 2 (size) +
3210			 * 9 (strlen("phy-type") + 1) +
3211			 * 4 (strlen("pcs") + 1)
3212			 * -- VPD Instance 'I'
3213			 * -- VPD Type String 'S'
3214			 * -- VPD data length == 4
3215			 * -- property string == phy-type
3216			 *
3217			 * -- correct length == 23
3218			 * 3 (type) + 2 (size) +
3219			 * 14 (strlen("phy-interface") + 1) +
3220			 * 4 (strlen("pcs") + 1)
3221			 * -- VPD Instance 'I'
3222			 * -- VPD Type String 'S'
3223			 * -- VPD data length == 4
3224			 * -- property string == phy-interface
3225			 */
3226			if (readb(p) != 'I')
3227				goto next;
3228
3229			/* finally, check string and length */
3230			type = readb(p + 3);
3231			if (type == 'B') {
3232				if ((klen == 29) && readb(p + 4) == 6 &&
3233				    cas_vpd_match(p + 5,
3234						  "local-mac-address")) {
3235					if (mac_off++ > offset)
3236						goto next;
3237
3238					/* set mac address */
3239					for (j = 0; j < 6; j++)
3240						dev_addr[j] =
3241							readb(p + 23 + j);
3242					goto found_mac;
3243				}
3244			}
3245
3246			if (type != 'S')
3247				goto next;
3248
3249#ifdef USE_ENTROPY_DEV
3250			if ((klen == 24) &&
3251			    cas_vpd_match(p + 5, "entropy-dev") &&
3252			    cas_vpd_match(p + 17, "vms110")) {
3253				cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3254				goto next;
3255			}
3256#endif
3257
3258			if (found & VPD_FOUND_PHY)
3259				goto next;
3260
3261			if ((klen == 18) && readb(p + 4) == 4 &&
3262			    cas_vpd_match(p + 5, "phy-type")) {
3263				if (cas_vpd_match(p + 14, "pcs")) {
3264					phy_type = CAS_PHY_SERDES;
3265					goto found_phy;
3266				}
3267			}
3268
3269			if ((klen == 23) && readb(p + 4) == 4 &&
3270			    cas_vpd_match(p + 5, "phy-interface")) {
3271				if (cas_vpd_match(p + 19, "pcs")) {
3272					phy_type = CAS_PHY_SERDES;
3273					goto found_phy;
3274				}
3275			}
3276found_mac:
3277			found |= VPD_FOUND_MAC;
3278			goto next;
3279
3280found_phy:
3281			found |= VPD_FOUND_PHY;
3282
3283next:
3284			p += klen;
3285		}
3286		i += len + 3;
3287	}
3288
3289use_random_mac_addr:
3290	if (found & VPD_FOUND_MAC)
3291		goto done;
3292
3293#if defined(CONFIG_SPARC)
3294	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3295	if (addr != NULL) {
3296		memcpy(dev_addr, addr, ETH_ALEN);
3297		goto done;
3298	}
3299#endif
3300
3301	/* Sun MAC prefix then 3 random bytes. */
3302	pr_info("MAC address not found in ROM VPD\n");
3303	dev_addr[0] = 0x08;
3304	dev_addr[1] = 0x00;
3305	dev_addr[2] = 0x20;
3306	get_random_bytes(dev_addr + 3, 3);
3307
3308done:
3309	writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3310	return phy_type;
3311}
3312
3313/* check pci invariants */
3314static void cas_check_pci_invariants(struct cas *cp)
3315{
3316	struct pci_dev *pdev = cp->pdev;
3317
3318	cp->cas_flags = 0;
3319	if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3320	    (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3321		if (pdev->revision >= CAS_ID_REVPLUS)
3322			cp->cas_flags |= CAS_FLAG_REG_PLUS;
3323		if (pdev->revision < CAS_ID_REVPLUS02u)
3324			cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3325
3326		/* Original Cassini supports HW CSUM, but it's not
3327		 * enabled by default as it can trigger TX hangs.
3328		 */
3329		if (pdev->revision < CAS_ID_REV2)
3330			cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3331	} else {
3332		/* Only sun has original cassini chips.  */
3333		cp->cas_flags |= CAS_FLAG_REG_PLUS;
3334
3335		/* We use a flag because the same phy might be externally
3336		 * connected.
3337		 */
3338		if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3339		    (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3340			cp->cas_flags |= CAS_FLAG_SATURN;
3341	}
3342}
3343
3344
3345static int cas_check_invariants(struct cas *cp)
3346{
3347	struct pci_dev *pdev = cp->pdev;
3348	u8 addr[ETH_ALEN];
3349	u32 cfg;
3350	int i;
3351
3352	/* get page size for rx buffers. */
3353	cp->page_order = 0;
3354#ifdef USE_PAGE_ORDER
3355	if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3356		/* see if we can allocate larger pages */
3357		struct page *page = alloc_pages(GFP_ATOMIC,
3358						CAS_JUMBO_PAGE_SHIFT -
3359						PAGE_SHIFT);
3360		if (page) {
3361			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3362			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3363		} else {
3364			pr_info("MTU limited to %d bytes\n", CAS_MAX_MTU);
3365		}
3366	}
3367#endif
3368	cp->page_size = (PAGE_SIZE << cp->page_order);
3369
3370	/* Fetch the FIFO configurations. */
3371	cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3372	cp->rx_fifo_size = RX_FIFO_SIZE;
3373
3374	/* finish phy determination. MDIO1 takes precedence over MDIO0 if
3375	 * they're both connected.
3376	 */
3377	cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn));
3378	eth_hw_addr_set(cp->dev, addr);
3379	if (cp->phy_type & CAS_PHY_SERDES) {
3380		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3381		return 0; /* no more checking needed */
3382	}
3383
3384	/* MII */
3385	cfg = readl(cp->regs + REG_MIF_CFG);
3386	if (cfg & MIF_CFG_MDIO_1) {
3387		cp->phy_type = CAS_PHY_MII_MDIO1;
3388	} else if (cfg & MIF_CFG_MDIO_0) {
3389		cp->phy_type = CAS_PHY_MII_MDIO0;
3390	}
3391
3392	cas_mif_poll(cp, 0);
3393	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3394
3395	for (i = 0; i < 32; i++) {
3396		u32 phy_id;
3397		int j;
3398
3399		for (j = 0; j < 3; j++) {
3400			cp->phy_addr = i;
3401			phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3402			phy_id |= cas_phy_read(cp, MII_PHYSID2);
3403			if (phy_id && (phy_id != 0xFFFFFFFF)) {
3404				cp->phy_id = phy_id;
3405				goto done;
3406			}
3407		}
3408	}
3409	pr_err("MII phy did not respond [%08x]\n",
3410	       readl(cp->regs + REG_MIF_STATE_MACHINE));
3411	return -1;
3412
3413done:
3414	/* see if we can do gigabit */
3415	cfg = cas_phy_read(cp, MII_BMSR);
3416	if ((cfg & CAS_BMSR_1000_EXTEND) &&
3417	    cas_phy_read(cp, CAS_MII_1000_EXTEND))
3418		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3419	return 0;
3420}
3421
3422/* Must be invoked under cp->lock. */
3423static inline void cas_start_dma(struct cas *cp)
3424{
3425	int i;
3426	u32 val;
3427	int txfailed = 0;
3428
3429	/* enable dma */
3430	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3431	writel(val, cp->regs + REG_TX_CFG);
3432	val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3433	writel(val, cp->regs + REG_RX_CFG);
3434
3435	/* enable the mac */
3436	val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3437	writel(val, cp->regs + REG_MAC_TX_CFG);
3438	val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3439	writel(val, cp->regs + REG_MAC_RX_CFG);
3440
3441	i = STOP_TRIES;
3442	while (i-- > 0) {
3443		val = readl(cp->regs + REG_MAC_TX_CFG);
3444		if ((val & MAC_TX_CFG_EN))
3445			break;
3446		udelay(10);
3447	}
3448	if (i < 0)
		txfailed = 1;
3449	i = STOP_TRIES;
3450	while (i-- > 0) {
3451		val = readl(cp->regs + REG_MAC_RX_CFG);
3452		if ((val & MAC_RX_CFG_EN)) {
3453			if (txfailed) {
3454				netdev_err(cp->dev,
3455					   "enabling mac failed [tx:%08x:%08x]\n",
3456					   readl(cp->regs + REG_MIF_STATE_MACHINE),
3457					   readl(cp->regs + REG_MAC_STATE_MACHINE));
3458			}
3459			goto enable_rx_done;
3460		}
3461		udelay(10);
3462	}
3463	netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3464		   (txfailed ? "tx,rx" : "rx"),
3465		   readl(cp->regs + REG_MIF_STATE_MACHINE),
3466		   readl(cp->regs + REG_MAC_STATE_MACHINE));
3467
3468enable_rx_done:
3469	cas_unmask_intr(cp); /* enable interrupts */
3470	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3471	writel(0, cp->regs + REG_RX_COMP_TAIL);
3472
3473	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3474		if (N_RX_DESC_RINGS > 1)
3475			writel(RX_DESC_RINGN_SIZE(1) - 4,
3476			       cp->regs + REG_PLUS_RX_KICK1);
3477	}
3478}
3479
3480/* Must be invoked under cp->lock. */
3481static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3482				   int *pause)
3483{
3484	u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3485	*fd     = (val & PCS_MII_LPA_FD) ? 1 : 0;
3486	*pause  = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3487	if (val & PCS_MII_LPA_ASYM_PAUSE)
3488		*pause |= 0x10;
3489	*spd = 1000;
3490}
3491
3492/* Must be invoked under cp->lock. */
3493static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3494				   int *pause)
3495{
3496	u32 val;
3497
3498	*fd = 0;
3499	*spd = 10;
3500	*pause = 0;
3501
3502	/* use GMII registers */
3503	val = cas_phy_read(cp, MII_LPA);
3504	if (val & CAS_LPA_PAUSE)
3505		*pause = 0x01;
3506
3507	if (val & CAS_LPA_ASYM_PAUSE)
3508		*pause |= 0x10;
3509
3510	if (val & LPA_DUPLEX)
3511		*fd = 1;
3512	if (val & LPA_100)
3513		*spd = 100;
3514
3515	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3516		val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3517		if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3518			*spd = 1000;
3519		if (val & CAS_LPA_1000FULL)
3520			*fd = 1;
3521	}
3522}
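
/* Both link-mode readers above encode flow control the same way:
 * *pause is a small bitmask with 0x01 meaning the partner advertised
 * symmetric pause and 0x10 asymmetric pause.  cas_set_link_modes()
 * consumes it: either bit enables sending pause frames, and only the
 * symmetric bit also enables honoring received pause frames.
 */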
3523
3524/* A link-up condition has occurred, initialize and enable the
3525 * rest of the chip.
3526 *
3527 * Must be invoked under cp->lock.
3528 */
3529static void cas_set_link_modes(struct cas *cp)
3530{
3531	u32 val;
3532	int full_duplex, speed, pause;
3533
3534	full_duplex = 0;
3535	speed = 10;
3536	pause = 0;
3537
3538	if (CAS_PHY_MII(cp->phy_type)) {
3539		cas_mif_poll(cp, 0);
3540		val = cas_phy_read(cp, MII_BMCR);
3541		if (val & BMCR_ANENABLE) {
3542			cas_read_mii_link_mode(cp, &full_duplex, &speed,
3543					       &pause);
3544		} else {
3545			if (val & BMCR_FULLDPLX)
3546				full_duplex = 1;
3547
3548			if (val & BMCR_SPEED100)
3549				speed = 100;
3550			else if (val & CAS_BMCR_SPEED1000)
3551				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3552					1000 : 100;
3553		}
3554		cas_mif_poll(cp, 1);
3555
3556	} else {
3557		val = readl(cp->regs + REG_PCS_MII_CTRL);
3558		cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3559		if ((val & PCS_MII_AUTONEG_EN) == 0) {
3560			if (val & PCS_MII_CTRL_DUPLEX)
3561				full_duplex = 1;
3562		}
3563	}
3564
3565	netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3566		   speed, full_duplex ? "full" : "half");
3567
3568	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3569	if (CAS_PHY_MII(cp->phy_type)) {
3570		val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3571		if (!full_duplex)
3572			val |= MAC_XIF_DISABLE_ECHO;
3573	}
3574	if (full_duplex)
3575		val |= MAC_XIF_FDPLX_LED;
3576	if (speed == 1000)
3577		val |= MAC_XIF_GMII_MODE;
3578	writel(val, cp->regs + REG_MAC_XIF_CFG);
3579
3580	/* deal with carrier and collision detect. */
3581	val = MAC_TX_CFG_IPG_EN;
3582	if (full_duplex) {
3583		val |= MAC_TX_CFG_IGNORE_CARRIER;
3584		val |= MAC_TX_CFG_IGNORE_COLL;
3585	} else {
3586#ifndef USE_CSMA_CD_PROTO
3587		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3588		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3589#endif
3590	}
3591	/* val now set up for REG_MAC_TX_CFG */
3592
3593	/* If gigabit and half-duplex, enable carrier extension
3594	 * mode.  increase slot time to 512 bytes as well.
3595	 * else, disable it and make sure slot time is 64 bytes.
3596	 * also activate checksum bug workaround
3597	 */
3598	if ((speed == 1000) && !full_duplex) {
3599		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3600		       cp->regs + REG_MAC_TX_CFG);
3601
3602		val = readl(cp->regs + REG_MAC_RX_CFG);
3603		val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
3604		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3605		       cp->regs + REG_MAC_RX_CFG);
3606
3607		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3608
3609		cp->crc_size = 4;
3610		/* minimum size gigabit frame at half duplex */
3611		cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3612
3613	} else {
3614		writel(val, cp->regs + REG_MAC_TX_CFG);
3615
3616		/* checksum bug workaround. don't strip FCS when in
3617		 * half-duplex mode
3618		 */
3619		val = readl(cp->regs + REG_MAC_RX_CFG);
3620		if (full_duplex) {
3621			val |= MAC_RX_CFG_STRIP_FCS;
3622			cp->crc_size = 0;
3623			cp->min_frame_size = CAS_MIN_MTU;
3624		} else {
3625			val &= ~MAC_RX_CFG_STRIP_FCS;
3626			cp->crc_size = 4;
3627			cp->min_frame_size = CAS_MIN_FRAME;
3628		}
3629		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3630		       cp->regs + REG_MAC_RX_CFG);
3631		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3632	}
3633
3634	if (netif_msg_link(cp)) {
3635		if (pause & 0x01) {
3636			netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3637				    cp->rx_fifo_size,
3638				    cp->rx_pause_off,
3639				    cp->rx_pause_on);
3640		} else if (pause & 0x10) {
3641			netdev_info(cp->dev, "TX pause enabled\n");
3642		} else {
3643			netdev_info(cp->dev, "Pause is disabled\n");
3644		}
3645	}
3646
3647	val = readl(cp->regs + REG_MAC_CTRL_CFG);
3648	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3649	if (pause) { /* symmetric or asymmetric pause */
3650		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3651		if (pause & 0x01) { /* symmetric pause */
3652			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3653		}
3654	}
3655	writel(val, cp->regs + REG_MAC_CTRL_CFG);
3656	cas_start_dma(cp);
3657}
3658
3659/* Must be invoked under cp->lock. */
3660static void cas_init_hw(struct cas *cp, int restart_link)
3661{
3662	if (restart_link)
3663		cas_phy_init(cp);
3664
3665	cas_init_pause_thresholds(cp);
3666	cas_init_mac(cp);
3667	cas_init_dma(cp);
3668
3669	if (restart_link) {
3670		/* Default aneg parameters */
3671		cp->timer_ticks = 0;
3672		cas_begin_auto_negotiation(cp, NULL);
3673	} else if (cp->lstate == link_up) {
3674		cas_set_link_modes(cp);
3675		netif_carrier_on(cp->dev);
3676	}
3677}
3678
3679/* Must be invoked under cp->lock. on earlier cassini boards,
3680 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
3681 * let it settle out, and then restore pci state.
3682 */
3683static void cas_hard_reset(struct cas *cp)
3684{
3685	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3686	udelay(20);
3687	pci_restore_state(cp->pdev);
3688}
3689
3690
3691static void cas_global_reset(struct cas *cp, int blkflag)
3692{
3693	int limit;
3694
3695	/* issue a global reset. don't use RSTOUT. */
3696	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3697		/* For PCS, when the blkflag is set, we should set the
3698		 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
3699		 * the last autonegotiation from being cleared.  We'll
3700		 * need some special handling if the chip is set into a
3701		 * loopback mode.
3702		 */
3703		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3704		       cp->regs + REG_SW_RESET);
3705	} else {
3706		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3707	}
3708
3709	/* need to wait at least 3ms before polling register */
3710	mdelay(3);
3711
3712	limit = STOP_TRIES;
3713	while (limit-- > 0) {
3714		u32 val = readl(cp->regs + REG_SW_RESET);
3715		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3716			goto done;
3717		udelay(10);
3718	}
3719	netdev_err(cp->dev, "sw reset failed\n");
3720
3721done:
3722	/* enable various BIM interrupts */
3723	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3724	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3725
3726	/* clear out pci error status mask for handled errors.
3727	 * we don't deal with DMA counter overflows as they happen
3728	 * all the time.
3729	 */
3730	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3731			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3732			       PCI_ERR_BIM_DMA_READ), cp->regs +
3733	       REG_PCI_ERR_STATUS_MASK);
3734
3735	/* set up for MII by default to address mac rx reset timeout
3736	 * issue
3737	 */
3738	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3739}
3740
3741static void cas_reset(struct cas *cp, int blkflag)
3742{
3743	u32 val;
3744
3745	cas_mask_intr(cp);
3746	cas_global_reset(cp, blkflag);
3747	cas_mac_reset(cp);
3748	cas_entropy_reset(cp);
3749
3750	/* disable dma engines. */
3751	val = readl(cp->regs + REG_TX_CFG);
3752	val &= ~TX_CFG_DMA_EN;
3753	writel(val, cp->regs + REG_TX_CFG);
3754
3755	val = readl(cp->regs + REG_RX_CFG);
3756	val &= ~RX_CFG_DMA_EN;
3757	writel(val, cp->regs + REG_RX_CFG);
3758
3759	/* program header parser */
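	/* the standard firmware is used unless an alternate image is
	 * configured (CAS_HP_ALT_FIRMWARE defaults to cas_prog_null)
	 * and the chip is not subject to target aborts.
	 */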
3760	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3761	    (&CAS_HP_ALT_FIRMWARE[0] == &cas_prog_null[0])) {
3762		cas_load_firmware(cp, CAS_HP_FIRMWARE);
3763	} else {
3764		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3765	}
3766
3767	/* clear out error registers */
3768	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3769	cas_clear_mac_err(cp);
3770	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3771}
3772
3773/* Shut down the chip, must be called with pm_mutex held.  */
3774static void cas_shutdown(struct cas *cp)
3775{
3776	unsigned long flags;
3777
3778	/* Make us not-running to avoid timers respawning */
3779	cp->hw_running = 0;
3780
3781	del_timer_sync(&cp->link_timer);
3782
3783	/* Stop the reset task */
3784#if 0
3785	while (atomic_read(&cp->reset_task_pending_mtu) ||
3786	       atomic_read(&cp->reset_task_pending_spare) ||
3787	       atomic_read(&cp->reset_task_pending_all))
3788		schedule();
3789
3790#else
3791	while (atomic_read(&cp->reset_task_pending))
3792		schedule();
3793#endif
3794	/* Actually stop the chip */
3795	cas_lock_all_save(cp, flags);
3796	cas_reset(cp, 0);
3797	if (cp->cas_flags & CAS_FLAG_SATURN)
3798		cas_phy_powerdown(cp);
3799	cas_unlock_all_restore(cp, flags);
3800}
3801
3802static int cas_change_mtu(struct net_device *dev, int new_mtu)
3803{
3804	struct cas *cp = netdev_priv(dev);
3805
3806	dev->mtu = new_mtu;
3807	if (!netif_running(dev) || !netif_device_present(dev))
3808		return 0;
3809
3810	/* let the reset task handle it */
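	/* a SERDES link needs a full reset, which also restarts
	 * autonegotiation; an MII PHY only needs the rings rebuilt
	 * for the new MTU.
	 */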
3811#if 1
3812	atomic_inc(&cp->reset_task_pending);
3813	if ((cp->phy_type & CAS_PHY_SERDES)) {
3814		atomic_inc(&cp->reset_task_pending_all);
3815	} else {
3816		atomic_inc(&cp->reset_task_pending_mtu);
3817	}
3818	schedule_work(&cp->reset_task);
3819#else
3820	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3821		   CAS_RESET_ALL : CAS_RESET_MTU);
3822	pr_err("reset called in cas_change_mtu\n");
3823	schedule_work(&cp->reset_task);
3824#endif
3825
3826	flush_work(&cp->reset_task);
3827	return 0;
3828}
3829
3830static void cas_clean_txd(struct cas *cp, int ring)
3831{
3832	struct cas_tx_desc *txd = cp->init_txds[ring];
3833	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3834	u64 daddr, dlen;
3835	int i, size;
3836
3837	size = TX_DESC_RINGN_SIZE(ring);
3838	for (i = 0; i < size; i++) {
3839		int frag;
3840
3841		if (skbs[i] == NULL)
3842			continue;
3843
3844		skb = skbs[i];
3845		skbs[i] = NULL;
3846
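		/* each skb spans nr_frags + 1 descriptors (the head
		 * buffer plus one per page fragment); tiny buffers take
		 * an extra descriptor slot and are skipped below.
		 */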
3847		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags;  frag++) {
3848			int ent = i & (size - 1);
3849
3850			/* first buffer is never a tiny buffer and so
3851			 * needs to be unmapped.
3852			 */
3853			daddr = le64_to_cpu(txd[ent].buffer);
3854			dlen  =  CAS_VAL(TX_DESC_BUFLEN,
3855					 le64_to_cpu(txd[ent].control));
3856			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
3857				       DMA_TO_DEVICE);
3858
3859			if (frag != skb_shinfo(skb)->nr_frags) {
3860				i++;
3861
3862				/* next buffer might be a tiny buffer.
3863				 * skip past it.
3864				 */
3865				ent = i & (size - 1);
3866				if (cp->tx_tiny_use[ring][ent].used)
3867					i++;
3868			}
3869		}
3870		dev_kfree_skb_any(skb);
3871	}
3872
3873	/* zero out tiny buf usage */
3874	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3875}
3876
3877/* freed on close */
3878static inline void cas_free_rx_desc(struct cas *cp, int ring)
3879{
3880	cas_page_t **page = cp->rx_pages[ring];
3881	int i, size;
3882
3883	size = RX_DESC_RINGN_SIZE(ring);
3884	for (i = 0; i < size; i++) {
3885		if (page[i]) {
3886			cas_page_free(cp, page[i]);
3887			page[i] = NULL;
3888		}
3889	}
3890}
3891
3892static void cas_free_rxds(struct cas *cp)
3893{
3894	int i;
3895
3896	for (i = 0; i < N_RX_DESC_RINGS; i++)
3897		cas_free_rx_desc(cp, i);
3898}
3899
3900/* Must be invoked under cp->lock. */
3901static void cas_clean_rings(struct cas *cp)
3902{
3903	int i;
3904
3905	/* need to clean all tx rings */
3906	memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3907	memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3908	for (i = 0; i < N_TX_RINGS; i++)
3909		cas_clean_txd(cp, i);
3910
3911	/* zero out init block */
3912	memset(cp->init_block, 0, sizeof(struct cas_init_block));
3913	cas_clean_rxds(cp);
3914	cas_clean_rxcs(cp);
3915}
3916
3917/* allocated on open */
3918static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3919{
3920	cas_page_t **page = cp->rx_pages[ring];
3921	int size, i = 0;
3922
3923	size = RX_DESC_RINGN_SIZE(ring);
3924	for (i = 0; i < size; i++) {
3925		if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3926			return -1;
3927	}
3928	return 0;
3929}
3930
3931static int cas_alloc_rxds(struct cas *cp)
3932{
3933	int i;
3934
3935	for (i = 0; i < N_RX_DESC_RINGS; i++) {
3936		if (cas_alloc_rx_desc(cp, i) < 0) {
3937			cas_free_rxds(cp);
3938			return -1;
3939		}
3940	}
3941	return 0;
3942}
3943
3944static void cas_reset_task(struct work_struct *work)
3945{
3946	struct cas *cp = container_of(work, struct cas, reset_task);
3947#if 0
3948	int pending = atomic_read(&cp->reset_task_pending);
3949#else
3950	int pending_all = atomic_read(&cp->reset_task_pending_all);
3951	int pending_spare = atomic_read(&cp->reset_task_pending_spare);
3952	int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
3953
3954	if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
3955		/* We can have more tasks scheduled than actually
3956		 * needed.
3957		 */
3958		atomic_dec(&cp->reset_task_pending);
3959		return;
3960	}
3961#endif
3962	/* The link went down, we reset the ring, but keep
3963	 * DMA stopped. Use this function for reset
3964	 * on error as well.
3965	 */
3966	if (cp->hw_running) {
3967		unsigned long flags;
3968
3969		/* Make sure we don't get interrupts or tx packets */
3970		netif_device_detach(cp->dev);
3971		cas_lock_all_save(cp, flags);
3972
3973		if (cp->opened) {
3974			/* cas_spare_recover is also called from cas_open,
3975			 * but the lists it uses are not initialized until
3976			 * cas_open runs, so only do this once opened.
3977			 */
3978			cas_spare_recover(cp, GFP_ATOMIC);
3979		}
3980#if 1
3981		/* test => only pending_spare set */
3982		if (!pending_all && !pending_mtu)
3983			goto done;
3984#else
3985		if (pending == CAS_RESET_SPARE)
3986			goto done;
3987#endif
3988		/* when a full reset is pending, the following call to
3989		 * cas_init_hw will restart auto negotiation.  otherwise,
3990		 * cas_reset is called with its second (blkflag) argument
3991		 * set to 1, which preserves the results of the last auto
3992		 * negotiation in the PCS instead of reinitializing the
3993		 * PHY (see cas_global_reset).
3994		 */
3995#if 1
3996		cas_reset(cp, !(pending_all > 0));
3997		if (cp->opened)
3998			cas_clean_rings(cp);
3999		cas_init_hw(cp, (pending_all > 0));
4000#else
4001		cas_reset(cp, !(pending == CAS_RESET_ALL));
4002		if (cp->opened)
4003			cas_clean_rings(cp);
4004		cas_init_hw(cp, pending == CAS_RESET_ALL);
4005#endif
4006
4007done:
4008		cas_unlock_all_restore(cp, flags);
4009		netif_device_attach(cp->dev);
4010	}
4011#if 1
4012	atomic_sub(pending_all, &cp->reset_task_pending_all);
4013	atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4014	atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4015	atomic_dec(&cp->reset_task_pending);
4016#else
4017	atomic_set(&cp->reset_task_pending, 0);
4018#endif
4019}
4020
4021static void cas_link_timer(struct timer_list *t)
4022{
4023	struct cas *cp = from_timer(cp, t, link_timer);
4024	int mask, pending = 0, reset = 0;
4025	unsigned long flags;
4026
4027	if (link_transition_timeout != 0 &&
4028	    cp->link_transition_jiffies_valid &&
4029	    time_is_before_jiffies(cp->link_transition_jiffies +
4030	      link_transition_timeout)) {
4031		/* One-second counter so link-down workaround doesn't
4032		 * cause resets to occur so fast as to fool the switch
4033		 * into thinking the link is down.
4034		 */
4035		cp->link_transition_jiffies_valid = 0;
4036	}
4037
4038	if (!cp->hw_running)
4039		return;
4040
4041	spin_lock_irqsave(&cp->lock, flags);
4042	cas_lock_tx(cp);
4043	cas_entropy_gather(cp);
4044
4045	/* If the reset task is still pending, we just
4046	 * reschedule the link timer
4047	 */
4048#if 1
4049	if (atomic_read(&cp->reset_task_pending_all) ||
4050	    atomic_read(&cp->reset_task_pending_spare) ||
4051	    atomic_read(&cp->reset_task_pending_mtu))
4052		goto done;
4053#else
4054	if (atomic_read(&cp->reset_task_pending))
4055		goto done;
4056#endif
4057
4058	/* check for rx cleaning */
4059	if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4060		int i, rmask;
4061
4062		for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4063			rmask = CAS_FLAG_RXD_POST(i);
4064			if ((mask & rmask) == 0)
4065				continue;
4066
4067			/* post_rxds will do a mod_timer */
4068			if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4069				pending = 1;
4070				continue;
4071			}
4072			cp->cas_flags &= ~rmask;
4073		}
4074	}
4075
4076	if (CAS_PHY_MII(cp->phy_type)) {
4077		u16 bmsr;
4078		cas_mif_poll(cp, 0);
4079		bmsr = cas_phy_read(cp, MII_BMSR);
4080		/* WTZ: Solaris driver reads this twice, but that
4081		 * may be due to the PCS case and the use of a
4082		 * common implementation. Read it twice here to be
4083		 * safe.
4084		 */
4085		bmsr = cas_phy_read(cp, MII_BMSR);
4086		cas_mif_poll(cp, 1);
4087		readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
4088		reset = cas_mii_link_check(cp, bmsr);
4089	} else {
4090		reset = cas_pcs_link_check(cp);
4091	}
4092
4093	if (reset)
4094		goto done;
4095
4096	/* check for tx state machine confusion */
4097	if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4098		u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4099		u32 wptr, rptr;
4100		int tlm  = CAS_VAL(MAC_SM_TLM, val);
4101
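		/* heuristic: no frame has been transmitted and the TX
		 * state machine sits in state 0x3 or 0x5 with the
		 * encapsulation state machine idle; treat the MAC as
		 * wedged and force a reset.
		 */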
4102		if (((tlm == 0x5) || (tlm == 0x3)) &&
4103		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4104			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4105				     "tx err: MAC_STATE[%08x]\n", val);
4106			reset = 1;
4107			goto done;
4108		}
4109
4110		val  = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4111		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4112		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4113		if ((val == 0) && (wptr != rptr)) {
4114			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4115				     "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4116				     val, wptr, rptr);
4117			reset = 1;
4118		}
4119
4120		if (reset)
4121			cas_hard_reset(cp);
4122	}
4123
4124done:
4125	if (reset) {
4126#if 1
4127		atomic_inc(&cp->reset_task_pending);
4128		atomic_inc(&cp->reset_task_pending_all);
4129		schedule_work(&cp->reset_task);
4130#else
4131		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4132		pr_err("reset called in cas_link_timer\n");
4133		schedule_work(&cp->reset_task);
4134#endif
4135	}
4136
4137	if (!pending)
4138		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4139	cas_unlock_tx(cp);
4140	spin_unlock_irqrestore(&cp->lock, flags);
4141}
4142
4143/* tiny buffers are used to avoid target abort issues with
4144 * older cassini chips
4145 */
4146static void cas_tx_tiny_free(struct cas *cp)
4147{
4148	struct pci_dev *pdev = cp->pdev;
4149	int i;
4150
4151	for (i = 0; i < N_TX_RINGS; i++) {
4152		if (!cp->tx_tiny_bufs[i])
4153			continue;
4154
4155		dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
4156				  cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]);
4157		cp->tx_tiny_bufs[i] = NULL;
4158	}
4159}
4160
4161static int cas_tx_tiny_alloc(struct cas *cp)
4162{
4163	struct pci_dev *pdev = cp->pdev;
4164	int i;
4165
4166	for (i = 0; i < N_TX_RINGS; i++) {
4167		cp->tx_tiny_bufs[i] =
4168			dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
4169					   &cp->tx_tiny_dvma[i], GFP_KERNEL);
4170		if (!cp->tx_tiny_bufs[i]) {
4171			cas_tx_tiny_free(cp);
4172			return -1;
4173		}
4174	}
4175	return 0;
4176}
4177
4178
4179static int cas_open(struct net_device *dev)
4180{
4181	struct cas *cp = netdev_priv(dev);
4182	int hw_was_up, err;
4183	unsigned long flags;
4184
4185	mutex_lock(&cp->pm_mutex);
4186
4187	hw_was_up = cp->hw_running;
4188
4189	/* The power-management mutex protects the hw_running
4190	 * etc. state so it is safe to do this bit without cp->lock
4191	 */
4192	if (!cp->hw_running) {
4193		/* Reset the chip */
4194		cas_lock_all_save(cp, flags);
4195		/* We set the second arg to cas_reset to zero
4196		 * because cas_init_hw below will have its second
4197		 * argument set to non-zero, which will force
4198		 * autonegotiation to start.
4199		 */
4200		cas_reset(cp, 0);
4201		cp->hw_running = 1;
4202		cas_unlock_all_restore(cp, flags);
4203	}
4204
4205	err = -ENOMEM;
4206	if (cas_tx_tiny_alloc(cp) < 0)
4207		goto err_unlock;
4208
4209	/* alloc rx descriptors */
4210	if (cas_alloc_rxds(cp) < 0)
4211		goto err_tx_tiny;
4212
4213	/* allocate spares */
4214	cas_spare_init(cp);
4215	cas_spare_recover(cp, GFP_KERNEL);
4216
4217	/* We can now request the interrupt as we know it's masked
4218	 * on the controller. cassini+ has up to 4 interrupts
4219	 * that can be used, but you need to do explicit pci interrupt
4220	 * mapping to expose them
4221	 */
4222	if (request_irq(cp->pdev->irq, cas_interrupt,
4223			IRQF_SHARED, dev->name, (void *) dev)) {
4224		netdev_err(cp->dev, "failed to request irq !\n");
4225		err = -EAGAIN;
4226		goto err_spare;
4227	}
4228
4229#ifdef USE_NAPI
4230	napi_enable(&cp->napi);
4231#endif
4232	/* init hw */
4233	cas_lock_all_save(cp, flags);
4234	cas_clean_rings(cp);
4235	cas_init_hw(cp, !hw_was_up);
4236	cp->opened = 1;
4237	cas_unlock_all_restore(cp, flags);
4238
4239	netif_start_queue(dev);
4240	mutex_unlock(&cp->pm_mutex);
4241	return 0;
4242
4243err_spare:
4244	cas_spare_free(cp);
4245	cas_free_rxds(cp);
4246err_tx_tiny:
4247	cas_tx_tiny_free(cp);
4248err_unlock:
4249	mutex_unlock(&cp->pm_mutex);
4250	return err;
4251}
4252
4253static int cas_close(struct net_device *dev)
4254{
4255	unsigned long flags;
4256	struct cas *cp = netdev_priv(dev);
4257
4258#ifdef USE_NAPI
4259	napi_disable(&cp->napi);
4260#endif
4261	/* Make sure we don't get distracted by suspend/resume */
4262	mutex_lock(&cp->pm_mutex);
4263
4264	netif_stop_queue(dev);
4265
4266	/* Stop traffic, mark us closed */
4267	cas_lock_all_save(cp, flags);
4268	cp->opened = 0;
4269	cas_reset(cp, 0);
4270	cas_phy_init(cp);
4271	cas_begin_auto_negotiation(cp, NULL);
4272	cas_clean_rings(cp);
4273	cas_unlock_all_restore(cp, flags);
4274
4275	free_irq(cp->pdev->irq, (void *) dev);
4276	cas_spare_free(cp);
4277	cas_free_rxds(cp);
4278	cas_tx_tiny_free(cp);
4279	mutex_unlock(&cp->pm_mutex);
4280	return 0;
4281}
4282
4283static struct {
4284	const char name[ETH_GSTRING_LEN];
4285} ethtool_cassini_statnames[] = {
4286	{"collisions"},
4287	{"rx_bytes"},
4288	{"rx_crc_errors"},
4289	{"rx_dropped"},
4290	{"rx_errors"},
4291	{"rx_fifo_errors"},
4292	{"rx_frame_errors"},
4293	{"rx_length_errors"},
4294	{"rx_over_errors"},
4295	{"rx_packets"},
4296	{"tx_aborted_errors"},
4297	{"tx_bytes"},
4298	{"tx_dropped"},
4299	{"tx_errors"},
4300	{"tx_fifo_errors"},
4301	{"tx_packets"}
4302};
4303#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4304
4305static struct {
4306	const int offsets;	/* neg. values for 2nd arg to cas_read_phy */
4307} ethtool_register_table[] = {
4308	{-MII_BMSR},
4309	{-MII_BMCR},
4310	{REG_CAWR},
4311	{REG_INF_BURST},
4312	{REG_BIM_CFG},
4313	{REG_RX_CFG},
4314	{REG_HP_CFG},
4315	{REG_MAC_TX_CFG},
4316	{REG_MAC_RX_CFG},
4317	{REG_MAC_CTRL_CFG},
4318	{REG_MAC_XIF_CFG},
4319	{REG_MIF_CFG},
4320	{REG_PCS_CFG},
4321	{REG_SATURN_PCFG},
4322	{REG_PCS_MII_STATUS},
4323	{REG_PCS_STATE_MACHINE},
4324	{REG_MAC_COLL_EXCESS},
4325	{REG_MAC_COLL_LATE}
4326};
4327#define CAS_REG_LEN 	ARRAY_SIZE(ethtool_register_table)
4328#define CAS_MAX_REGS 	(sizeof (u32)*CAS_REG_LEN)
4329
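/* the ethtool register dump is a flat array of up to CAS_REG_LEN u32
 * words; PHY registers (the negative offsets above) are read via MII
 * and widened from 16 to 32 bits by cas_read_regs.
 */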
4330static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4331{
4332	u8 *p;
4333	int i;
4334	unsigned long flags;
4335
4336	spin_lock_irqsave(&cp->lock, flags);
4337	for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
4338		u16 hval;
4339		u32 val;
4340		if (ethtool_register_table[i].offsets < 0) {
4341			hval = cas_phy_read(cp,
4342				    -ethtool_register_table[i].offsets);
4343			val = hval;
4344		} else {
4345			val = readl(cp->regs + ethtool_register_table[i].offsets);
4346		}
4347		memcpy(p, (u8 *)&val, sizeof(u32));
4348	}
4349	spin_unlock_irqrestore(&cp->lock, flags);
4350}
4351
4352static struct net_device_stats *cas_get_stats(struct net_device *dev)
4353{
4354	struct cas *cp = netdev_priv(dev);
4355	struct net_device_stats *stats = cp->net_stats;
4356	unsigned long flags;
4357	int i;
4358	unsigned long tmp;
4359
4360	/* we collate all of the stats into net_stats[N_TX_RINGS] */
4361	if (!cp->hw_running)
4362		return stats + N_TX_RINGS;
4363
4364	/* collect outstanding stats */
4365	/* WTZ: the Cassini spec gives these as 16 bit counters but
4366	 * stored in 32-bit words.  Added a mask of 0xffff to be safe,
4367	 * in case the chip somehow puts any garbage in the other bits.
4368	 * Also, counter usage didn't seem to match what Adrian did
4369	 * in the parts of the code that set these quantities. Made
4370	 * that consistent.
4371	 */
4372	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4373	stats[N_TX_RINGS].rx_crc_errors +=
4374	  readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4375	stats[N_TX_RINGS].rx_frame_errors +=
4376		readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
4377	stats[N_TX_RINGS].rx_length_errors +=
4378		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4379#if 1
4380	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4381		(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4382	stats[N_TX_RINGS].tx_aborted_errors += tmp;
4383	stats[N_TX_RINGS].collisions +=
4384	  tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4385#else
4386	stats[N_TX_RINGS].tx_aborted_errors +=
4387		readl(cp->regs + REG_MAC_COLL_EXCESS);
4388	stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4389		readl(cp->regs + REG_MAC_COLL_LATE);
4390#endif
4391	cas_clear_mac_err(cp);
4392
4393	/* saved bits that are unique to ring 0 */
4394	spin_lock(&cp->stat_lock[0]);
4395	stats[N_TX_RINGS].collisions        += stats[0].collisions;
4396	stats[N_TX_RINGS].rx_over_errors    += stats[0].rx_over_errors;
4397	stats[N_TX_RINGS].rx_frame_errors   += stats[0].rx_frame_errors;
4398	stats[N_TX_RINGS].rx_fifo_errors    += stats[0].rx_fifo_errors;
4399	stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4400	stats[N_TX_RINGS].tx_fifo_errors    += stats[0].tx_fifo_errors;
4401	spin_unlock(&cp->stat_lock[0]);
4402
4403	for (i = 0; i < N_TX_RINGS; i++) {
4404		spin_lock(&cp->stat_lock[i]);
4405		stats[N_TX_RINGS].rx_length_errors +=
4406			stats[i].rx_length_errors;
4407		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4408		stats[N_TX_RINGS].rx_packets    += stats[i].rx_packets;
4409		stats[N_TX_RINGS].tx_packets    += stats[i].tx_packets;
4410		stats[N_TX_RINGS].rx_bytes      += stats[i].rx_bytes;
4411		stats[N_TX_RINGS].tx_bytes      += stats[i].tx_bytes;
4412		stats[N_TX_RINGS].rx_errors     += stats[i].rx_errors;
4413		stats[N_TX_RINGS].tx_errors     += stats[i].tx_errors;
4414		stats[N_TX_RINGS].rx_dropped    += stats[i].rx_dropped;
4415		stats[N_TX_RINGS].tx_dropped    += stats[i].tx_dropped;
4416		memset(stats + i, 0, sizeof(struct net_device_stats));
4417		spin_unlock(&cp->stat_lock[i]);
4418	}
4419	spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4420	return stats + N_TX_RINGS;
4421}
4422
4423
4424static void cas_set_multicast(struct net_device *dev)
4425{
4426	struct cas *cp = netdev_priv(dev);
4427	u32 rxcfg, rxcfg_new;
4428	unsigned long flags;
4429	int limit = STOP_TRIES;
4430
4431	if (!cp->hw_running)
4432		return;
4433
4434	spin_lock_irqsave(&cp->lock, flags);
4435	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4436
4437	/* disable RX MAC and wait for completion */
4438	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4439	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4440		if (!limit--)
4441			break;
4442		udelay(10);
4443	}
4444
4445	/* disable hash filter and wait for completion */
4446	limit = STOP_TRIES;
4447	rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4448	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4449	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4450		if (!limit--)
4451			break;
4452		udelay(10);
4453	}
4454
4455	/* program hash filters */
4456	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4457	rxcfg |= rxcfg_new;
4458	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4459	spin_unlock_irqrestore(&cp->lock, flags);
4460}
4461
4462static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4463{
4464	struct cas *cp = netdev_priv(dev);
4465	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4466	strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4467	strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4468}
4469
4470static int cas_get_link_ksettings(struct net_device *dev,
4471				  struct ethtool_link_ksettings *cmd)
4472{
4473	struct cas *cp = netdev_priv(dev);
4474	u16 bmcr;
4475	int full_duplex, speed, pause;
4476	unsigned long flags;
4477	enum link_state linkstate = link_up;
4478	u32 supported, advertising;
4479
4480	advertising = 0;
4481	supported = SUPPORTED_Autoneg;
4482	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4483		supported |= SUPPORTED_1000baseT_Full;
4484		advertising |= ADVERTISED_1000baseT_Full;
4485	}
4486
4487	/* Record PHY settings if HW is on. */
4488	spin_lock_irqsave(&cp->lock, flags);
4489	bmcr = 0;
4490	linkstate = cp->lstate;
4491	if (CAS_PHY_MII(cp->phy_type)) {
4492		cmd->base.port = PORT_MII;
4493		cmd->base.phy_address = cp->phy_addr;
4494		advertising |= ADVERTISED_TP | ADVERTISED_MII |
4495			ADVERTISED_10baseT_Half |
4496			ADVERTISED_10baseT_Full |
4497			ADVERTISED_100baseT_Half |
4498			ADVERTISED_100baseT_Full;
4499
4500		supported |=
4501			(SUPPORTED_10baseT_Half |
4502			 SUPPORTED_10baseT_Full |
4503			 SUPPORTED_100baseT_Half |
4504			 SUPPORTED_100baseT_Full |
4505			 SUPPORTED_TP | SUPPORTED_MII);
4506
4507		if (cp->hw_running) {
4508			cas_mif_poll(cp, 0);
4509			bmcr = cas_phy_read(cp, MII_BMCR);
4510			cas_read_mii_link_mode(cp, &full_duplex,
4511					       &speed, &pause);
4512			cas_mif_poll(cp, 1);
4513		}
4514
4515	} else {
4516		cmd->base.port = PORT_FIBRE;
4517		cmd->base.phy_address = 0;
4518		supported   |= SUPPORTED_FIBRE;
4519		advertising |= ADVERTISED_FIBRE;
4520
4521		if (cp->hw_running) {
4522			/* pcs uses the same bits as mii */
4523			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4524			cas_read_pcs_link_mode(cp, &full_duplex,
4525					       &speed, &pause);
4526		}
4527	}
4528	spin_unlock_irqrestore(&cp->lock, flags);
4529
4530	if (bmcr & BMCR_ANENABLE) {
4531		advertising |= ADVERTISED_Autoneg;
4532		cmd->base.autoneg = AUTONEG_ENABLE;
4533		cmd->base.speed =  ((speed == 10) ?
4534					    SPEED_10 :
4535					    ((speed == 1000) ?
4536					     SPEED_1000 : SPEED_100));
4537		cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4538	} else {
4539		cmd->base.autoneg = AUTONEG_DISABLE;
4540		cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
4541					    SPEED_1000 :
4542					    ((bmcr & BMCR_SPEED100) ?
4543					     SPEED_100 : SPEED_10));
4544		cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
4545			DUPLEX_FULL : DUPLEX_HALF;
4546	}
4547	if (linkstate != link_up) {
4548		/* Force these to "unknown" if the link is not up and
4549		 * autonegotiation is enabled. We can set the link
4550		 * speed to 0, but not cmd->duplex,
4551		 * because its legal values are 0 and 1.  Ethtool will
4552		 * print the value reported in parentheses after the
4553		 * word "Unknown" for unrecognized values.
4554		 *
4555		 * If in forced mode, we report the speed and duplex
4556		 * settings that we configured.
4557		 */
4558		if (cp->link_cntl & BMCR_ANENABLE) {
4559			cmd->base.speed = 0;
4560			cmd->base.duplex = 0xff;
4561		} else {
4562			cmd->base.speed = SPEED_10;
4563			if (cp->link_cntl & BMCR_SPEED100) {
4564				cmd->base.speed = SPEED_100;
4565			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4566				cmd->base.speed = SPEED_1000;
4567			}
4568			cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
4569				DUPLEX_FULL : DUPLEX_HALF;
4570		}
4571	}
4572
4573	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4574						supported);
4575	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4576						advertising);
4577
4578	return 0;
4579}
4580
4581static int cas_set_link_ksettings(struct net_device *dev,
4582				  const struct ethtool_link_ksettings *cmd)
4583{
4584	struct cas *cp = netdev_priv(dev);
4585	unsigned long flags;
4586	u32 speed = cmd->base.speed;
4587
4588	/* Verify the settings we care about. */
4589	if (cmd->base.autoneg != AUTONEG_ENABLE &&
4590	    cmd->base.autoneg != AUTONEG_DISABLE)
4591		return -EINVAL;
4592
4593	if (cmd->base.autoneg == AUTONEG_DISABLE &&
4594	    ((speed != SPEED_1000 &&
4595	      speed != SPEED_100 &&
4596	      speed != SPEED_10) ||
4597	     (cmd->base.duplex != DUPLEX_HALF &&
4598	      cmd->base.duplex != DUPLEX_FULL)))
4599		return -EINVAL;
4600
4601	/* Apply settings and restart link process. */
4602	spin_lock_irqsave(&cp->lock, flags);
4603	cas_begin_auto_negotiation(cp, cmd);
4604	spin_unlock_irqrestore(&cp->lock, flags);
4605	return 0;
4606}
4607
4608static int cas_nway_reset(struct net_device *dev)
4609{
4610	struct cas *cp = netdev_priv(dev);
4611	unsigned long flags;
4612
4613	if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4614		return -EINVAL;
4615
4616	/* Restart link process. */
4617	spin_lock_irqsave(&cp->lock, flags);
4618	cas_begin_auto_negotiation(cp, NULL);
4619	spin_unlock_irqrestore(&cp->lock, flags);
4620
4621	return 0;
4622}
4623
4624static u32 cas_get_link(struct net_device *dev)
4625{
4626	struct cas *cp = netdev_priv(dev);
4627	return cp->lstate == link_up;
4628}
4629
4630static u32 cas_get_msglevel(struct net_device *dev)
4631{
4632	struct cas *cp = netdev_priv(dev);
4633	return cp->msg_enable;
4634}
4635
4636static void cas_set_msglevel(struct net_device *dev, u32 value)
4637{
4638	struct cas *cp = netdev_priv(dev);
4639	cp->msg_enable = value;
4640}
4641
4642static int cas_get_regs_len(struct net_device *dev)
4643{
4644	struct cas *cp = netdev_priv(dev);
4645	return min_t(int, cp->casreg_len, CAS_MAX_REGS);
4646}
4647
4648static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4649			     void *p)
4650{
4651	struct cas *cp = netdev_priv(dev);
4652	regs->version = 0;
4653	/* cas_read_regs handles locks (cp->lock).  */
4654	cas_read_regs(cp, p, regs->len / sizeof(u32));
4655}
4656
4657static int cas_get_sset_count(struct net_device *dev, int sset)
4658{
4659	switch (sset) {
4660	case ETH_SS_STATS:
4661		return CAS_NUM_STAT_KEYS;
4662	default:
4663		return -EOPNOTSUPP;
4664	}
4665}
4666
4667static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4668{
4669	memcpy(data, &ethtool_cassini_statnames,
4670	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4671}
4672
4673static void cas_get_ethtool_stats(struct net_device *dev,
4674				      struct ethtool_stats *estats, u64 *data)
4675{
4676	struct cas *cp = netdev_priv(dev);
4677	struct net_device_stats *stats = cas_get_stats(cp->dev);
4678	int i = 0;
4679	data[i++] = stats->collisions;
4680	data[i++] = stats->rx_bytes;
4681	data[i++] = stats->rx_crc_errors;
4682	data[i++] = stats->rx_dropped;
4683	data[i++] = stats->rx_errors;
4684	data[i++] = stats->rx_fifo_errors;
4685	data[i++] = stats->rx_frame_errors;
4686	data[i++] = stats->rx_length_errors;
4687	data[i++] = stats->rx_over_errors;
4688	data[i++] = stats->rx_packets;
4689	data[i++] = stats->tx_aborted_errors;
4690	data[i++] = stats->tx_bytes;
4691	data[i++] = stats->tx_dropped;
4692	data[i++] = stats->tx_errors;
4693	data[i++] = stats->tx_fifo_errors;
4694	data[i++] = stats->tx_packets;
4695	BUG_ON(i != CAS_NUM_STAT_KEYS);
4696}
4697
4698static const struct ethtool_ops cas_ethtool_ops = {
4699	.get_drvinfo		= cas_get_drvinfo,
4700	.nway_reset		= cas_nway_reset,
4701	.get_link		= cas_get_link,
4702	.get_msglevel		= cas_get_msglevel,
4703	.set_msglevel		= cas_set_msglevel,
4704	.get_regs_len		= cas_get_regs_len,
4705	.get_regs		= cas_get_regs,
4706	.get_sset_count		= cas_get_sset_count,
4707	.get_strings		= cas_get_strings,
4708	.get_ethtool_stats	= cas_get_ethtool_stats,
4709	.get_link_ksettings	= cas_get_link_ksettings,
4710	.set_link_ksettings	= cas_set_link_ksettings,
4711};
4712
4713static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4714{
4715	struct cas *cp = netdev_priv(dev);
4716	struct mii_ioctl_data *data = if_mii(ifr);
4717	unsigned long flags;
4718	int rc = -EOPNOTSUPP;
4719
4720	/* Hold the PM mutex while doing ioctl's or we may collide
4721	 * with open/close and power management and oops.
4722	 */
4723	mutex_lock(&cp->pm_mutex);
4724	switch (cmd) {
4725	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
4726		data->phy_id = cp->phy_addr;
4727		fallthrough;
4728
4729	case SIOCGMIIREG:		/* Read MII PHY register. */
4730		spin_lock_irqsave(&cp->lock, flags);
4731		cas_mif_poll(cp, 0);
4732		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4733		cas_mif_poll(cp, 1);
4734		spin_unlock_irqrestore(&cp->lock, flags);
4735		rc = 0;
4736		break;
4737
4738	case SIOCSMIIREG:		/* Write MII PHY register. */
4739		spin_lock_irqsave(&cp->lock, flags);
4740		cas_mif_poll(cp, 0);
4741		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4742		cas_mif_poll(cp, 1);
4743		spin_unlock_irqrestore(&cp->lock, flags);
4744		break;
4745	default:
4746		break;
4747	}
4748
4749	mutex_unlock(&cp->pm_mutex);
4750	return rc;
4751}
4752
4753/* When this chip sits underneath an Intel 31154 bridge, it is the
4754 * only subordinate device and we can tweak the bridge settings to
4755 * reflect that fact.
4756 */
4757static void cas_program_bridge(struct pci_dev *cas_pdev)
4758{
4759	struct pci_dev *pdev = cas_pdev->bus->self;
4760	u32 val;
4761
4762	if (!pdev)
4763		return;
4764
4765	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4766		return;
4767
4768	/* Clear bit 10 (Bus Parking Control) in the Secondary
4769	 * Arbiter Control/Status Register which lives at offset
4770	 * 0x41.  Using a 32-bit word read/modify/write at 0x40
4771	 * is much simpler so that's how we do this.
4772	 */
4773	pci_read_config_dword(pdev, 0x40, &val);
4774	val &= ~0x00040000;
4775	pci_write_config_dword(pdev, 0x40, val);
4776
4777	/* Max out the Multi-Transaction Timer settings since
4778	 * Cassini is the only device present.
4779	 *
4780	 * The register is 16-bit and lives at 0x50.  When the
4781	 * settings are enabled, it extends the GRANT# signal
4782	 * for a requestor after a transaction is complete.  This
4783	 * allows the next request to run without first needing
4784	 * to negotiate the GRANT# signal back.
4785	 *
4786	 * Bits 12:10 define the grant duration:
4787	 *
4788	 *	1	--	16 clocks
4789	 *	2	--	32 clocks
4790	 *	3	--	64 clocks
4791	 *	4	--	128 clocks
4792	 *	5	--	256 clocks
4793	 *
4794	 * All other values are illegal.
4795	 *
4796	 * Bits 09:00 define which REQ/GNT signal pairs get the
4797	 * GRANT# signal treatment.  We set them all.
4798	 */
4799	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
4800
4801	/* The Read Prefetch Policy register is 16-bit and sits at
4802	 * offset 0x52.  It enables a "smart" pre-fetch policy.  We
4803	 * enable it and max out all of the settings since only one
4804	 * device is sitting underneath and thus bandwidth sharing is
4805	 * not an issue.
4806	 *
4807	 * The register has several 3-bit fields, each indicating a
4808	 * multiplier applied to the base amount of prefetching the
4809	 * chip would do.  These fields are at:
4810	 *
4811	 *	15:13	---	ReRead Primary Bus
4812	 *	12:10	---	FirstRead Primary Bus
4813	 *	09:07	---	ReRead Secondary Bus
4814	 *	06:04	---	FirstRead Secondary Bus
4815	 *
4816	 * Bits 03:00 control which REQ/GNT pairs the prefetch settings
4817	 * get enabled on.  Bit 3 is a grouped enabler which controls
4818	 * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
4819	 * the individual REQ/GNT pairs [2:0].
4820	 */
4821	pci_write_config_word(pdev, 0x52,
4822			      (0x7 << 13) |
4823			      (0x7 << 10) |
4824			      (0x7 <<  7) |
4825			      (0x7 <<  4) |
4826			      (0xf <<  0));
4827
4828	/* Force cacheline size to 0x8 */
4829	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4830
4831	/* Force latency timer to maximum setting so Cassini can
4832	 * sit on the bus as long as it likes.
4833	 */
4834	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4835}
4836
4837static const struct net_device_ops cas_netdev_ops = {
4838	.ndo_open		= cas_open,
4839	.ndo_stop		= cas_close,
4840	.ndo_start_xmit		= cas_start_xmit,
4841	.ndo_get_stats 		= cas_get_stats,
4842	.ndo_set_rx_mode	= cas_set_multicast,
4843	.ndo_eth_ioctl		= cas_ioctl,
4844	.ndo_tx_timeout		= cas_tx_timeout,
4845	.ndo_change_mtu		= cas_change_mtu,
4846	.ndo_set_mac_address	= eth_mac_addr,
4847	.ndo_validate_addr	= eth_validate_addr,
4848#ifdef CONFIG_NET_POLL_CONTROLLER
4849	.ndo_poll_controller	= cas_netpoll,
4850#endif
4851};
4852
4853static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4854{
4855	static int cas_version_printed = 0;
4856	unsigned long casreg_len;
4857	struct net_device *dev;
4858	struct cas *cp;
4859	u16 pci_cmd;
4860	int i, err;
4861	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4862
4863	if (cas_version_printed++ == 0)
4864		pr_info("%s", version);
4865
4866	err = pci_enable_device(pdev);
4867	if (err) {
4868		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4869		return err;
4870	}
4871
4872	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4873		dev_err(&pdev->dev, "Cannot find proper PCI device "
4874		       "base address, aborting\n");
4875		err = -ENODEV;
4876		goto err_out_disable_pdev;
4877	}
4878
4879	dev = alloc_etherdev(sizeof(*cp));
4880	if (!dev) {
4881		err = -ENOMEM;
4882		goto err_out_disable_pdev;
4883	}
4884	SET_NETDEV_DEV(dev, &pdev->dev);
4885
4886	err = pci_request_regions(pdev, dev->name);
4887	if (err) {
4888		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4889		goto err_out_free_netdev;
4890	}
4891	pci_set_master(pdev);
4892
4893	/* we must always turn on parity response or else parity
4894	 * doesn't get generated properly. disable SERR/PERR as well.
4895	 * in addition, we want to turn MWI on.
4896	 */
4897	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4898	pci_cmd &= ~PCI_COMMAND_SERR;
4899	pci_cmd |= PCI_COMMAND_PARITY;
4900	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4901	if (pci_try_set_mwi(pdev))
4902		pr_warn("Could not enable MWI for %s\n", pci_name(pdev));
4903
4904	cas_program_bridge(pdev);
4905
4906	/*
4907	 * On some architectures, the default cache line size set
4908	 * by pci_try_set_mwi reduces performance.  We have to
4909	 * increase it for this case, so raise it toward the
4910	 * preferred value below.
4911	 */
4912#if 1
4913	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4914			     &orig_cacheline_size);
4915	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4916		cas_cacheline_size =
4917			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4918			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
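		/* raise the cache line size to the preferred value, but
		 * never beyond the CPU cache line (SMP_CACHE_BYTES).
		 */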
4919		if (pci_write_config_byte(pdev,
4920					  PCI_CACHE_LINE_SIZE,
4921					  cas_cacheline_size)) {
4922			dev_err(&pdev->dev, "Could not set PCI cache "
4923			       "line size\n");
4924			goto err_out_free_res;
4925		}
4926	}
4927#endif
4928
4929
4930	/* Configure DMA attributes. */
4931	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4932	if (err) {
4933		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4934		goto err_out_free_res;
4935	}
4936
4937	casreg_len = pci_resource_len(pdev, 0);
4938
4939	cp = netdev_priv(dev);
4940	cp->pdev = pdev;
4941#if 1
4942	/* A value of 0 indicates we never explicitly set it */
4943	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
4944#endif
4945	cp->dev = dev;
4946	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
4947	  cassini_debug;
4948
4949#if defined(CONFIG_SPARC)
4950	cp->of_node = pci_device_to_OF_node(pdev);
4951#endif
4952
4953	cp->link_transition = LINK_TRANSITION_UNKNOWN;
4954	cp->link_transition_jiffies_valid = 0;
4955
4956	spin_lock_init(&cp->lock);
4957	spin_lock_init(&cp->rx_inuse_lock);
4958	spin_lock_init(&cp->rx_spare_lock);
4959	for (i = 0; i < N_TX_RINGS; i++) {
4960		spin_lock_init(&cp->stat_lock[i]);
4961		spin_lock_init(&cp->tx_lock[i]);
4962	}
4963	spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
4964	mutex_init(&cp->pm_mutex);
4965
4966	timer_setup(&cp->link_timer, cas_link_timer, 0);
4967
4968#if 1
4969	/* Just in case the implementation of atomic operations
4970	 * changes so that an explicit initialization is necessary.
4971	 */
4972	atomic_set(&cp->reset_task_pending, 0);
4973	atomic_set(&cp->reset_task_pending_all, 0);
4974	atomic_set(&cp->reset_task_pending_spare, 0);
4975	atomic_set(&cp->reset_task_pending_mtu, 0);
4976#endif
4977	INIT_WORK(&cp->reset_task, cas_reset_task);
4978
4979	/* Default link parameters */
4980	if (link_mode >= 0 && link_mode < 6)
4981		cp->link_cntl = link_modes[link_mode];
4982	else
4983		cp->link_cntl = BMCR_ANENABLE;
4984	cp->lstate = link_down;
4985	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
4986	netif_carrier_off(cp->dev);
4987	cp->timer_ticks = 0;
4988
4989	/* give us access to cassini registers */
4990	cp->regs = pci_iomap(pdev, 0, casreg_len);
4991	if (!cp->regs) {
4992		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
4993		goto err_out_free_res;
4994	}
4995	cp->casreg_len = casreg_len;
4996
4997	pci_save_state(pdev);
4998	cas_check_pci_invariants(cp);
4999	cas_hard_reset(cp);
5000	cas_reset(cp, 0);
5001	if (cas_check_invariants(cp))
5002		goto err_out_iounmap;
5003	if (cp->cas_flags & CAS_FLAG_SATURN)
5004		cas_saturn_firmware_init(cp);
5005
5006	cp->init_block =
5007		dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
5008				   &cp->block_dvma, GFP_KERNEL);
5009	if (!cp->init_block) {
5010		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5011		goto err_out_iounmap;
5012	}
5013
5014	for (i = 0; i < N_TX_RINGS; i++)
5015		cp->init_txds[i] = cp->init_block->txds[i];
5016
5017	for (i = 0; i < N_RX_DESC_RINGS; i++)
5018		cp->init_rxds[i] = cp->init_block->rxds[i];
5019
5020	for (i = 0; i < N_RX_COMP_RINGS; i++)
5021		cp->init_rxcs[i] = cp->init_block->rxcs[i];
5022
5023	for (i = 0; i < N_RX_FLOWS; i++)
5024		skb_queue_head_init(&cp->rx_flows[i]);
5025
5026	dev->netdev_ops = &cas_netdev_ops;
5027	dev->ethtool_ops = &cas_ethtool_ops;
5028	dev->watchdog_timeo = CAS_TX_TIMEOUT;
5029
5030#ifdef USE_NAPI
5031	netif_napi_add(dev, &cp->napi, cas_poll);
5032#endif
5033	dev->irq = pdev->irq;
5034	dev->dma = 0;
5035
5036	/* Cassini features. */
5037	if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5038		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5039
5040	dev->features |= NETIF_F_HIGHDMA;
5041
5042	/* MTU range: 60 - varies or 9000 */
5043	dev->min_mtu = CAS_MIN_MTU;
5044	dev->max_mtu = CAS_MAX_MTU;
5045
5046	if (register_netdev(dev)) {
5047		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5048		goto err_out_free_consistent;
5049	}
5050
5051	i = readl(cp->regs + REG_BIM_CFG);
5052	netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5053		    (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5054		    (i & BIM_CFG_32BIT) ? "32" : "64",
5055		    (i & BIM_CFG_66MHZ) ? "66" : "33",
5056		    (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5057		    dev->dev_addr);
5058
5059	pci_set_drvdata(pdev, dev);
5060	cp->hw_running = 1;
5061	cas_entropy_reset(cp);
5062	cas_phy_init(cp);
5063	cas_begin_auto_negotiation(cp, NULL);
5064	return 0;
5065
5066err_out_free_consistent:
5067	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
5068			  cp->init_block, cp->block_dvma);
5069
5070err_out_iounmap:
5071	mutex_lock(&cp->pm_mutex);
5072	if (cp->hw_running)
5073		cas_shutdown(cp);
5074	mutex_unlock(&cp->pm_mutex);
5075
5076	vfree(cp->fw_data);
5077
5078	pci_iounmap(pdev, cp->regs);
5079
5080
5081err_out_free_res:
5082	pci_release_regions(pdev);
5083
5084	/* Try to restore it in case the error occurred after we
5085	 * set it.
5086	 */
5087	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5088
5089err_out_free_netdev:
5090	free_netdev(dev);
5091
5092err_out_disable_pdev:
5093	pci_disable_device(pdev);
5094	return -ENODEV;
5095}
5096
5097static void cas_remove_one(struct pci_dev *pdev)
5098{
5099	struct net_device *dev = pci_get_drvdata(pdev);
5100	struct cas *cp;
5101	if (!dev)
5102		return;
5103
5104	cp = netdev_priv(dev);
5105	unregister_netdev(dev);
5106
5107	vfree(cp->fw_data);
5108
5109	mutex_lock(&cp->pm_mutex);
5110	cancel_work_sync(&cp->reset_task);
5111	if (cp->hw_running)
5112		cas_shutdown(cp);
5113	mutex_unlock(&cp->pm_mutex);
5114
5115#if 1
5116	if (cp->orig_cacheline_size) {
5117		/* Restore the cache line size if we had modified
5118		 * it.
5119		 */
5120		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5121				      cp->orig_cacheline_size);
5122	}
5123#endif
5124	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
5125			  cp->init_block, cp->block_dvma);
5126	pci_iounmap(pdev, cp->regs);
5127	free_netdev(dev);
5128	pci_release_regions(pdev);
5129	pci_disable_device(pdev);
5130}
5131
5132static int __maybe_unused cas_suspend(struct device *dev_d)
5133{
5134	struct net_device *dev = dev_get_drvdata(dev_d);
5135	struct cas *cp = netdev_priv(dev);
5136	unsigned long flags;
5137
5138	mutex_lock(&cp->pm_mutex);
5139
5140	/* If the driver is opened, we stop the DMA */
5141	if (cp->opened) {
5142		netif_device_detach(dev);
5143
5144		cas_lock_all_save(cp, flags);
5145
5146		/* We can set the second arg of cas_reset to 0
5147		 * because on resume, we'll call cas_init_hw with
5148		 * its second arg set so that autonegotiation is
5149		 * restarted.
5150		 */
5151		cas_reset(cp, 0);
5152		cas_clean_rings(cp);
5153		cas_unlock_all_restore(cp, flags);
5154	}
5155
5156	if (cp->hw_running)
5157		cas_shutdown(cp);
5158	mutex_unlock(&cp->pm_mutex);
5159
5160	return 0;
5161}
5162
5163static int __maybe_unused cas_resume(struct device *dev_d)
5164{
5165	struct net_device *dev = dev_get_drvdata(dev_d);
5166	struct cas *cp = netdev_priv(dev);
5167
5168	netdev_info(dev, "resuming\n");
5169
5170	mutex_lock(&cp->pm_mutex);
5171	cas_hard_reset(cp);
5172	if (cp->opened) {
5173		unsigned long flags;
5174		cas_lock_all_save(cp, flags);
5175		cas_reset(cp, 0);
5176		cp->hw_running = 1;
5177		cas_clean_rings(cp);
5178		cas_init_hw(cp, 1);
5179		cas_unlock_all_restore(cp, flags);
5180
5181		netif_device_attach(dev);
5182	}
5183	mutex_unlock(&cp->pm_mutex);
5184	return 0;
5185}
5186
5187static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume);
5188
5189static struct pci_driver cas_driver = {
5190	.name		= DRV_MODULE_NAME,
5191	.id_table	= cas_pci_tbl,
5192	.probe		= cas_init_one,
5193	.remove		= cas_remove_one,
5194	.driver.pm	= &cas_pm_ops,
5195};
5196
5197static int __init cas_init(void)
5198{
5199	if (linkdown_timeout > 0)
5200		link_transition_timeout = linkdown_timeout * HZ;
5201	else
5202		link_transition_timeout = 0;
5203
5204	return pci_register_driver(&cas_driver);
5205}
5206
5207static void __exit cas_cleanup(void)
5208{
5209	pci_unregister_driver(&cas_driver);
5210}
5211
5212module_init(cas_init);
5213module_exit(cas_cleanup);
v3.5.6
 
   1/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
   2 *
   3 * Copyright (C) 2004 Sun Microsystems Inc.
   4 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation; either version 2 of the
   9 * License, or (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
  19 * 02111-1307, USA.
  20 *
  21 * This driver uses the sungem driver (c) David Miller
  22 * (davem@redhat.com) as its basis.
  23 *
  24 * The cassini chip has a number of features that distinguish it from
  25 * the gem chip:
  26 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
  27 *      load balancing (non-VLAN mode)
  28 *  batching of multiple packets
  29 *  multiple CPU dispatching
  30 *  page-based RX descriptor engine with separate completion rings
  31 *  Gigabit support (GMII and PCS interface)
  32 *  MIF link up/down detection works
  33 *
  34 * RX is handled by page sized buffers that are attached as fragments to
  35 * the skb. here's what's done:
  36 *  -- driver allocates pages at a time and keeps reference counts
  37 *     on them.
  38 *  -- the upper protocol layers assume that the header is in the skb
  39 *     itself. as a result, cassini will copy a small amount (64 bytes)
  40 *     to make them happy.
  41 *  -- driver appends the rest of the data pages as frags to skbuffs
  42 *     and increments the reference count
  43 *  -- on page reclamation, the driver swaps the page with a spare page.
  44 *     if that page is still in use, it frees its reference to that page,
  45 *     and allocates a new page for use. otherwise, it just recycles the
  46 *     the page.
  47 *
  48 * NOTE: cassini can parse the header. however, it's not worth it
  49 *       as long as the network stack requires a header copy.
  50 *
  51 * TX has 4 queues. currently these queues are used in a round-robin
  52 * fashion for load balancing. They can also be used for QoS. for that
  53 * to work, however, QoS information needs to be exposed down to the driver
  54 * level so that subqueues get targeted to particular transmit rings.
  55 * alternatively, the queues can be configured via use of the all-purpose
  56 * ioctl.
  57 *
  58 * RX DATA: the rx completion ring has all the info, but the rx desc
  59 * ring has all of the data. RX can conceivably come in under multiple
  60 * interrupts, but the INT# assignment needs to be set up properly by
  61 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
  62 * that. also, the two descriptor rings are designed to distinguish between
  63 * encrypted and non-encrypted packets, but we use them for buffering
  64 * instead.
  65 *
  66 * by default, the selective clear mask is set up to process rx packets.
  67 */
  68
  69#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  70
  71#include <linux/module.h>
  72#include <linux/kernel.h>
  73#include <linux/types.h>
  74#include <linux/compiler.h>
  75#include <linux/slab.h>
  76#include <linux/delay.h>
  77#include <linux/init.h>
  78#include <linux/interrupt.h>
  79#include <linux/vmalloc.h>
  80#include <linux/ioport.h>
  81#include <linux/pci.h>
  82#include <linux/mm.h>
  83#include <linux/highmem.h>
  84#include <linux/list.h>
  85#include <linux/dma-mapping.h>
  86
  87#include <linux/netdevice.h>
  88#include <linux/etherdevice.h>
  89#include <linux/skbuff.h>
  90#include <linux/ethtool.h>
  91#include <linux/crc32.h>
  92#include <linux/random.h>
  93#include <linux/mii.h>
  94#include <linux/ip.h>
  95#include <linux/tcp.h>
  96#include <linux/mutex.h>
  97#include <linux/firmware.h>
  98
  99#include <net/checksum.h>
 100
 101#include <linux/atomic.h>
 102#include <asm/io.h>
 103#include <asm/byteorder.h>
 104#include <asm/uaccess.h>
 
 105
 106#define cas_page_map(x)      kmap_atomic((x))
 107#define cas_page_unmap(x)    kunmap_atomic((x))
 108#define CAS_NCPUS            num_online_cpus()
 109
 110#define cas_skb_release(x)  netif_rx(x)
 111
 112/* select which firmware to use */
 113#define USE_HP_WORKAROUND
 114#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
 115#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */
 116
 117#include "cassini.h"
 118
 119#define USE_TX_COMPWB      /* use completion writeback registers */
 120#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
 121#define USE_RX_BLANK       /* hw interrupt mitigation */
 122#undef USE_ENTROPY_DEV     /* don't test for entropy device */
 123
 124/* NOTE: these aren't useable unless PCI interrupts can be assigned.
 125 * also, we need to make cp->lock finer-grained.
 126 */
 127#undef  USE_PCI_INTB
 128#undef  USE_PCI_INTC
 129#undef  USE_PCI_INTD
 130#undef  USE_QOS
 131
 132#undef  USE_VPD_DEBUG       /* debug vpd information if defined */
 133
 134/* rx processing options */
 135#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
 136#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
 137#define RX_COPY_ALWAYS 0    /* if 0, use frags */
 138#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
 139#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
 140
 141#define DRV_MODULE_NAME		"cassini"
 142#define DRV_MODULE_VERSION	"1.6"
 143#define DRV_MODULE_RELDATE	"21 May 2008"
 144
 145#define CAS_DEF_MSG_ENABLE	  \
 146	(NETIF_MSG_DRV		| \
 147	 NETIF_MSG_PROBE	| \
 148	 NETIF_MSG_LINK		| \
 149	 NETIF_MSG_TIMER	| \
 150	 NETIF_MSG_IFDOWN	| \
 151	 NETIF_MSG_IFUP		| \
 152	 NETIF_MSG_RX_ERR	| \
 153	 NETIF_MSG_TX_ERR)
 154
 155/* length of time before we decide the hardware is borked,
 156 * and dev->tx_timeout() should be called to fix the problem
 157 */
 158#define CAS_TX_TIMEOUT			(HZ)
 159#define CAS_LINK_TIMEOUT                (22*HZ/10)
 160#define CAS_LINK_FAST_TIMEOUT           (1)
 161
 162/* timeout values for state changing. these specify the number
 163 * of 10us delays to be used before giving up.
 164 */
 165#define STOP_TRIES_PHY 1000
 166#define STOP_TRIES     5000
 167
 168/* specify a minimum frame size to deal with some fifo issues
 169 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 170 *            2 * page_size - 0x50
 171 */
 172#define CAS_MIN_FRAME			97
 173#define CAS_1000MB_MIN_FRAME            255
 174#define CAS_MIN_MTU                     60
 175#define CAS_MAX_MTU                     min(((cp->page_size << 1) - 0x50), 9000)
 176
 177#if 1
 178/*
 179 * Eliminate these and use separate atomic counters for each, to
 180 * avoid a race condition.
 181 */
 182#else
 183#define CAS_RESET_MTU                   1
 184#define CAS_RESET_ALL                   2
 185#define CAS_RESET_SPARE                 3
 186#endif
 187
 188static char version[] __devinitdata =
 189	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 190
 191static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
 192static int link_mode;
 193
 194MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
 195MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
 196MODULE_LICENSE("GPL");
 197MODULE_FIRMWARE("sun/cassini.bin");
 198module_param(cassini_debug, int, 0);
 199MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
 200module_param(link_mode, int, 0);
 201MODULE_PARM_DESC(link_mode, "default link mode");
 202
 203/*
 204 * Work around for a PCS bug in which the link goes down due to the chip
 205 * being confused and never showing a link status of "up."
 206 */
 207#define DEFAULT_LINKDOWN_TIMEOUT 5
 208/*
 209 * Value in seconds, for user input.
 210 */
 211static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
 212module_param(linkdown_timeout, int, 0);
 213MODULE_PARM_DESC(linkdown_timeout,
 214"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
 215
 216/*
 217 * value in 'ticks' (units used by jiffies). Set when we init the
 218 * module because 'HZ' in actually a function call on some flavors of
 219 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 220 */
 221static int link_transition_timeout;
 222
 223
 224
 225static u16 link_modes[] __devinitdata = {
 226	BMCR_ANENABLE,			 /* 0 : autoneg */
 227	0,				 /* 1 : 10bt half duplex */
 228	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
 229	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
 230	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
 231	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
 232};
 233
 234static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
 235	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
 236	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 237	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
 238	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 239	{ 0, }
 240};
 241
 242MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
 243
 244static void cas_set_link_modes(struct cas *cp);
 245
 246static inline void cas_lock_tx(struct cas *cp)
 247{
 248	int i;
 249
 250	for (i = 0; i < N_TX_RINGS; i++)
 251		spin_lock(&cp->tx_lock[i]);
 252}
 253
 254static inline void cas_lock_all(struct cas *cp)
 255{
 256	spin_lock_irq(&cp->lock);
 257	cas_lock_tx(cp);
 258}
 259
 260/* WTZ: QA was finding deadlock problems with the previous
 261 * versions after long test runs with multiple cards per machine.
 262 * See if replacing cas_lock_all with safer versions helps. The
 263 * symptoms QA is reporting match those we'd expect if interrupts
 264 * aren't being properly restored, and we fixed a previous deadlock
 265 * with similar symptoms by using save/restore versions in other
 266 * places.
 267 */
 268#define cas_lock_all_save(cp, flags) \
 269do { \
 270	struct cas *xxxcp = (cp); \
 271	spin_lock_irqsave(&xxxcp->lock, flags); \
 272	cas_lock_tx(xxxcp); \
 273} while (0)
 274
 275static inline void cas_unlock_tx(struct cas *cp)
 276{
 277	int i;
 278
 279	for (i = N_TX_RINGS; i > 0; i--)
 280		spin_unlock(&cp->tx_lock[i - 1]);
 281}
 282
 283static inline void cas_unlock_all(struct cas *cp)
 284{
 285	cas_unlock_tx(cp);
 286	spin_unlock_irq(&cp->lock);
 287}
 288
 289#define cas_unlock_all_restore(cp, flags) \
 290do { \
 291	struct cas *xxxcp = (cp); \
 292	cas_unlock_tx(xxxcp); \
 293	spin_unlock_irqrestore(&xxxcp->lock, flags); \
 294} while (0)
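/* usage sketch for the save/restore pair:
 *
 *	unsigned long flags;
 *
 *	cas_lock_all_save(cp, flags);
 *	... touch state shared with the interrupt path ...
 *	cas_unlock_all_restore(cp, flags);
 *
 * note that the tx ring locks are taken in ascending ring order and
 * released in reverse, keeping the lock ordering consistent.
 */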
 295
 296static void cas_disable_irq(struct cas *cp, const int ring)
 297{
 298	/* Make sure we won't get any more interrupts */
 299	if (ring == 0) {
 300		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
 301		return;
 302	}
 303
 304	/* disable completion interrupts and selectively mask */
 305	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 306		switch (ring) {
 307#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 308#ifdef USE_PCI_INTB
 309		case 1:
 310#endif
 311#ifdef USE_PCI_INTC
 312		case 2:
 313#endif
 314#ifdef USE_PCI_INTD
 315		case 3:
 316#endif
 317			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
 318			       cp->regs + REG_PLUS_INTRN_MASK(ring));
 319			break;
 320#endif
 321		default:
 322			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
 323			       REG_PLUS_INTRN_MASK(ring));
 324			break;
 325		}
 326	}
 327}
 328
 329static inline void cas_mask_intr(struct cas *cp)
 330{
 331	int i;
 332
 333	for (i = 0; i < N_RX_COMP_RINGS; i++)
 334		cas_disable_irq(cp, i);
 335}
 336
 337static void cas_enable_irq(struct cas *cp, const int ring)
 338{
 339	if (ring == 0) { /* all but TX_DONE */
 340		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
 341		return;
 342	}
 343
 344	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
 345		switch (ring) {
 346#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
 347#ifdef USE_PCI_INTB
 348		case 1:
 349#endif
 350#ifdef USE_PCI_INTC
 351		case 2:
 352#endif
 353#ifdef USE_PCI_INTD
 354		case 3:
 355#endif
 356			writel(INTRN_MASK_RX_EN, cp->regs +
 357			       REG_PLUS_INTRN_MASK(ring));
 358			break;
 359#endif
 360		default:
 361			break;
 362		}
 363	}
 364}
 365
 366static inline void cas_unmask_intr(struct cas *cp)
 367{
 368	int i;
 369
 370	for (i = 0; i < N_RX_COMP_RINGS; i++)
 371		cas_enable_irq(cp, i);
 372}
 373
 374static inline void cas_entropy_gather(struct cas *cp)
 375{
 376#ifdef USE_ENTROPY_DEV
 377	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
 378		return;
 379
 380	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
 381			    readl(cp->regs + REG_ENTROPY_IV),
 382			    sizeof(uint64_t)*8);
 383#endif
 384}
 385
 386static inline void cas_entropy_reset(struct cas *cp)
 387{
 388#ifdef USE_ENTROPY_DEV
 389	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
 390		return;
 391
 392	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
 393	       cp->regs + REG_BIM_LOCAL_DEV_EN);
 394	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
 395	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
 396
 397	/* if we read back 0x0, we don't have an entropy device */
 398	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
 399		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
 400#endif
 401}
 402
 403/* access to the phy. the following assumes that we've initialized the MIF to
 404 * be in frame rather than bit-bang mode
 405 */
 406static u16 cas_phy_read(struct cas *cp, int reg)
 407{
 408	u32 cmd;
 409	int limit = STOP_TRIES_PHY;
 410
 411	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
 412	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
 413	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
 414	cmd |= MIF_FRAME_TURN_AROUND_MSB;
 415	writel(cmd, cp->regs + REG_MIF_FRAME);
 416
 417	/* poll for completion */
 418	while (limit-- > 0) {
 419		udelay(10);
 420		cmd = readl(cp->regs + REG_MIF_FRAME);
 421		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
 422			return cmd & MIF_FRAME_DATA_MASK;
 423	}
 424	return 0xFFFF; /* -1 */
 425}
 426
 427static int cas_phy_write(struct cas *cp, int reg, u16 val)
 428{
 429	int limit = STOP_TRIES_PHY;
 430	u32 cmd;
 431
 432	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
 433	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
 434	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
 435	cmd |= MIF_FRAME_TURN_AROUND_MSB;
 436	cmd |= val & MIF_FRAME_DATA_MASK;
 437	writel(cmd, cp->regs + REG_MIF_FRAME);
 438
 439	/* poll for completion */
 440	while (limit-- > 0) {
 441		udelay(10);
 442		cmd = readl(cp->regs + REG_MIF_FRAME);
 443		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
 444			return 0;
 445	}
 446	return -1;
 447}
 448
 449static void cas_phy_powerup(struct cas *cp)
 450{
 451	u16 ctl = cas_phy_read(cp, MII_BMCR);
 452
 453	if ((ctl & BMCR_PDOWN) == 0)
 454		return;
 455	ctl &= ~BMCR_PDOWN;
 456	cas_phy_write(cp, MII_BMCR, ctl);
 457}
 458
 459static void cas_phy_powerdown(struct cas *cp)
 460{
 461	u16 ctl = cas_phy_read(cp, MII_BMCR);
 462
 463	if (ctl & BMCR_PDOWN)
 464		return;
 465	ctl |= BMCR_PDOWN;
 466	cas_phy_write(cp, MII_BMCR, ctl);
 467}
 468
 469/* cp->lock held. note: the last put_page will free the buffer */
 470static int cas_page_free(struct cas *cp, cas_page_t *page)
 471{
 472	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
 473		       PCI_DMA_FROMDEVICE);
 474	__free_pages(page->buffer, cp->page_order);
 475	kfree(page);
 476	return 0;
 477}
 478
 479#ifdef RX_COUNT_BUFFERS
 480#define RX_USED_ADD(x, y)       ((x)->used += (y))
 481#define RX_USED_SET(x, y)       ((x)->used  = (y))
 482#else
 483#define RX_USED_ADD(x, y)
 484#define RX_USED_SET(x, y)
 485#endif
 486
 487/* local page allocation routines for the receive buffers. jumbo pages
 488 * require at least 8K contiguous and 8K aligned buffers.
 489 */
 490static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
 491{
 492	cas_page_t *page;
 493
 494	page = kmalloc(sizeof(cas_page_t), flags);
 495	if (!page)
 496		return NULL;
 497
 498	INIT_LIST_HEAD(&page->list);
 499	RX_USED_SET(page, 0);
 500	page->buffer = alloc_pages(flags, cp->page_order);
 501	if (!page->buffer)
 502		goto page_err;
 503	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
 504				      cp->page_size, PCI_DMA_FROMDEVICE);
 505	return page;
 506
 507page_err:
 508	kfree(page);
 509	return NULL;
 510}
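/* a stricter version of cas_page_alloc() would also verify the DMA
 * mapping before handing the page out. sketch, assuming the
 * two-argument pci_dma_mapping_error() of this API generation:
 *
 *	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
 *				      cp->page_size, PCI_DMA_FROMDEVICE);
 *	if (pci_dma_mapping_error(cp->pdev, page->dma_addr)) {
 *		__free_pages(page->buffer, cp->page_order);
 *		goto page_err;
 *	}
 */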
 511
 512/* initialize spare pool of rx buffers, but allocate during the open */
 513static void cas_spare_init(struct cas *cp)
 514{
 515	spin_lock(&cp->rx_inuse_lock);
 516	INIT_LIST_HEAD(&cp->rx_inuse_list);
 517	spin_unlock(&cp->rx_inuse_lock);
 518
 519	spin_lock(&cp->rx_spare_lock);
 520	INIT_LIST_HEAD(&cp->rx_spare_list);
 521	cp->rx_spares_needed = RX_SPARE_COUNT;
 522	spin_unlock(&cp->rx_spare_lock);
 523}
 524
 525/* used on close. free all the spare buffers. */
 526static void cas_spare_free(struct cas *cp)
 527{
 528	struct list_head list, *elem, *tmp;
 529
 530	/* free spare buffers */
 531	INIT_LIST_HEAD(&list);
 532	spin_lock(&cp->rx_spare_lock);
 533	list_splice_init(&cp->rx_spare_list, &list);
 534	spin_unlock(&cp->rx_spare_lock);
 535	list_for_each_safe(elem, tmp, &list) {
 536		cas_page_free(cp, list_entry(elem, cas_page_t, list));
 537	}
 538
 539	INIT_LIST_HEAD(&list);
 540#if 1
 541	/*
 542	 * Looks like Adrian had protected this with a different
 543	 * lock than used everywhere else to manipulate this list.
 544	 */
 545	spin_lock(&cp->rx_inuse_lock);
 546	list_splice_init(&cp->rx_inuse_list, &list);
 547	spin_unlock(&cp->rx_inuse_lock);
 548#else
 549	spin_lock(&cp->rx_spare_lock);
 550	list_splice_init(&cp->rx_inuse_list, &list);
 551	spin_unlock(&cp->rx_spare_lock);
 552#endif
 553	list_for_each_safe(elem, tmp, &list) {
 554		cas_page_free(cp, list_entry(elem, cas_page_t, list));
 555	}
 556}
 557
 558/* replenish spares if needed */
 559static void cas_spare_recover(struct cas *cp, const gfp_t flags)
 560{
 561	struct list_head list, *elem, *tmp;
 562	int needed, i;
 563
 564	/* check inuse list. if we don't need any more free buffers,
 565	 * just free them.
 566	 */
 567
 568	/* make a local copy of the list */
 569	INIT_LIST_HEAD(&list);
 570	spin_lock(&cp->rx_inuse_lock);
 571	list_splice_init(&cp->rx_inuse_list, &list);
 572	spin_unlock(&cp->rx_inuse_lock);
 573
 574	list_for_each_safe(elem, tmp, &list) {
 575		cas_page_t *page = list_entry(elem, cas_page_t, list);
 576
 577		/*
 578		 * With the lockless pagecache, the cassini buffering scheme gets
 579		 * slightly less accurate: we might find that a page has an
 580		 * elevated reference count here, due to a speculative ref,
 581		 * and skip it as in-use. Ideally we would be able to reclaim
 582		 * it. However this would be such a rare case, it doesn't
 583		 * matter too much as we should pick it up the next time round.
 584		 *
 585		 * Importantly, if we find that the page has a refcount of 1
 586		 * here (our refcount), then we know it is definitely not inuse
 587		 * so we can reuse it.
 588		 */
 589		if (page_count(page->buffer) > 1)
 590			continue;
 591
 592		list_del(elem);
 593		spin_lock(&cp->rx_spare_lock);
 594		if (cp->rx_spares_needed > 0) {
 595			list_add(elem, &cp->rx_spare_list);
 596			cp->rx_spares_needed--;
 597			spin_unlock(&cp->rx_spare_lock);
 598		} else {
 599			spin_unlock(&cp->rx_spare_lock);
 600			cas_page_free(cp, page);
 601		}
 602	}
 603
 604	/* put any inuse buffers back on the list */
 605	if (!list_empty(&list)) {
 606		spin_lock(&cp->rx_inuse_lock);
 607		list_splice(&list, &cp->rx_inuse_list);
 608		spin_unlock(&cp->rx_inuse_lock);
 609	}
 610
 611	spin_lock(&cp->rx_spare_lock);
 612	needed = cp->rx_spares_needed;
 613	spin_unlock(&cp->rx_spare_lock);
 614	if (!needed)
 615		return;
 616
 617	/* we still need spares, so try to allocate some */
 618	INIT_LIST_HEAD(&list);
 619	i = 0;
 620	while (i < needed) {
 621		cas_page_t *spare = cas_page_alloc(cp, flags);
 622		if (!spare)
 623			break;
 624		list_add(&spare->list, &list);
 625		i++;
 626	}
 627
 628	spin_lock(&cp->rx_spare_lock);
 629	list_splice(&list, &cp->rx_spare_list);
 630	cp->rx_spares_needed -= i;
 631	spin_unlock(&cp->rx_spare_lock);
 632}
 633
 634/* pull a page from the list. */
 635static cas_page_t *cas_page_dequeue(struct cas *cp)
 636{
 637	struct list_head *entry;
 638	int recover;
 639
 640	spin_lock(&cp->rx_spare_lock);
 641	if (list_empty(&cp->rx_spare_list)) {
 642		/* try to do a quick recovery */
 643		spin_unlock(&cp->rx_spare_lock);
 644		cas_spare_recover(cp, GFP_ATOMIC);
 645		spin_lock(&cp->rx_spare_lock);
 646		if (list_empty(&cp->rx_spare_list)) {
 647			netif_err(cp, rx_err, cp->dev,
 648				  "no spare buffers available\n");
 649			spin_unlock(&cp->rx_spare_lock);
 650			return NULL;
 651		}
 652	}
 653
 654	entry = cp->rx_spare_list.next;
 655	list_del(entry);
 656	recover = ++cp->rx_spares_needed;
 657	spin_unlock(&cp->rx_spare_lock);
 658
 659	/* trigger the timer to do the recovery */
 660	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
 661#if 1
 662		atomic_inc(&cp->reset_task_pending);
 663		atomic_inc(&cp->reset_task_pending_spare);
 664		schedule_work(&cp->reset_task);
 665#else
 666		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
 667		schedule_work(&cp->reset_task);
 668#endif
 669	}
 670	return list_entry(entry, cas_page_t, list);
 671}
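/* note: the recovery trigger above relies on RX_SPARE_RECOVER_VAL
 * being a power of two. e.g. if it were 64, the reset task would be
 * scheduled once per 64 dequeues rather than on every depleted spare.
 */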
 672
 673
 674static void cas_mif_poll(struct cas *cp, const int enable)
 675{
 676	u32 cfg;
 677
 678	cfg  = readl(cp->regs + REG_MIF_CFG);
 679	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
 680
 681	if (cp->phy_type & CAS_PHY_MII_MDIO1)
 682		cfg |= MIF_CFG_PHY_SELECT;
 683
 684	/* poll and interrupt on link status change. */
 685	if (enable) {
 686		cfg |= MIF_CFG_POLL_EN;
 687		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
 688		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
 689	}
 690	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
 691	       cp->regs + REG_MIF_MASK);
 692	writel(cfg, cp->regs + REG_MIF_CFG);
 693}
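/* note: REG_MIF_MASK is a mask register, so the enable path above
 * leaves only BMSR_LSTATUS and BMSR_ANEGCOMPLETE transitions
 * unmasked, while writing 0xFFFF masks all status changes when
 * polling is disabled.
 */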
 694
 695/* Must be invoked under cp->lock */
 696static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
 697{
 698	u16 ctl;
 699#if 1
 700	int lcntl;
 701	int changed = 0;
 702	int oldstate = cp->lstate;
 703	int link_was_not_down = (oldstate != link_down);
 704#endif
 705	/* Setup link parameters */
 706	if (!ep)
 707		goto start_aneg;
 708	lcntl = cp->link_cntl;
 709	if (ep->autoneg == AUTONEG_ENABLE)
 710		cp->link_cntl = BMCR_ANENABLE;
 711	else {
 712		u32 speed = ethtool_cmd_speed(ep);
 713		cp->link_cntl = 0;
 714		if (speed == SPEED_100)
 715			cp->link_cntl |= BMCR_SPEED100;
 716		else if (speed == SPEED_1000)
 717			cp->link_cntl |= CAS_BMCR_SPEED1000;
 718		if (ep->duplex == DUPLEX_FULL)
 719			cp->link_cntl |= BMCR_FULLDPLX;
 720	}
 721#if 1
 722	changed = (lcntl != cp->link_cntl);
 723#endif
 724start_aneg:
 725	if (cp->lstate == link_up) {
 726		netdev_info(cp->dev, "PCS link down\n");
 727	} else {
 728		if (changed) {
 729			netdev_info(cp->dev, "link configuration changed\n");
 730		}
 731	}
 732	cp->lstate = link_down;
 733	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
 734	if (!cp->hw_running)
 735		return;
 736#if 1
 737	/*
 738	 * WTZ: If the old state was link_up, we turn off the carrier
 739	 * to replicate everything we do elsewhere on a link-down
 740	 * event when we were already in a link-up state.
 741	 */
 742	if (oldstate == link_up)
 743		netif_carrier_off(cp->dev);
 744	if (changed && link_was_not_down) {
 745		/*
 746		 * WTZ: This branch will simply schedule a full reset after
 747		 * we explicitly changed link modes in an ioctl. See if this
 748		 * fixes the link-problems we were having for forced mode.
 749		 */
 750		atomic_inc(&cp->reset_task_pending);
 751		atomic_inc(&cp->reset_task_pending_all);
 752		schedule_work(&cp->reset_task);
 753		cp->timer_ticks = 0;
 754		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 755		return;
 756	}
 757#endif
 758	if (cp->phy_type & CAS_PHY_SERDES) {
 759		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
 760
 761		if (cp->link_cntl & BMCR_ANENABLE) {
 762			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
 763			cp->lstate = link_aneg;
 764		} else {
 765			if (cp->link_cntl & BMCR_FULLDPLX)
 766				val |= PCS_MII_CTRL_DUPLEX;
 767			val &= ~PCS_MII_AUTONEG_EN;
 768			cp->lstate = link_force_ok;
 769		}
 770		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 771		writel(val, cp->regs + REG_PCS_MII_CTRL);
 772
 773	} else {
 774		cas_mif_poll(cp, 0);
 775		ctl = cas_phy_read(cp, MII_BMCR);
 776		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
 777			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
 778		ctl |= cp->link_cntl;
 779		if (ctl & BMCR_ANENABLE) {
 780			ctl |= BMCR_ANRESTART;
 781			cp->lstate = link_aneg;
 782		} else {
 783			cp->lstate = link_force_ok;
 784		}
 785		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
 786		cas_phy_write(cp, MII_BMCR, ctl);
 787		cas_mif_poll(cp, 1);
 788	}
 789
 790	cp->timer_ticks = 0;
 791	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
 792}
 793
 794/* Must be invoked under cp->lock. */
 795static int cas_reset_mii_phy(struct cas *cp)
 796{
 797	int limit = STOP_TRIES_PHY;
 798	u16 val;
 799
 800	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
 801	udelay(100);
 802	while (--limit) {
 803		val = cas_phy_read(cp, MII_BMCR);
 804		if ((val & BMCR_RESET) == 0)
 805			break;
 806		udelay(10);
 807	}
 808	return limit <= 0;
 809}
 810
 811static int cas_saturn_firmware_init(struct cas *cp)
 812{
 813	const struct firmware *fw;
 814	const char fw_name[] = "sun/cassini.bin";
 815	int err;
 816
 817	if (PHY_NS_DP83065 != cp->phy_id)
 818		return 0;
 819
 820	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
 821	if (err) {
 822		pr_err("Failed to load firmware \"%s\"\n",
 823		       fw_name);
 824		return err;
 825	}
 826	if (fw->size < 2) {
 827		pr_err("bogus length %zu in \"%s\"\n",
 828		       fw->size, fw_name);
 829		err = -EINVAL;
 830		goto out;
 831	}
 832	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
 833	cp->fw_size = fw->size - 2;
 834	cp->fw_data = vmalloc(cp->fw_size);
 835	if (!cp->fw_data) {
 836		err = -ENOMEM;
 837		goto out;
 838	}
 839	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
 840out:
 841	release_firmware(fw);
 842	return err;
 843}
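/* the firmware image layout assumed above: bytes 0 and 1 hold the
 * load address in little-endian order and the rest is raw data. e.g.
 * a blob beginning 0x00 0x8f ... would load at 0x8f00.
 */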
 844
 845static void cas_saturn_firmware_load(struct cas *cp)
 846{
 847	int i;
 848
 849	cas_phy_powerdown(cp);
 850
 851	/* expanded memory access mode */
 852	cas_phy_write(cp, DP83065_MII_MEM, 0x0);
 853
 854	/* pointer configuration for new firmware */
 855	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
 856	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
 857	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
 858	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
 859	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
 860	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
 861	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
 862	cas_phy_write(cp, DP83065_MII_REGD, 0x39);
 863
 864	/* download new firmware */
 865	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
 866	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
 867	for (i = 0; i < cp->fw_size; i++)
 868		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
 869
 870	/* enable firmware */
 871	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
 872	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
 873}
 874
 875
 876/* phy initialization */
 877static void cas_phy_init(struct cas *cp)
 878{
 879	u16 val;
 880
 881	/* if we're in MII/GMII mode, set up phy */
 882	if (CAS_PHY_MII(cp->phy_type)) {
 883		writel(PCS_DATAPATH_MODE_MII,
 884		       cp->regs + REG_PCS_DATAPATH_MODE);
 885
 886		cas_mif_poll(cp, 0);
 887		cas_reset_mii_phy(cp); /* take out of isolate mode */
 888
 889		if (PHY_LUCENT_B0 == cp->phy_id) {
 890			/* workaround link up/down issue with lucent */
 891			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
 892			cas_phy_write(cp, MII_BMCR, 0x00f1);
 893			cas_phy_write(cp, LUCENT_MII_REG, 0x0);
 894
 895		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
 896			/* workarounds for broadcom phy */
 897			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
 898			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
 899			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
 900			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
 901			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
 902			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
 903			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
 904			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
 905			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
 906			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
 907			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
 908
 909		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
 910			val = cas_phy_read(cp, BROADCOM_MII_REG4);
 911			val = cas_phy_read(cp, BROADCOM_MII_REG4);
 912			if (val & 0x0080) {
 913				/* link workaround */
 914				cas_phy_write(cp, BROADCOM_MII_REG4,
 915					      val & ~0x0080);
 916			}
 917
 918		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
 919			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
 920			       SATURN_PCFG_FSI : 0x0,
 921			       cp->regs + REG_SATURN_PCFG);
 922
 923			/* load firmware to address 10Mbps auto-negotiation
 924			 * issue. NOTE: this will need to be changed if the
 925			 * default firmware gets fixed.
 926			 */
 927			if (PHY_NS_DP83065 == cp->phy_id) {
 928				cas_saturn_firmware_load(cp);
 929			}
 930			cas_phy_powerup(cp);
 931		}
 932
 933		/* advertise capabilities */
 934		val = cas_phy_read(cp, MII_BMCR);
 935		val &= ~BMCR_ANENABLE;
 936		cas_phy_write(cp, MII_BMCR, val);
 937		udelay(10);
 938
 939		cas_phy_write(cp, MII_ADVERTISE,
 940			      cas_phy_read(cp, MII_ADVERTISE) |
 941			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
 942			       ADVERTISE_100HALF | ADVERTISE_100FULL |
 943			       CAS_ADVERTISE_PAUSE |
 944			       CAS_ADVERTISE_ASYM_PAUSE));
 945
 946		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
 947			/* make sure that we don't advertise half
 948			 * duplex to avoid a chip issue
 949			 */
 950			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
 951			val &= ~CAS_ADVERTISE_1000HALF;
 952			val |= CAS_ADVERTISE_1000FULL;
 953			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
 954		}
 955
 956	} else {
 957		/* reset pcs for serdes */
 958		u32 val;
 959		int limit;
 960
 961		writel(PCS_DATAPATH_MODE_SERDES,
 962		       cp->regs + REG_PCS_DATAPATH_MODE);
 963
 964		/* enable serdes pins on saturn */
 965		if (cp->cas_flags & CAS_FLAG_SATURN)
 966			writel(0, cp->regs + REG_SATURN_PCFG);
 967
 968		/* Reset PCS unit. */
 969		val = readl(cp->regs + REG_PCS_MII_CTRL);
 970		val |= PCS_MII_RESET;
 971		writel(val, cp->regs + REG_PCS_MII_CTRL);
 972
 973		limit = STOP_TRIES;
 974		while (--limit > 0) {
 975			udelay(10);
 976			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
 977			     PCS_MII_RESET) == 0)
 978				break;
 979		}
 980		if (limit <= 0)
 981			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
 982				    readl(cp->regs + REG_PCS_STATE_MACHINE));
 983
 984		/* Make sure PCS is disabled while changing advertisement
 985		 * configuration.
 986		 */
 987		writel(0x0, cp->regs + REG_PCS_CFG);
 988
 989		/* Advertise all capabilities except half-duplex. */
 990		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
 991		val &= ~PCS_MII_ADVERT_HD;
 992		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
 993			PCS_MII_ADVERT_ASYM_PAUSE);
 994		writel(val, cp->regs + REG_PCS_MII_ADVERT);
 995
 996		/* enable PCS */
 997		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
 998
 999		/* pcs workaround: enable sync detect */
1000		writel(PCS_SERDES_CTRL_SYNCD_EN,
1001		       cp->regs + REG_PCS_SERDES_CTRL);
1002	}
1003}
1004
1005
1006static int cas_pcs_link_check(struct cas *cp)
1007{
1008	u32 stat, state_machine;
1009	int retval = 0;
1010
1011	/* The link status bit latches on zero, so you must
1012	 * read it twice in such a case to see a transition
1013	 * to the link being up.
1014	 */
1015	stat = readl(cp->regs + REG_PCS_MII_STATUS);
1016	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
1017		stat = readl(cp->regs + REG_PCS_MII_STATUS);
1018
1019	/* The remote-fault indication is only valid
1020	 * when autoneg has completed.
1021	 */
1022	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
1023		     PCS_MII_STATUS_REMOTE_FAULT)) ==
1024	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
1025		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
1026
1027	/* work around link detection issue by querying the PCS state
1028	 * machine directly.
1029	 */
1030	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
1031	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1032		stat &= ~PCS_MII_STATUS_LINK_STATUS;
1033	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1034		stat |= PCS_MII_STATUS_LINK_STATUS;
1035	}
1036
1037	if (stat & PCS_MII_STATUS_LINK_STATUS) {
1038		if (cp->lstate != link_up) {
1039			if (cp->opened) {
1040				cp->lstate = link_up;
1041				cp->link_transition = LINK_TRANSITION_LINK_UP;
1042
1043				cas_set_link_modes(cp);
1044				netif_carrier_on(cp->dev);
1045			}
1046		}
1047	} else if (cp->lstate == link_up) {
1048		cp->lstate = link_down;
1049		if (link_transition_timeout != 0 &&
1050		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1051		    !cp->link_transition_jiffies_valid) {
1052			/*
1053			 * force a reset, as a workaround for the
1054			 * link-failure problem. May want to move this to a
1055			 * point a bit earlier in the sequence. If we had
1056			 * generated a reset a short time ago, we'll wait for
1057			 * the link timer to check the status until a
1058	 * timer expires (link_transition_jiffies_valid is
1059			 * true when the timer is running.)  Instead of using
1060			 * a system timer, we just do a check whenever the
1061			 * link timer is running - this clears the flag after
1062			 * a suitable delay.
1063			 */
1064			retval = 1;
1065			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1066			cp->link_transition_jiffies = jiffies;
1067			cp->link_transition_jiffies_valid = 1;
1068		} else {
1069			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1070		}
1071		netif_carrier_off(cp->dev);
1072		if (cp->opened)
1073			netif_info(cp, link, cp->dev, "PCS link down\n");
1074
1075		/* Cassini only: if you force a mode, there can be
1076		 * sync problems on link down. to fix that, the following
1077		 * things need to be checked:
1078		 * 1) read serialink state register
1079		 * 2) read pcs status register to verify link down.
1080		 * 3) if link down and serial link == 0x03, then you need
1081		 *    to global reset the chip.
1082		 */
1083		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1084			/* should check to see if we're in a forced mode */
1085			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1086			if (stat == 0x03)
1087				return 1;
1088		}
1089	} else if (cp->lstate == link_down) {
1090		if (link_transition_timeout != 0 &&
1091		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1092		    !cp->link_transition_jiffies_valid) {
1093			/* force a reset, as a workaround for the
1094			 * link-failure problem.  May want to move
1095			 * this to a point a bit earlier in the
1096			 * sequence.
1097			 */
1098			retval = 1;
1099			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1100			cp->link_transition_jiffies = jiffies;
1101			cp->link_transition_jiffies_valid = 1;
1102		} else {
1103			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1104		}
1105	}
1106
1107	return retval;
1108}
1109
1110static int cas_pcs_interrupt(struct net_device *dev,
1111			     struct cas *cp, u32 status)
1112{
1113	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1114
1115	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1116		return 0;
1117	return cas_pcs_link_check(cp);
1118}
1119
1120static int cas_txmac_interrupt(struct net_device *dev,
1121			       struct cas *cp, u32 status)
1122{
1123	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1124
1125	if (!txmac_stat)
1126		return 0;
1127
1128	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1129		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
1130
1131	/* Defer timer expiration is quite normal,
1132	 * don't even log the event.
1133	 */
1134	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1135	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1136		return 0;
1137
1138	spin_lock(&cp->stat_lock[0]);
1139	if (txmac_stat & MAC_TX_UNDERRUN) {
1140		netdev_err(dev, "TX MAC xmit underrun\n");
1141		cp->net_stats[0].tx_fifo_errors++;
1142	}
1143
1144	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1145		netdev_err(dev, "TX MAC max packet size error\n");
1146		cp->net_stats[0].tx_errors++;
1147	}
1148
1149	/* The rest are all cases of one of the 16-bit TX
1150	 * counters expiring.
1151	 */
1152	if (txmac_stat & MAC_TX_COLL_NORMAL)
1153		cp->net_stats[0].collisions += 0x10000;
1154
1155	if (txmac_stat & MAC_TX_COLL_EXCESS) {
1156		cp->net_stats[0].tx_aborted_errors += 0x10000;
1157		cp->net_stats[0].collisions += 0x10000;
1158	}
1159
1160	if (txmac_stat & MAC_TX_COLL_LATE) {
1161		cp->net_stats[0].tx_aborted_errors += 0x10000;
1162		cp->net_stats[0].collisions += 0x10000;
1163	}
1164	spin_unlock(&cp->stat_lock[0]);
1165
1166	/* We do not keep track of MAC_TX_COLL_FIRST and
1167	 * MAC_TX_PEAK_ATTEMPTS events.
1168	 */
1169	return 0;
1170}
1171
1172static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1173{
1174	cas_hp_inst_t *inst;
1175	u32 val;
1176	int i;
1177
1178	i = 0;
1179	while ((inst = firmware) && inst->note) {
1180		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1181
1182		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1183		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1184		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1185
1186		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1187		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1188		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1189		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1190		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1191		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1192		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1193		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1194
1195		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1196		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1197		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1198		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1199		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1200		++firmware;
1201		++i;
1202	}
1203}
1204
1205static void cas_init_rx_dma(struct cas *cp)
1206{
1207	u64 desc_dma = cp->block_dvma;
1208	u32 val;
1209	int i, size;
1210
1211	/* rx free descriptors */
1212	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1213	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1214	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1215	if ((N_RX_DESC_RINGS > 1) &&
1216	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
1217		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1218	writel(val, cp->regs + REG_RX_CFG);
1219
1220	val = (unsigned long) cp->init_rxds[0] -
1221		(unsigned long) cp->init_block;
1222	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1223	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1224	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1225
1226	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1227		/* rx desc 2 is for IPSEC packets. however,
1228	 * we don't use it for that purpose.
1229		 */
1230		val = (unsigned long) cp->init_rxds[1] -
1231			(unsigned long) cp->init_block;
1232		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1233		writel((desc_dma + val) & 0xffffffff, cp->regs +
1234		       REG_PLUS_RX_DB1_LOW);
1235		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1236		       REG_PLUS_RX_KICK1);
1237	}
1238
1239	/* rx completion registers */
1240	val = (unsigned long) cp->init_rxcs[0] -
1241		(unsigned long) cp->init_block;
1242	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1243	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1244
1245	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1246		/* rx comp 2-4 */
1247		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1248			val = (unsigned long) cp->init_rxcs[i] -
1249				(unsigned long) cp->init_block;
1250			writel((desc_dma + val) >> 32, cp->regs +
1251			       REG_PLUS_RX_CBN_HI(i));
1252			writel((desc_dma + val) & 0xffffffff, cp->regs +
1253			       REG_PLUS_RX_CBN_LOW(i));
1254		}
1255	}
1256
1257	/* read selective clear regs to prevent spurious interrupts
1258	 * on reset because complete == kick.
1259	 * the selective clear mask is set up to prevent interrupts on resets.
1260	 */
1261	readl(cp->regs + REG_INTR_STATUS_ALIAS);
1262	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1263	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1264		for (i = 1; i < N_RX_COMP_RINGS; i++)
1265			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
1266
1267		/* 2 is different from 3 and 4 */
1268		if (N_RX_COMP_RINGS > 1)
1269			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
1270			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));
1271
1272		for (i = 2; i < N_RX_COMP_RINGS; i++)
1273			writel(INTR_RX_DONE_ALT,
1274			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
1275	}
1276
1277	/* set up pause thresholds */
1278	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
1279			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1280	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1281			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1282	writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1283
1284	/* zero out dma reassembly buffers */
1285	for (i = 0; i < 64; i++) {
1286		writel(i, cp->regs + REG_RX_TABLE_ADDR);
1287		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1288		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1289		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1290	}
1291
1292	/* make sure address register is 0 for normal operation */
1293	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1294	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1295
1296	/* interrupt mitigation */
1297#ifdef USE_RX_BLANK
1298	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1299	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1300	writel(val, cp->regs + REG_RX_BLANK);
1301#else
1302	writel(0x0, cp->regs + REG_RX_BLANK);
1303#endif
1304
1305	/* interrupt generation as a function of low water marks for
1306	 * free desc and completion entries. these are used to trigger
1307	 * housekeeping for rx descs. we don't use the free interrupt
1308	 * as it's not very useful
1309	 */
1310	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
1311	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1312	writel(val, cp->regs + REG_RX_AE_THRESH);
1313	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1314		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1315		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1316	}
1317
1318	/* Random early detect registers. useful for congestion avoidance.
1319	 * this should be tunable.
1320	 */
1321	writel(0x0, cp->regs + REG_RX_RED);
1322
1323	/* receive page sizes. default == 2K (0x800) */
1324	val = 0;
1325	if (cp->page_size == 0x1000)
1326		val = 0x1;
1327	else if (cp->page_size == 0x2000)
1328		val = 0x2;
1329	else if (cp->page_size == 0x4000)
1330		val = 0x3;
1331
1332	/* round mtu + offset. constrain to page size. */
1333	size = cp->dev->mtu + 64;
1334	if (size > cp->page_size)
1335		size = cp->page_size;
1336
1337	if (size <= 0x400)
1338		i = 0x0;
1339	else if (size <= 0x800)
1340		i = 0x1;
1341	else if (size <= 0x1000)
1342		i = 0x2;
1343	else
1344		i = 0x3;
1345
1346	cp->mtu_stride = 1 << (i + 10);
1347	val  = CAS_BASE(RX_PAGE_SIZE, val);
1348	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1349	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1350	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1351	writel(val, cp->regs + REG_RX_PAGE_SIZE);
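	/* e.g. a standard 1500 byte mtu: size = 1500 + 64 = 1564, so
	 * i = 0x1 and mtu_stride = 1 << 11 = 2048; with 8K pages the
	 * chip then fits 0x2000 >> 11 = 4 mtu strides per page.
	 */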
1352
1353	/* enable the header parser if desired */
1354	if (CAS_HP_FIRMWARE == cas_prog_null)
1355		return;
1356
1357	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1358	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1359	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1360	writel(val, cp->regs + REG_HP_CFG);
1361}
1362
1363static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1364{
1365	memset(rxc, 0, sizeof(*rxc));
1366	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1367}
1368
1369/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1370 * flipping is protected by the fact that the chip will not
1371 * hand back the same page index while it's being processed.
1372 */
1373static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1374{
1375	cas_page_t *page = cp->rx_pages[1][index];
1376	cas_page_t *new;
1377
1378	if (page_count(page->buffer) == 1)
1379		return page;
1380
1381	new = cas_page_dequeue(cp);
1382	if (new) {
1383		spin_lock(&cp->rx_inuse_lock);
1384		list_add(&page->list, &cp->rx_inuse_list);
1385		spin_unlock(&cp->rx_inuse_lock);
1386	}
1387	return new;
1388}
1389
1390/* this needs to be changed if we actually use the ENC RX DESC ring */
1391static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1392				 const int index)
1393{
1394	cas_page_t **page0 = cp->rx_pages[0];
1395	cas_page_t **page1 = cp->rx_pages[1];
1396
1397	/* swap if buffer is in use */
1398	if (page_count(page0[index]->buffer) > 1) {
1399		cas_page_t *new = cas_page_spare(cp, index);
1400		if (new) {
1401			page1[index] = page0[index];
1402			page0[index] = new;
1403		}
1404	}
1405	RX_USED_SET(page0[index], 0);
1406	return page0[index];
1407}
1408
1409static void cas_clean_rxds(struct cas *cp)
1410{
1411	/* only clean ring 0 as ring 1 is used for spare buffers */
1412	struct cas_rx_desc *rxd = cp->init_rxds[0];
1413	int i, size;
1414
1415	/* release all rx flows */
1416	for (i = 0; i < N_RX_FLOWS; i++) {
1417		struct sk_buff *skb;
1418		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1419			cas_skb_release(skb);
1420		}
1421	}
1422
1423	/* initialize descriptors */
1424	size = RX_DESC_RINGN_SIZE(0);
1425	for (i = 0; i < size; i++) {
1426		cas_page_t *page = cas_page_swap(cp, 0, i);
1427		rxd[i].buffer = cpu_to_le64(page->dma_addr);
1428		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1429					    CAS_BASE(RX_INDEX_RING, 0));
1430	}
1431
1432	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
1433	cp->rx_last[0] = 0;
1434	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1435}
1436
1437static void cas_clean_rxcs(struct cas *cp)
1438{
1439	int i, j;
1440
1441	/* take ownership of rx comp descriptors */
1442	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1443	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1444	for (i = 0; i < N_RX_COMP_RINGS; i++) {
1445		struct cas_rx_comp *rxc = cp->init_rxcs[i];
1446		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1447			cas_rxc_init(rxc + j);
1448		}
1449	}
1450}
1451
1452#if 0
1453/* When we get a RX fifo overflow, the RX unit is probably hung
1454 * so we do the following.
1455 *
1456 * If any part of the reset goes wrong, we return 1 and that causes the
1457 * whole chip to be reset.
1458 */
1459static int cas_rxmac_reset(struct cas *cp)
1460{
1461	struct net_device *dev = cp->dev;
1462	int limit;
1463	u32 val;
1464
1465	/* First, reset MAC RX. */
1466	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1467	for (limit = 0; limit < STOP_TRIES; limit++) {
1468		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1469			break;
1470		udelay(10);
1471	}
1472	if (limit == STOP_TRIES) {
1473		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1474		return 1;
1475	}
1476
1477	/* Second, disable RX DMA. */
1478	writel(0, cp->regs + REG_RX_CFG);
1479	for (limit = 0; limit < STOP_TRIES; limit++) {
1480		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1481			break;
1482		udelay(10);
1483	}
1484	if (limit == STOP_TRIES) {
1485		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1486		return 1;
1487	}
1488
1489	mdelay(5);
1490
1491	/* Execute RX reset command. */
1492	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1493	for (limit = 0; limit < STOP_TRIES; limit++) {
1494		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1495			break;
1496		udelay(10);
1497	}
1498	if (limit == STOP_TRIES) {
1499		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1500		return 1;
1501	}
1502
1503	/* reset driver rx state */
1504	cas_clean_rxds(cp);
1505	cas_clean_rxcs(cp);
1506
1507	/* Now, reprogram the rest of RX unit. */
1508	cas_init_rx_dma(cp);
1509
1510	/* re-enable */
1511	val = readl(cp->regs + REG_RX_CFG);
1512	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1513	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1514	val = readl(cp->regs + REG_MAC_RX_CFG);
1515	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1516	return 0;
1517}
1518#endif
1519
1520static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1521			       u32 status)
1522{
1523	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1524
1525	if (!stat)
1526		return 0;
1527
1528	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1529
1530	/* these are all rollovers */
1531	spin_lock(&cp->stat_lock[0]);
1532	if (stat & MAC_RX_ALIGN_ERR)
1533		cp->net_stats[0].rx_frame_errors += 0x10000;
1534
1535	if (stat & MAC_RX_CRC_ERR)
1536		cp->net_stats[0].rx_crc_errors += 0x10000;
1537
1538	if (stat & MAC_RX_LEN_ERR)
1539		cp->net_stats[0].rx_length_errors += 0x10000;
1540
1541	if (stat & MAC_RX_OVERFLOW) {
1542		cp->net_stats[0].rx_over_errors++;
1543		cp->net_stats[0].rx_fifo_errors++;
1544	}
1545
1546	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
1547	 * events.
1548	 */
1549	spin_unlock(&cp->stat_lock[0]);
1550	return 0;
1551}
1552
1553static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1554			     u32 status)
1555{
1556	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1557
1558	if (!stat)
1559		return 0;
1560
1561	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1562		     "mac interrupt, stat: 0x%x\n", stat);
1563
1564	/* This interrupt is just for pause frame and pause
1565	 * tracking.  It is useful for diagnostics and debug
1566	 * but probably by default we will mask these events.
1567	 */
1568	if (stat & MAC_CTRL_PAUSE_STATE)
1569		cp->pause_entered++;
1570
1571	if (stat & MAC_CTRL_PAUSE_RECEIVED)
1572		cp->pause_last_time_recvd = (stat >> 16);
1573
1574	return 0;
1575}
1576
1577
1578/* Must be invoked under cp->lock. */
1579static inline int cas_mdio_link_not_up(struct cas *cp)
1580{
1581	u16 val;
1582
1583	switch (cp->lstate) {
1584	case link_force_ret:
1585		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1586		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1587		cp->timer_ticks = 5;
1588		cp->lstate = link_force_ok;
1589		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1590		break;
1591
1592	case link_aneg:
1593		val = cas_phy_read(cp, MII_BMCR);
1594
1595		/* Try forced modes. we try things in the following order:
1596		 * 1000 full -> 100 full/half -> 10 half
1597		 */
1598		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1599		val |= BMCR_FULLDPLX;
1600		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1601			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1602		cas_phy_write(cp, MII_BMCR, val);
1603		cp->timer_ticks = 5;
1604		cp->lstate = link_force_try;
1605		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1606		break;
1607
1608	case link_force_try:
1609		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
1610		val = cas_phy_read(cp, MII_BMCR);
1611		cp->timer_ticks = 5;
1612		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
1613			val &= ~CAS_BMCR_SPEED1000;
1614			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1615			cas_phy_write(cp, MII_BMCR, val);
1616			break;
1617		}
1618
1619		if (val & BMCR_SPEED100) {
1620			if (val & BMCR_FULLDPLX) /* fd failed */
1621				val &= ~BMCR_FULLDPLX;
1622			else { /* 100Mbps failed */
1623				val &= ~BMCR_SPEED100;
1624			}
1625			cas_phy_write(cp, MII_BMCR, val);
1626			break;
1627		}
1628	default:
1629		break;
1630	}
1631	return 0;
1632}
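/* the fallback ladder above therefore walks 1000bt-fd -> 100bt-fd ->
 * 100bt-hd -> 10bt-hd (skipping gigabit when the chip isn't
 * 1000MB capable), stepping down one notch each time the link timer
 * finds the forced mode still dead.
 */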
1633
1634
1635/* must be invoked with cp->lock held */
1636static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1637{
1638	int restart;
1639
1640	if (bmsr & BMSR_LSTATUS) {
1641		/* Ok, here we got a link. If we had it due to a forced
1642		 * fallback, and we were configured for autoneg, we
1643		 * retry a short autoneg pass. If you know your hub is
1644		 * broken, use ethtool ;)
1645		 */
1646		if ((cp->lstate == link_force_try) &&
1647		    (cp->link_cntl & BMCR_ANENABLE)) {
1648			cp->lstate = link_force_ret;
1649			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1650			cas_mif_poll(cp, 0);
1651			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1652			cp->timer_ticks = 5;
1653			if (cp->opened)
1654				netif_info(cp, link, cp->dev,
1655					   "Got link after fallback, retrying autoneg once...\n");
1656			cas_phy_write(cp, MII_BMCR,
1657				      cp->link_fcntl | BMCR_ANENABLE |
1658				      BMCR_ANRESTART);
1659			cas_mif_poll(cp, 1);
1660
1661		} else if (cp->lstate != link_up) {
1662			cp->lstate = link_up;
1663			cp->link_transition = LINK_TRANSITION_LINK_UP;
1664
1665			if (cp->opened) {
1666				cas_set_link_modes(cp);
1667				netif_carrier_on(cp->dev);
1668			}
1669		}
1670		return 0;
1671	}
1672
1673	/* link not up. if the link was previously up, we restart the
1674	 * whole process
1675	 */
1676	restart = 0;
1677	if (cp->lstate == link_up) {
1678		cp->lstate = link_down;
1679		cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1680
1681		netif_carrier_off(cp->dev);
1682		if (cp->opened)
1683			netif_info(cp, link, cp->dev, "Link down\n");
1684		restart = 1;
1685
1686	} else if (++cp->timer_ticks > 10)
1687		cas_mdio_link_not_up(cp);
1688
1689	return restart;
1690}
1691
1692static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1693			     u32 status)
1694{
1695	u32 stat = readl(cp->regs + REG_MIF_STATUS);
1696	u16 bmsr;
1697
1698	/* check for a link change */
1699	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1700		return 0;
1701
1702	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1703	return cas_mii_link_check(cp, bmsr);
1704}
1705
1706static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1707			     u32 status)
1708{
1709	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1710
1711	if (!stat)
1712		return 0;
1713
1714	netdev_err(dev, "PCI error [%04x:%04x]",
1715		   stat, readl(cp->regs + REG_BIM_DIAG));
1716
1717	/* cassini+ has this reserved */
1718	if ((stat & PCI_ERR_BADACK) &&
1719	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1720		pr_cont(" <No ACK64# during ABS64 cycle>");
1721
1722	if (stat & PCI_ERR_DTRTO)
1723		pr_cont(" <Delayed transaction timeout>");
1724	if (stat & PCI_ERR_OTHER)
1725		pr_cont(" <other>");
1726	if (stat & PCI_ERR_BIM_DMA_WRITE)
1727		pr_cont(" <BIM DMA 0 write req>");
1728	if (stat & PCI_ERR_BIM_DMA_READ)
1729		pr_cont(" <BIM DMA 0 read req>");
1730	pr_cont("\n");
1731
1732	if (stat & PCI_ERR_OTHER) {
1733		u16 cfg;
1734
1735		/* Interrogate PCI config space for the
1736		 * true cause.
1737		 */
1738		pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1739		netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
1740		if (cfg & PCI_STATUS_PARITY)
1741			netdev_err(dev, "PCI parity error detected\n");
1742		if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1743			netdev_err(dev, "PCI target abort\n");
1744		if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1745			netdev_err(dev, "PCI master acks target abort\n");
1746		if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1747			netdev_err(dev, "PCI master abort\n");
1748		if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1749			netdev_err(dev, "PCI system error SERR#\n");
1750		if (cfg & PCI_STATUS_DETECTED_PARITY)
1751			netdev_err(dev, "PCI parity error\n");
1752
1753		/* Write the error bits back to clear them. */
1754		cfg &= (PCI_STATUS_PARITY |
1755			PCI_STATUS_SIG_TARGET_ABORT |
1756			PCI_STATUS_REC_TARGET_ABORT |
1757			PCI_STATUS_REC_MASTER_ABORT |
1758			PCI_STATUS_SIG_SYSTEM_ERROR |
1759			PCI_STATUS_DETECTED_PARITY);
1760		pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
1761	}
1762
1763	/* For all PCI errors, we should reset the chip. */
1764	return 1;
1765}
1766
1767/* All non-normal interrupt conditions get serviced here.
1768 * Returns non-zero if we should just exit the interrupt
1769 * handler right now (ie. if we reset the card which invalidates
1770 * all of the other original irq status bits).
1771 */
1772static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1773			    u32 status)
1774{
1775	if (status & INTR_RX_TAG_ERROR) {
1776		/* corrupt RX tag framing */
1777		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1778			     "corrupt rx tag framing\n");
1779		spin_lock(&cp->stat_lock[0]);
1780		cp->net_stats[0].rx_errors++;
1781		spin_unlock(&cp->stat_lock[0]);
1782		goto do_reset;
1783	}
1784
1785	if (status & INTR_RX_LEN_MISMATCH) {
1786		/* length mismatch. */
1787		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1788			     "length mismatch for rx frame\n");
1789		spin_lock(&cp->stat_lock[0]);
1790		cp->net_stats[0].rx_errors++;
1791		spin_unlock(&cp->stat_lock[0]);
1792		goto do_reset;
1793	}
1794
1795	if (status & INTR_PCS_STATUS) {
1796		if (cas_pcs_interrupt(dev, cp, status))
1797			goto do_reset;
1798	}
1799
1800	if (status & INTR_TX_MAC_STATUS) {
1801		if (cas_txmac_interrupt(dev, cp, status))
1802			goto do_reset;
1803	}
1804
1805	if (status & INTR_RX_MAC_STATUS) {
1806		if (cas_rxmac_interrupt(dev, cp, status))
1807			goto do_reset;
1808	}
1809
1810	if (status & INTR_MAC_CTRL_STATUS) {
1811		if (cas_mac_interrupt(dev, cp, status))
1812			goto do_reset;
1813	}
1814
1815	if (status & INTR_MIF_STATUS) {
1816		if (cas_mif_interrupt(dev, cp, status))
1817			goto do_reset;
1818	}
1819
1820	if (status & INTR_PCI_ERROR_STATUS) {
1821		if (cas_pci_interrupt(dev, cp, status))
1822			goto do_reset;
1823	}
1824	return 0;
1825
1826do_reset:
1827#if 1
1828	atomic_inc(&cp->reset_task_pending);
1829	atomic_inc(&cp->reset_task_pending_all);
1830	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1831	schedule_work(&cp->reset_task);
1832#else
1833	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1834	netdev_err(dev, "reset called in cas_abnormal_irq\n");
1835	schedule_work(&cp->reset_task);
1836#endif
1837	return 1;
1838}
1839
1840/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
1841 *       determining whether to do a netif_stop/wakeup
1842 */
1843#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1844#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1845static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1846				  const int len)
1847{
1848	unsigned long off = addr + len;
1849
1850	if (CAS_TABORT(cp) == 1)
1851		return 0;
1852	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1853		return 0;
1854	return TX_TARGET_ABORT_LEN;
1855}
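/* worked example: with the target-abort workaround active
 * (CAS_TABORT() == 2), a buffer whose tail ends within
 * TX_TARGET_ABORT_LEN bytes of the next page boundary has that many
 * trailing bytes bounced through one of the driver's tiny buffers
 * instead of being DMAed in place.
 */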
1856
1857static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1858{
1859	struct cas_tx_desc *txds;
1860	struct sk_buff **skbs;
1861	struct net_device *dev = cp->dev;
1862	int entry, count;
1863
1864	spin_lock(&cp->tx_lock[ring]);
1865	txds = cp->init_txds[ring];
1866	skbs = cp->tx_skbs[ring];
1867	entry = cp->tx_old[ring];
1868
1869	count = TX_BUFF_COUNT(ring, entry, limit);
1870	while (entry != limit) {
1871		struct sk_buff *skb = skbs[entry];
1872		dma_addr_t daddr;
1873		u32 dlen;
1874		int frag;
1875
1876		if (!skb) {
1877			/* this should never occur */
1878			entry = TX_DESC_NEXT(ring, entry);
1879			continue;
1880		}
1881
1882		/* however, we might get only a partial skb release. */
1883		count -= skb_shinfo(skb)->nr_frags +
1884			cp->tx_tiny_use[ring][entry].nbufs + 1;
1885		if (count < 0)
1886			break;
1887
1888		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1889			     "tx[%d] done, slot %d\n", ring, entry);
1890
1891		skbs[entry] = NULL;
1892		cp->tx_tiny_use[ring][entry].nbufs = 0;
1893
1894		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1895			struct cas_tx_desc *txd = txds + entry;
1896
1897			daddr = le64_to_cpu(txd->buffer);
1898			dlen = CAS_VAL(TX_DESC_BUFLEN,
1899				       le64_to_cpu(txd->control));
1900			pci_unmap_page(cp->pdev, daddr, dlen,
1901				       PCI_DMA_TODEVICE);
1902			entry = TX_DESC_NEXT(ring, entry);
1903
1904			/* tiny buffer may follow */
1905			if (cp->tx_tiny_use[ring][entry].used) {
1906				cp->tx_tiny_use[ring][entry].used = 0;
1907				entry = TX_DESC_NEXT(ring, entry);
1908			}
1909		}
1910
1911		spin_lock(&cp->stat_lock[ring]);
1912		cp->net_stats[ring].tx_packets++;
1913		cp->net_stats[ring].tx_bytes += skb->len;
1914		spin_unlock(&cp->stat_lock[ring]);
1915		dev_kfree_skb_irq(skb);
1916	}
1917	cp->tx_old[ring] = entry;
1918
1919	/* this is wrong for multiple tx rings. the net device needs
1920	 * multiple queues for this to do the right thing.  we wait
1921	 * for 2*packets to be available when using tiny buffers
1922	 */
1923	if (netif_queue_stopped(dev) &&
1924	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1925		netif_wake_queue(dev);
1926	spin_unlock(&cp->tx_lock[ring]);
1927}
1928
1929static void cas_tx(struct net_device *dev, struct cas *cp,
1930		   u32 status)
1931{
1932	int limit, ring;
1933#ifdef USE_TX_COMPWB
1934	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1935#endif
1936	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1937		     "tx interrupt, status: 0x%x, %llx\n",
1938		     status, (unsigned long long)compwb);
1939	/* process all the rings */
1940	for (ring = 0; ring < N_TX_RINGS; ring++) {
1941#ifdef USE_TX_COMPWB
1942		/* use the completion writeback registers */
1943		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1944			CAS_VAL(TX_COMPWB_LSB, compwb);
1945		compwb = TX_COMPWB_NEXT(compwb);
1946#else
1947		limit = readl(cp->regs + REG_TX_COMPN(ring));
1948#endif
1949		if (cp->tx_old[ring] != limit)
1950			cas_tx_ringN(cp, ring, limit);
1951	}
1952}
1953
1954
1955static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1956			      int entry, const u64 *words,
1957			      struct sk_buff **skbref)
1958{
1959	int dlen, hlen, len, i, alloclen;
1960	int off, swivel = RX_SWIVEL_OFF_VAL;
1961	struct cas_page *page;
1962	struct sk_buff *skb;
1963	void *addr, *crcaddr;
1964	__sum16 csum;
1965	char *p;
1966
1967	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1968	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1969	len  = hlen + dlen;
1970
1971	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1972		alloclen = len;
1973	else
1974		alloclen = max(hlen, RX_COPY_MIN);
1975
1976	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1977	if (skb == NULL)
1978		return -1;
1979
1980	*skbref = skb;
1981	skb_reserve(skb, swivel);
1982
1983	p = skb->data;
1984	addr = crcaddr = NULL;
1985	if (hlen) { /* always copy header pages */
1986		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1987		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1988		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1989			swivel;
1990
1991		i = hlen;
1992		if (!dlen) /* attach FCS */
1993			i += cp->crc_size;
1994		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
1995				    PCI_DMA_FROMDEVICE);
1996		addr = cas_page_map(page->buffer);
1997		memcpy(p, addr + off, i);
1998		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
1999				    PCI_DMA_FROMDEVICE);
2000		cas_page_unmap(addr);
2001		RX_USED_ADD(page, 0x100);
2002		p += hlen;
2003		swivel = 0;
2004	}
2005
2006
2007	if (alloclen < (hlen + dlen)) {
2008		skb_frag_t *frag = skb_shinfo(skb)->frags;
2009
2010		/* normal or jumbo packets. we use frags */
2011		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2012		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2013		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2014
2015		hlen = min(cp->page_size - off, dlen);
2016		if (hlen < 0) {
2017			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2018				     "rx page overflow: %d\n", hlen);
2019			dev_kfree_skb_irq(skb);
2020			return -1;
2021		}
2022		i = hlen;
2023		if (i == dlen)  /* attach FCS */
2024			i += cp->crc_size;
2025		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2026				    PCI_DMA_FROMDEVICE);
2027
2028		/* make sure we always copy a header */
2029		swivel = 0;
2030		if (p == (char *) skb->data) { /* not split */
2031			addr = cas_page_map(page->buffer);
2032			memcpy(p, addr + off, RX_COPY_MIN);
2033			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2034					PCI_DMA_FROMDEVICE);
2035			cas_page_unmap(addr);
2036			off += RX_COPY_MIN;
2037			swivel = RX_COPY_MIN;
2038			RX_USED_ADD(page, cp->mtu_stride);
2039		} else {
2040			RX_USED_ADD(page, hlen);
2041		}
2042		skb_put(skb, alloclen);
2043
2044		skb_shinfo(skb)->nr_frags++;
2045		skb->data_len += hlen - swivel;
2046		skb->truesize += hlen - swivel;
2047		skb->len      += hlen - swivel;
2048
2049		__skb_frag_set_page(frag, page->buffer);
2050		__skb_frag_ref(frag);
2051		frag->page_offset = off;
2052		skb_frag_size_set(frag, hlen - swivel);
2053
2054		/* any more data? */
2055		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2056			hlen = dlen;
2057			off = 0;
2058
2059			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2060			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2061			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2062					    hlen + cp->crc_size,
2063					    PCI_DMA_FROMDEVICE);
2064			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2065					    hlen + cp->crc_size,
2066					    PCI_DMA_FROMDEVICE);
2067
2068			skb_shinfo(skb)->nr_frags++;
2069			skb->data_len += hlen;
2070			skb->len      += hlen;
2071			frag++;
2072
2073			__skb_frag_set_page(frag, page->buffer);
2074			__skb_frag_ref(frag);
2075			frag->page_offset = 0;
2076			skb_frag_size_set(frag, hlen);
2077			RX_USED_ADD(page, hlen + cp->crc_size);
2078		}
2079
2080		if (cp->crc_size) {
2081			addr = cas_page_map(page->buffer);
2082			crcaddr  = addr + off + hlen;
2083		}
2084
2085	} else {
2086		/* copying packet */
2087		if (!dlen)
2088			goto end_copy_pkt;
2089
2090		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2091		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2092		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2093		hlen = min(cp->page_size - off, dlen);
2094		if (hlen < 0) {
2095			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2096				     "rx page overflow: %d\n", hlen);
2097			dev_kfree_skb_irq(skb);
2098			return -1;
2099		}
2100		i = hlen;
2101		if (i == dlen) /* attach FCS */
2102			i += cp->crc_size;
2103		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2104				    PCI_DMA_FROMDEVICE);
2105		addr = cas_page_map(page->buffer);
2106		memcpy(p, addr + off, i);
2107		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2108				    PCI_DMA_FROMDEVICE);
2109		cas_page_unmap(addr);
2110		if (p == (char *) skb->data) /* not split */
2111			RX_USED_ADD(page, cp->mtu_stride);
2112		else
2113			RX_USED_ADD(page, i);
2114
2115		/* any more data? */
2116		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2117			p += hlen;
2118			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2119			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2120			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2121					    dlen + cp->crc_size,
2122					    PCI_DMA_FROMDEVICE);
2123			addr = cas_page_map(page->buffer);
2124			memcpy(p, addr, dlen + cp->crc_size);
2125			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2126					    dlen + cp->crc_size,
2127					    PCI_DMA_FROMDEVICE);
2128			cas_page_unmap(addr);
2129			RX_USED_ADD(page, dlen + cp->crc_size);
2130		}
2131end_copy_pkt:
2132		if (cp->crc_size) {
2133			addr    = NULL;
2134			crcaddr = skb->data + alloclen;
2135		}
2136		skb_put(skb, alloclen);
2137	}
2138
2139	csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2140	if (cp->crc_size) {
2141		/* checksum includes FCS. strip it out. */
2142		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2143					      csum_unfold(csum)));
2144		if (addr)
2145			cas_page_unmap(addr);
2146	}
2147	skb->protocol = eth_type_trans(skb, cp->dev);
2148	if (skb->protocol == htons(ETH_P_IP)) {
2149		skb->csum = csum_unfold(~csum);
2150		skb->ip_summed = CHECKSUM_COMPLETE;
2151	} else
2152		skb_checksum_none_assert(skb);
2153	return len;
2154}
2155
2156
2157/* we can handle up to 64 rx flows at a time. we do the same thing
2158 * as nonreassm except that we batch up the buffers.
2159 * NOTE: we currently just treat each flow as a bunch of packets that
2160 *       we pass up. a better way would be to coalesce the packets
2161 *       into a jumbo packet. to do that, we need to do the following:
2162 *       1) the first packet will have a clean split between header and
2163 *          data. save both.
2164 *       2) each time the next flow packet comes in, extend the
2165 *          data length and merge the checksums.
2166 *       3) on flow release, fix up the header.
2167 *       4) make sure the higher layer doesn't care.
2168 * because packets get coalesced, we shouldn't run into fragment count
2169 * issues.
2170 */
2171static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2172				   struct sk_buff *skb)
2173{
2174	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2175	struct sk_buff_head *flow = &cp->rx_flows[flowid];
2176
2177	/* this is protected at a higher layer, so no need to
2178	 * do any additional locking here. stick the buffer
2179	 * at the end.
2180	 */
2181	__skb_queue_tail(flow, skb);
2182	if (words[0] & RX_COMP1_RELEASE_FLOW) {
2183		while ((skb = __skb_dequeue(flow))) {
2184			cas_skb_release(skb);
2185		}
2186	}
2187}
2188
2189/* put rx descriptor back on ring. if a buffer is in use by a higher
2190 * layer, this will need to put in a replacement.
2191 */
2192static void cas_post_page(struct cas *cp, const int ring, const int index)
2193{
2194	cas_page_t *new;
2195	int entry;
2196
2197	entry = cp->rx_old[ring];
2198
2199	new = cas_page_swap(cp, ring, index);
2200	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2201	cp->init_rxds[ring][entry].index  =
2202		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2203			    CAS_BASE(RX_INDEX_RING, ring));
2204
2205	entry = RX_DESC_ENTRY(ring, entry + 1);
2206	cp->rx_old[ring] = entry;
2207
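	/* the hw is only kicked on 4-descriptor boundaries;
	 * cas_post_rxds_ringN() clusters its kicks the same way.
	 */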
2208	if (entry % 4)
2209		return;
2210
2211	if (ring == 0)
2212		writel(entry, cp->regs + REG_RX_KICK);
2213	else if ((N_RX_DESC_RINGS > 1) &&
2214		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2215		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2216}
2217
2218
2219/* only when things are bad */
2220static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2221{
2222	unsigned int entry, last, count, released;
2223	int cluster;
2224	cas_page_t **page = cp->rx_pages[ring];
2225
2226	entry = cp->rx_old[ring];
2227
2228	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2229		     "rxd[%d] interrupt, done: %d\n", ring, entry);
2230
2231	cluster = -1;
2232	count = entry & 0x3;
2233	last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
2234	released = 0;
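	/* swap out any page the stack still holds, stopping 4 entries short
	 * of the requested count (or of a full lap when num == 0),
	 * presumably so the ring never completely fills.  "cluster"
	 * remembers the last 4-aligned entry for the kick below.
	 */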
2235	while (entry != last) {
2236		/* make a new buffer if it's still in use */
2237		if (page_count(page[entry]->buffer) > 1) {
2238			cas_page_t *new = cas_page_dequeue(cp);
2239			if (!new) {
2240				/* let the timer know that we need to
2241				 * do this again
2242				 */
2243				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2244				if (!timer_pending(&cp->link_timer))
2245					mod_timer(&cp->link_timer, jiffies +
2246						  CAS_LINK_FAST_TIMEOUT);
2247				cp->rx_old[ring]  = entry;
2248				cp->rx_last[ring] = num ? num - released : 0;
2249				return -ENOMEM;
2250			}
2251			spin_lock(&cp->rx_inuse_lock);
2252			list_add(&page[entry]->list, &cp->rx_inuse_list);
2253			spin_unlock(&cp->rx_inuse_lock);
2254			cp->init_rxds[ring][entry].buffer =
2255				cpu_to_le64(new->dma_addr);
2256			page[entry] = new;
2257
2258		}
2259
2260		if (++count == 4) {
2261			cluster = entry;
2262			count = 0;
2263		}
2264		released++;
2265		entry = RX_DESC_ENTRY(ring, entry + 1);
2266	}
2267	cp->rx_old[ring] = entry;
2268
2269	if (cluster < 0)
2270		return 0;
2271
2272	if (ring == 0)
2273		writel(cluster, cp->regs + REG_RX_KICK);
2274	else if ((N_RX_DESC_RINGS > 1) &&
2275		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2276		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2277	return 0;
2278}
2279
2280
2281/* process a completion ring. packets are set up in three basic ways:
2282 * small packets: header + data are copied whole into the skb.
2283 * large packets: header and data arrive in a single page buffer.
2284 * split packets: header in a separate buffer from data.
2285 *                data may be in multiple pages. data may be > 256
2286 *                bytes but in a single page.
2287 *
2288 * NOTE: RX page posting is done in this routine as well. while there's
2289 *       the capability of using multiple RX completion rings, it isn't
2290 *       really worthwhile due to the fact that the page posting will
2291 *       force serialization on the single descriptor ring.
2292 */
2293static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2294{
2295	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2296	int entry, drops;
2297	int npackets = 0;
2298
2299	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2300		     "rx[%d] interrupt, done: %d/%d\n",
2301		     ring,
2302		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2303
2304	entry = cp->rx_new[ring];
2305	drops = 0;
2306	while (1) {
2307		struct cas_rx_comp *rxc = rxcs + entry;
2308		struct sk_buff *uninitialized_var(skb);
2309		int type, len;
2310		u64 words[4];
2311		int i, dring;
2312
2313		words[0] = le64_to_cpu(rxc->word1);
2314		words[1] = le64_to_cpu(rxc->word2);
2315		words[2] = le64_to_cpu(rxc->word3);
2316		words[3] = le64_to_cpu(rxc->word4);
2317
2318		/* don't touch if still owned by hw */
2319		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2320		if (type == 0)
2321			break;
2322
2323		/* hw hasn't cleared the zero bit yet */
2324		if (words[3] & RX_COMP4_ZERO) {
2325			break;
2326		}
2327
2328		/* get info on the packet */
2329		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2330			spin_lock(&cp->stat_lock[ring]);
2331			cp->net_stats[ring].rx_errors++;
2332			if (words[3] & RX_COMP4_LEN_MISMATCH)
2333				cp->net_stats[ring].rx_length_errors++;
2334			if (words[3] & RX_COMP4_BAD)
2335				cp->net_stats[ring].rx_crc_errors++;
2336			spin_unlock(&cp->stat_lock[ring]);
2337
2338			/* We'll just return it to Cassini. */
2339		drop_it:
2340			spin_lock(&cp->stat_lock[ring]);
2341			++cp->net_stats[ring].rx_dropped;
2342			spin_unlock(&cp->stat_lock[ring]);
2343			goto next;
2344		}
2345
2346		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2347		if (len < 0) {
2348			++drops;
2349			goto drop_it;
2350		}
2351
2352		/* see if it's a flow re-assembly or not. the driver
2353		 * itself handles release back up.
2354		 */
2355		if (RX_DONT_BATCH || (type == 0x2)) {
2356			/* non-reassm: these always get released */
2357			cas_skb_release(skb);
2358		} else {
2359			cas_rx_flow_pkt(cp, words, skb);
2360		}
2361
2362		spin_lock(&cp->stat_lock[ring]);
2363		cp->net_stats[ring].rx_packets++;
2364		cp->net_stats[ring].rx_bytes += len;
2365		spin_unlock(&cp->stat_lock[ring]);
2366
2367	next:
2368		npackets++;
2369
2370		/* should it be released? */
2371		if (words[0] & RX_COMP1_RELEASE_HDR) {
2372			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2373			dring = CAS_VAL(RX_INDEX_RING, i);
2374			i = CAS_VAL(RX_INDEX_NUM, i);
2375			cas_post_page(cp, dring, i);
2376		}
2377
2378		if (words[0] & RX_COMP1_RELEASE_DATA) {
2379			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2380			dring = CAS_VAL(RX_INDEX_RING, i);
2381			i = CAS_VAL(RX_INDEX_NUM, i);
2382			cas_post_page(cp, dring, i);
2383		}
2384
2385		if (words[0] & RX_COMP1_RELEASE_NEXT) {
2386			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2387			dring = CAS_VAL(RX_INDEX_RING, i);
2388			i = CAS_VAL(RX_INDEX_NUM, i);
2389			cas_post_page(cp, dring, i);
2390		}
2391
2392		/* skip to the next entry */
2393		entry = RX_COMP_ENTRY(ring, entry + 1 +
2394				      CAS_VAL(RX_COMP1_SKIP, words[0]));
2395#ifdef USE_NAPI
2396		if (budget && (npackets >= budget))
2397			break;
2398#endif
2399	}
2400	cp->rx_new[ring] = entry;
2401
2402	if (drops)
2403		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2404	return npackets;
2405}
2406
2407
2408/* put completion entries back on the ring */
2409static void cas_post_rxcs_ringN(struct net_device *dev,
2411{
2412	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2413	int last, entry;
2414
2415	last = cp->rx_cur[ring];
2416	entry = cp->rx_new[ring];
2417	netif_printk(cp, intr, KERN_DEBUG, dev,
2418		     "rxc[%d] interrupt, done: %d/%d\n",
2419		     ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2420
2421	/* zero and re-mark descriptors */
2422	while (last != entry) {
2423		cas_rxc_init(rxc + last);
2424		last = RX_COMP_ENTRY(ring, last + 1);
2425	}
2426	cp->rx_cur[ring] = last;
2427
2428	if (ring == 0)
2429		writel(last, cp->regs + REG_RX_COMP_TAIL);
2430	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2431		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2432}
2433
2434
2435
2436/* cassini can use all four PCI interrupts for the completion rings.
2437 * the INTC and INTD rings (indices 2 and 3) are handled identically.
2438 */
2439#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2440static inline void cas_handle_irqN(struct net_device *dev,
2441				   struct cas *cp, const u32 status,
2442				   const int ring)
2443{
2444	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2445		cas_post_rxcs_ringN(dev, cp, ring);
2446}
2447
2448static irqreturn_t cas_interruptN(int irq, void *dev_id)
2449{
2450	struct net_device *dev = dev_id;
2451	struct cas *cp = netdev_priv(dev);
2452	unsigned long flags;
2453	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2454	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2455
2456	/* check for shared irq */
2457	if (status == 0)
2458		return IRQ_NONE;
2459
2460	spin_lock_irqsave(&cp->lock, flags);
2461	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2462#ifdef USE_NAPI
2463		cas_mask_intr(cp);
2464		napi_schedule(&cp->napi);
2465#else
2466		cas_rx_ringN(cp, ring, 0);
2467#endif
2468		status &= ~INTR_RX_DONE_ALT;
2469	}
2470
2471	if (status)
2472		cas_handle_irqN(dev, cp, status, ring);
2473	spin_unlock_irqrestore(&cp->lock, flags);
2474	return IRQ_HANDLED;
2475}
2476#endif
2477
2478#ifdef USE_PCI_INTB
2479/* everything but rx packets */
2480static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2481{
2482	if (status & INTR_RX_BUF_UNAVAIL_1) {
2483		/* Frame arrived, no free RX buffers available.
2484		 * NOTE: we can get this on a link transition. */
2485		cas_post_rxds_ringN(cp, 1, 0);
2486		spin_lock(&cp->stat_lock[1]);
2487		cp->net_stats[1].rx_dropped++;
2488		spin_unlock(&cp->stat_lock[1]);
2489	}
2490
2491	if (status & INTR_RX_BUF_AE_1)
2492		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2493				    RX_AE_FREEN_VAL(1));
2494
2495	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2496		cas_post_rxcs_ringN(cp, 1);
2497}
2498
2499/* the INTB ring (index 1) handles a few more events than rings 2 and 3 */
2500static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2501{
2502	struct net_device *dev = dev_id;
2503	struct cas *cp = netdev_priv(dev);
2504	unsigned long flags;
2505	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2506
2507	/* check for shared interrupt */
2508	if (status == 0)
2509		return IRQ_NONE;
2510
2511	spin_lock_irqsave(&cp->lock, flags);
2512	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2513#ifdef USE_NAPI
2514		cas_mask_intr(cp);
2515		napi_schedule(&cp->napi);
2516#else
2517		cas_rx_ringN(cp, 1, 0);
2518#endif
2519		status &= ~INTR_RX_DONE_ALT;
2520	}
2521	if (status)
2522		cas_handle_irq1(cp, status);
2523	spin_unlock_irqrestore(&cp->lock, flags);
2524	return IRQ_HANDLED;
2525}
2526#endif
2527
2528static inline void cas_handle_irq(struct net_device *dev,
2529				  struct cas *cp, const u32 status)
2530{
2531	/* housekeeping interrupts */
2532	if (status & INTR_ERROR_MASK)
2533		cas_abnormal_irq(dev, cp, status);
2534
2535	if (status & INTR_RX_BUF_UNAVAIL) {
2536		/* Frame arrived, no free RX buffers available.
2537		 * NOTE: we can get this on a link transition.
2538		 */
2539		cas_post_rxds_ringN(cp, 0, 0);
2540		spin_lock(&cp->stat_lock[0]);
2541		cp->net_stats[0].rx_dropped++;
2542		spin_unlock(&cp->stat_lock[0]);
2543	} else if (status & INTR_RX_BUF_AE) {
2544		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2545				    RX_AE_FREEN_VAL(0));
2546	}
2547
2548	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2549		cas_post_rxcs_ringN(dev, cp, 0);
2550}
2551
2552static irqreturn_t cas_interrupt(int irq, void *dev_id)
2553{
2554	struct net_device *dev = dev_id;
2555	struct cas *cp = netdev_priv(dev);
2556	unsigned long flags;
2557	u32 status = readl(cp->regs + REG_INTR_STATUS);
2558
2559	if (status == 0)
2560		return IRQ_NONE;
2561
2562	spin_lock_irqsave(&cp->lock, flags);
2563	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2564		cas_tx(dev, cp, status);
2565		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2566	}
2567
2568	if (status & INTR_RX_DONE) {
2569#ifdef USE_NAPI
2570		cas_mask_intr(cp);
2571		napi_schedule(&cp->napi);
2572#else
2573		cas_rx_ringN(cp, 0, 0);
2574#endif
2575		status &= ~INTR_RX_DONE;
2576	}
2577
2578	if (status)
2579		cas_handle_irq(dev, cp, status);
2580	spin_unlock_irqrestore(&cp->lock, flags);
2581	return IRQ_HANDLED;
2582}
2583
2584
2585#ifdef USE_NAPI
2586static int cas_poll(struct napi_struct *napi, int budget)
2587{
2588	struct cas *cp = container_of(napi, struct cas, napi);
2589	struct net_device *dev = cp->dev;
2590	int i, enable_intr, credits;
2591	u32 status = readl(cp->regs + REG_INTR_STATUS);
2592	unsigned long flags;
2593
2594	spin_lock_irqsave(&cp->lock, flags);
2595	cas_tx(dev, cp, status);
2596	spin_unlock_irqrestore(&cp->lock, flags);
2597
2598	/* NAPI rx packets. we spread the credits across all of the
2599	 * rxc rings
2600	 *
2601	 * to make sure we're fair with the work we loop through each
2602	 * ring N_RX_COMP_RING times with a request of
2603	 * budget / N_RX_COMP_RINGS
2604	 */
2605	enable_intr = 1;
2606	credits = 0;
2607	for (i = 0; i < N_RX_COMP_RINGS; i++) {
2608		int j;
2609		for (j = 0; j < N_RX_COMP_RINGS; j++) {
2610			credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2611			if (credits >= budget) {
2612				enable_intr = 0;
2613				goto rx_comp;
2614			}
2615		}
2616	}
2617
2618rx_comp:
2619	/* final rx completion */
2620	spin_lock_irqsave(&cp->lock, flags);
2621	if (status)
2622		cas_handle_irq(dev, cp, status);
2623
2624#ifdef USE_PCI_INTB
2625	if (N_RX_COMP_RINGS > 1) {
2626		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2627		if (status)
2628			cas_handle_irq1(cp, status);
2629	}
2630#endif
2631
2632#ifdef USE_PCI_INTC
2633	if (N_RX_COMP_RINGS > 2) {
2634		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2635		if (status)
2636			cas_handle_irqN(dev, cp, status, 2);
2637	}
2638#endif
2639
2640#ifdef USE_PCI_INTD
2641	if (N_RX_COMP_RINGS > 3) {
2642		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2643		if (status)
2644			cas_handle_irqN(dev, cp, status, 3);
2645	}
2646#endif
2647	spin_unlock_irqrestore(&cp->lock, flags);
2648	if (enable_intr) {
2649		napi_complete(napi);
2650		cas_unmask_intr(cp);
2651	}
2652	return credits;
2653}
2654#endif
2655
2656#ifdef CONFIG_NET_POLL_CONTROLLER
2657static void cas_netpoll(struct net_device *dev)
2658{
2659	struct cas *cp = netdev_priv(dev);
2660
2661	cas_disable_irq(cp, 0);
2662	cas_interrupt(cp->pdev->irq, dev);
2663	cas_enable_irq(cp, 0);
2664
2665#ifdef USE_PCI_INTB
2666	if (N_RX_COMP_RINGS > 1) {
2667		/* cas_interrupt1(); */
2668	}
2669#endif
2670#ifdef USE_PCI_INTC
2671	if (N_RX_COMP_RINGS > 2) {
2672		/* cas_interruptN(); */
2673	}
2674#endif
2675#ifdef USE_PCI_INTD
2676	if (N_RX_COMP_RINGS > 3) {
2677		/* cas_interruptN(); */
2678	}
2679#endif
2680}
2681#endif
2682
2683static void cas_tx_timeout(struct net_device *dev)
2684{
2685	struct cas *cp = netdev_priv(dev);
2686
2687	netdev_err(dev, "transmit timed out, resetting\n");
2688	if (!cp->hw_running) {
2689		netdev_err(dev, "hrm.. hw not running!\n");
2690		return;
2691	}
2692
2693	netdev_err(dev, "MIF_STATE[%08x]\n",
2694		   readl(cp->regs + REG_MIF_STATE_MACHINE));
2695
2696	netdev_err(dev, "MAC_STATE[%08x]\n",
2697		   readl(cp->regs + REG_MAC_STATE_MACHINE));
2698
2699	netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2700		   readl(cp->regs + REG_TX_CFG),
2701		   readl(cp->regs + REG_MAC_TX_STATUS),
2702		   readl(cp->regs + REG_MAC_TX_CFG),
2703		   readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2704		   readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2705		   readl(cp->regs + REG_TX_FIFO_READ_PTR),
2706		   readl(cp->regs + REG_TX_SM_1),
2707		   readl(cp->regs + REG_TX_SM_2));
2708
2709	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2710		   readl(cp->regs + REG_RX_CFG),
2711		   readl(cp->regs + REG_MAC_RX_STATUS),
2712		   readl(cp->regs + REG_MAC_RX_CFG));
2713
2714	netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2715		   readl(cp->regs + REG_HP_STATE_MACHINE),
2716		   readl(cp->regs + REG_HP_STATUS0),
2717		   readl(cp->regs + REG_HP_STATUS1),
2718		   readl(cp->regs + REG_HP_STATUS2));
2719
2720#if 1
2721	atomic_inc(&cp->reset_task_pending);
2722	atomic_inc(&cp->reset_task_pending_all);
2723	schedule_work(&cp->reset_task);
2724#else
2725	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2726	schedule_work(&cp->reset_task);
2727#endif
2728}
2729
2730static inline int cas_intme(int ring, int entry)
2731{
2732	/* Algorithm: IRQ every 1/2 of descriptors. */
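	/* e.g. with a 128-entry ring the mask is 63, so entries 0 and 64
	 * request an interrupt -- two interrupts per traversal of the ring.
	 */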
2733	if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2734		return 1;
2735	return 0;
2736}
2737
2738
2739static void cas_write_txd(struct cas *cp, int ring, int entry,
2740			  dma_addr_t mapping, int len, u64 ctrl, int last)
2741{
2742	struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2743
2744	ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2745	if (cas_intme(ring, entry))
2746		ctrl |= TX_DESC_INTME;
2747	if (last)
2748		ctrl |= TX_DESC_EOF;
2749	txd->control = cpu_to_le64(ctrl);
2750	txd->buffer = cpu_to_le64(mapping);
2751}
2752
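/* tx "tiny" buffers: small pre-mapped per-entry bounce buffers.  when
 * cas_calc_tabort() flags a buffer on chips with CAS_FLAG_TARGET_ABORT
 * (seemingly so a DMA read never ends too close to a page boundary),
 * the tail of that buffer is copied through one of these rather than
 * mapped in place.
 */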
2753static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2754				const int entry)
2755{
2756	return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2757}
2758
2759static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2760				     const int entry, const int tentry)
2761{
2762	cp->tx_tiny_use[ring][tentry].nbufs++;
2763	cp->tx_tiny_use[ring][entry].used = 1;
2764	return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2765}
2766
2767static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2768				    struct sk_buff *skb)
2769{
2770	struct net_device *dev = cp->dev;
2771	int entry, nr_frags, frag, tabort, tentry;
2772	dma_addr_t mapping;
2773	unsigned long flags;
2774	u64 ctrl;
2775	u32 len;
2776
2777	spin_lock_irqsave(&cp->tx_lock[ring], flags);
2778
2779	/* This is a hard error, log it. */
2780	if (TX_BUFFS_AVAIL(cp, ring) <=
2781	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2782		netif_stop_queue(dev);
2783		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2784		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2785		return 1;
2786	}
2787
2788	ctrl = 0;
2789	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2790		const u64 csum_start_off = skb_checksum_start_offset(skb);
2791		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2792
2793		ctrl =  TX_DESC_CSUM_EN |
2794			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2795			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2796	}
2797
2798	entry = cp->tx_new[ring];
2799	cp->tx_skbs[ring][entry] = skb;
2800
2801	nr_frags = skb_shinfo(skb)->nr_frags;
2802	len = skb_headlen(skb);
2803	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2804			       offset_in_page(skb->data), len,
2805			       PCI_DMA_TODEVICE);
2806
2807	tentry = entry;
2808	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
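	/* when the head would trip the erratum, DMA all but the last
	 * tabort bytes in place and bounce the tail through a tiny buffer.
	 */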
2809	if (unlikely(tabort)) {
2810		/* NOTE: len is always > tabort */
2811		cas_write_txd(cp, ring, entry, mapping, len - tabort,
2812			      ctrl | TX_DESC_SOF, 0);
2813		entry = TX_DESC_NEXT(ring, entry);
2814
2815		skb_copy_from_linear_data_offset(skb, len - tabort,
2816			      tx_tiny_buf(cp, ring, entry), tabort);
2817		mapping = tx_tiny_map(cp, ring, entry, tentry);
2818		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2819			      (nr_frags == 0));
2820	} else {
2821		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2822			      TX_DESC_SOF, (nr_frags == 0));
2823	}
2824	entry = TX_DESC_NEXT(ring, entry);
2825
2826	for (frag = 0; frag < nr_frags; frag++) {
2827		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2828
2829		len = skb_frag_size(fragp);
2830		mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2831					   DMA_TO_DEVICE);
2832
2833		tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2834		if (unlikely(tabort)) {
2835			void *addr;
2836
2837			/* NOTE: len is always > tabort */
2838			cas_write_txd(cp, ring, entry, mapping, len - tabort,
2839				      ctrl, 0);
2840			entry = TX_DESC_NEXT(ring, entry);
2841
2842			addr = cas_page_map(skb_frag_page(fragp));
2843			memcpy(tx_tiny_buf(cp, ring, entry),
2844			       addr + fragp->page_offset + len - tabort,
2845			       tabort);
2846			cas_page_unmap(addr);
2847			mapping = tx_tiny_map(cp, ring, entry, tentry);
2848			len     = tabort;
2849		}
2850
2851		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2852			      (frag + 1 == nr_frags));
2853		entry = TX_DESC_NEXT(ring, entry);
2854	}
2855
2856	cp->tx_new[ring] = entry;
2857	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2858		netif_stop_queue(dev);
2859
2860	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2861		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2862		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2863	writel(entry, cp->regs + REG_TX_KICKN(ring));
2864	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2865	return 0;
2866}
2867
2868static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2869{
2870	struct cas *cp = netdev_priv(dev);
2871
2872	/* this is only used as a load-balancing hint, so it doesn't
2873	 * need to be SMP safe
2874	 */
2875	static int ring;
2876
2877	if (skb_padto(skb, cp->min_frame_size))
2878		return NETDEV_TX_OK;
2879
2880	/* XXX: we need some higher-level QoS hooks to steer packets to
2881	 *      individual queues.
2882	 */
2883	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2884		return NETDEV_TX_BUSY;
2885	return NETDEV_TX_OK;
2886}
2887
2888static void cas_init_tx_dma(struct cas *cp)
2889{
2890	u64 desc_dma = cp->block_dvma;
2891	unsigned long off;
2892	u32 val;
2893	int i;
2894
2895	/* set up tx completion writeback registers. must be 8-byte aligned */
2896#ifdef USE_TX_COMPWB
2897	off = offsetof(struct cas_init_block, tx_compwb);
2898	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2899	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2900#endif
2901
2902	/* enable completion writebacks, enable paced mode,
2903	 * disable read pipe, and disable pre-interrupt compwbs
2904	 */
2905	val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2906		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2907		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2908		TX_CFG_INTR_COMPWB_DIS;
2909
2910	/* write out tx ring info and tx desc bases */
2911	for (i = 0; i < MAX_TX_RINGS; i++) {
2912		off = (unsigned long) cp->init_txds[i] -
2913			(unsigned long) cp->init_block;
2914
2915		val |= CAS_TX_RINGN_BASE(i);
2916		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2917		writel((desc_dma + off) & 0xffffffff, cp->regs +
2918		       REG_TX_DBN_LOW(i));
2919		/* don't zero out the kick register here as the system
2920		 * will wedge
2921		 */
2922	}
2923	writel(val, cp->regs + REG_TX_CFG);
2924
2925	/* program max burst sizes. these numbers should be different
2926	 * if doing QoS.
2927	 */
2928#ifdef USE_QOS
2929	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2930	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2931	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2932	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2933#else
2934	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2935	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2936	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2937	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2938#endif
2939}
2940
2941/* Must be invoked under cp->lock. */
2942static inline void cas_init_dma(struct cas *cp)
2943{
2944	cas_init_tx_dma(cp);
2945	cas_init_rx_dma(cp);
2946}
2947
2948static void cas_process_mc_list(struct cas *cp)
2949{
2950	u16 hash_table[16];
2951	u32 crc;
2952	struct netdev_hw_addr *ha;
2953	int i = 1;
2954
2955	memset(hash_table, 0, sizeof(hash_table));
2956	netdev_for_each_mc_addr(ha, cp->dev) {
2957		if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2958			/* use the alternate mac address registers for the
2959			 * first 15 multicast addresses
2960			 */
2961			writel((ha->addr[4] << 8) | ha->addr[5],
2962			       cp->regs + REG_MAC_ADDRN(i*3 + 0));
2963			writel((ha->addr[2] << 8) | ha->addr[3],
2964			       cp->regs + REG_MAC_ADDRN(i*3 + 1));
2965			writel((ha->addr[0] << 8) | ha->addr[1],
2966			       cp->regs + REG_MAC_ADDRN(i*3 + 2));
2967			i++;
2968		}
2969		else {
2970			/* use hw hash table for the next series of
2971			 * multicast addresses
2972			 */
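			/* the top 8 bits of the little-endian CRC select
			 * one of 256 filter bits, stored as sixteen 16-bit
			 * hash-table registers.
			 */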
2973			crc = ether_crc_le(ETH_ALEN, ha->addr);
2974			crc >>= 24;
2975			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2976		}
2977	}
2978	for (i = 0; i < 16; i++)
2979		writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2980}
2981
2982/* Must be invoked under cp->lock. */
2983static u32 cas_setup_multicast(struct cas *cp)
2984{
2985	u32 rxcfg = 0;
2986	int i;
2987
2988	if (cp->dev->flags & IFF_PROMISC) {
2989		rxcfg |= MAC_RX_CFG_PROMISC_EN;
2990
2991	} else if (cp->dev->flags & IFF_ALLMULTI) {
2992		for (i = 0; i < 16; i++)
2993			writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2994		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2995
2996	} else {
2997		cas_process_mc_list(cp);
2998		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2999	}
3000
3001	return rxcfg;
3002}
3003
3004/* must be invoked under cp->stat_lock[N_TX_RINGS] */
3005static void cas_clear_mac_err(struct cas *cp)
3006{
3007	writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3008	writel(0, cp->regs + REG_MAC_COLL_FIRST);
3009	writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3010	writel(0, cp->regs + REG_MAC_COLL_LATE);
3011	writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3012	writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3013	writel(0, cp->regs + REG_MAC_RECV_FRAME);
3014	writel(0, cp->regs + REG_MAC_LEN_ERR);
3015	writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3016	writel(0, cp->regs + REG_MAC_FCS_ERR);
3017	writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3018}
3019
3020
3021static void cas_mac_reset(struct cas *cp)
3022{
3023	int i;
3024
3025	/* do both TX and RX reset */
3026	writel(0x1, cp->regs + REG_MAC_TX_RESET);
3027	writel(0x1, cp->regs + REG_MAC_RX_RESET);
3028
3029	/* wait for TX */
3030	i = STOP_TRIES;
3031	while (i-- > 0) {
3032		if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3033			break;
3034		udelay(10);
3035	}
3036
3037	/* wait for RX */
3038	i = STOP_TRIES;
3039	while (i-- > 0) {
3040		if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3041			break;
3042		udelay(10);
3043	}
3044
3045	if (readl(cp->regs + REG_MAC_TX_RESET) |
3046	    readl(cp->regs + REG_MAC_RX_RESET))
3047		netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3048			   readl(cp->regs + REG_MAC_TX_RESET),
3049			   readl(cp->regs + REG_MAC_RX_RESET),
3050			   readl(cp->regs + REG_MAC_STATE_MACHINE));
3051}
3052
3053
3054/* Must be invoked under cp->lock. */
3055static void cas_init_mac(struct cas *cp)
3056{
3057	unsigned char *e = &cp->dev->dev_addr[0];
3058	int i;
3059	cas_mac_reset(cp);
3060
3061	/* setup core arbitration weight register */
3062	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3063
3064	/* XXX Use pci_dma_burst_advice() */
3065#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3066	/* set the infinite burst register for chips that don't have
3067	 * pci issues.
3068	 */
3069	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3070		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3071#endif
3072
3073	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3074
3075	writel(0x00, cp->regs + REG_MAC_IPG0);
3076	writel(0x08, cp->regs + REG_MAC_IPG1);
3077	writel(0x04, cp->regs + REG_MAC_IPG2);
3078
3079	/* change later for 802.3z */
3080	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3081
3082	/* min frame + FCS */
3083	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3084
3085	/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
3086	 * specify the maximum frame size to prevent RX tag errors on
3087	 * oversized frames.
3088	 */
3089	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3090	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3091			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3092	       cp->regs + REG_MAC_FRAMESIZE_MAX);
3093
3094	/* NOTE: crc_size is used as a surrogate for half-duplex.
3095	 * workaround saturn half-duplex issue by increasing preamble
3096	 * size to 65 bytes.
3097	 */
3098	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3099		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3100	else
3101		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3102	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3103	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3104	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3105
3106	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3107
3108	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3109	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3110	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3111	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3112	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3113
3114	/* setup mac address in perfect filter array */
3115	for (i = 0; i < 45; i++)
3116		writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3117
3118	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3119	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3120	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3121
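	/* entries 42-44 hold the 802.3x flow-control multicast address
	 * 01:80:c2:00:00:01, stored low word first like the unicast
	 * address above.
	 */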
3122	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3123	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3124	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3125
3126	cp->mac_rx_cfg = cas_setup_multicast(cp);
3127
3128	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3129	cas_clear_mac_err(cp);
3130	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3131
3132	/* Setup MAC interrupts.  We want to get all of the interesting
3133	 * counter expiration events, but we do not want to hear about
3134	 * normal rx/tx as the DMA engine tells us that.
3135	 */
3136	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3137	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3138
3139	/* Don't enable even the PAUSE interrupts for now, we
3140	 * make no use of those events other than to record them.
3141	 */
3142	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3143}
3144
3145/* Must be invoked under cp->lock. */
3146static void cas_init_pause_thresholds(struct cas *cp)
3147{
3148	/* Calculate pause thresholds.  Setting the OFF threshold to the
3149	 * full RX fifo size effectively disables PAUSE generation
3150	 */
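	/* worked example (assuming a 16K rx fifo): a 1500-byte MTU gives
	 * max_frame = (1500 + 14 + 4 + 4 + 64) & ~63 = 1536, so
	 * off = 16384 - 2*1536 = 13312 and on = 13312 - 1536 = 11776.
	 */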
3151	if (cp->rx_fifo_size <= (2 * 1024)) {
3152		cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3153	} else {
3154		int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3155		if (max_frame * 3 > cp->rx_fifo_size) {
3156			cp->rx_pause_off = 7104;
3157			cp->rx_pause_on  = 960;
3158		} else {
3159			int off = (cp->rx_fifo_size - (max_frame * 2));
3160			int on = off - max_frame;
3161			cp->rx_pause_off = off;
3162			cp->rx_pause_on = on;
3163		}
3164	}
3165}
3166
3167static int cas_vpd_match(const void __iomem *p, const char *str)
3168{
3169	int len = strlen(str) + 1;
3170	int i;
3171
3172	for (i = 0; i < len; i++) {
3173		if (readb(p + i) != str[i])
3174			return 0;
3175	}
3176	return 1;
3177}
3178
3179
3180/* get the mac address by reading the vpd information in the rom.
3181 * also get the phy type and determine if there's an entropy generator.
3182 * NOTE: this is a bit convoluted for the following reasons:
3183 *  1) vpd info has order-dependent mac addresses for multinic cards
3184 *  2) the only way to determine the nic order is to use the slot
3185 *     number.
3186 *  3) fiber cards don't have bridges, so their slot numbers don't
3187 *     mean anything.
3188 *  4) we don't actually know we have a fiber card until after
3189 *     the mac addresses are parsed.
3190 */
3191static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3192			    const int offset)
3193{
3194	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3195	void __iomem *base, *kstart;
3196	int i, len;
3197	int found = 0;
3198#define VPD_FOUND_MAC        0x01
3199#define VPD_FOUND_PHY        0x02
3200
3201	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3202	int mac_off  = 0;
3203
3204#if defined(CONFIG_SPARC)
3205	const unsigned char *addr;
3206#endif
3207
3208	/* give us access to the PROM */
3209	writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3210	       cp->regs + REG_BIM_LOCAL_DEV_EN);
3211
3212	/* check for an expansion rom */
3213	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3214		goto use_random_mac_addr;
3215
3216	/* search for beginning of vpd */
3217	base = NULL;
3218	for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3219		/* check for PCIR */
3220		if ((readb(p + i + 0) == 0x50) &&
3221		    (readb(p + i + 1) == 0x43) &&
3222		    (readb(p + i + 2) == 0x49) &&
3223		    (readb(p + i + 3) == 0x52)) {
3224			base = p + (readb(p + i + 8) |
3225				    (readb(p + i + 9) << 8));
3226			break;
3227		}
3228	}
3229
3230	if (!base || (readb(base) != 0x82))
3231		goto use_random_mac_addr;
3232
3233	i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3234	while (i < EXPANSION_ROM_SIZE) {
3235		if (readb(base + i) != 0x90) /* no vpd found */
3236			goto use_random_mac_addr;
3237
3238		/* found a vpd field */
3239		len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3240
3241		/* extract keywords */
3242		kstart = base + i + 3;
3243		p = kstart;
3244		while ((p - kstart) < len) {
3245			int klen = readb(p + 2);
3246			int j;
3247			char type;
3248
3249			p += 3;
3250
3251			/* look for the following things:
3252			 * -- correct length == 29
3253			 * 3 (type) + 2 (size) +
3254			 * 18 (strlen("local-mac-address") + 1) +
3255			 * 6 (mac addr)
3256			 * -- VPD Instance 'I'
3257			 * -- VPD Type Bytes 'B'
3258			 * -- VPD data length == 6
3259			 * -- property string == local-mac-address
3260			 *
3261			 * -- correct length == 24
3262			 * 3 (type) + 2 (size) +
3263			 * 12 (strlen("entropy-dev") + 1) +
3264			 * 7 (strlen("vms110") + 1)
3265			 * -- VPD Instance 'I'
3266			 * -- VPD Type String 'S'
3267			 * -- VPD data length == 7
3268			 * -- property string == entropy-dev
3269			 *
3270			 * -- correct length == 18
3271			 * 3 (type) + 2 (size) +
3272			 * 9 (strlen("phy-type") + 1) +
3273			 * 4 (strlen("pcs") + 1)
3274			 * -- VPD Instance 'I'
3275			 * -- VPD Type String 'S'
3276			 * -- VPD data length == 4
3277			 * -- property string == phy-type
3278			 *
3279			 * -- correct length == 23
3280			 * 3 (type) + 2 (size) +
3281			 * 14 (strlen("phy-interface") + 1) +
3282			 * 4 (strlen("pcs") + 1)
3283			 * -- VPD Instance 'I'
3284			 * -- VPD Type String 'S'
3285			 * -- VPD data length == 4
3286			 * -- property string == phy-interface
3287			 */
3288			if (readb(p) != 'I')
3289				goto next;
3290
3291			/* finally, check string and length */
3292			type = readb(p + 3);
3293			if (type == 'B') {
3294				if ((klen == 29) && readb(p + 4) == 6 &&
3295				    cas_vpd_match(p + 5,
3296						  "local-mac-address")) {
3297					if (mac_off++ > offset)
3298						goto next;
3299
3300					/* set mac address */
3301					for (j = 0; j < 6; j++)
3302						dev_addr[j] =
3303							readb(p + 23 + j);
3304					goto found_mac;
3305				}
3306			}
3307
3308			if (type != 'S')
3309				goto next;
3310
3311#ifdef USE_ENTROPY_DEV
3312			if ((klen == 24) &&
3313			    cas_vpd_match(p + 5, "entropy-dev") &&
3314			    cas_vpd_match(p + 17, "vms110")) {
3315				cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3316				goto next;
3317			}
3318#endif
3319
3320			if (found & VPD_FOUND_PHY)
3321				goto next;
3322
3323			if ((klen == 18) && readb(p + 4) == 4 &&
3324			    cas_vpd_match(p + 5, "phy-type")) {
3325				if (cas_vpd_match(p + 14, "pcs")) {
3326					phy_type = CAS_PHY_SERDES;
3327					goto found_phy;
3328				}
3329			}
3330
3331			if ((klen == 23) && readb(p + 4) == 4 &&
3332			    cas_vpd_match(p + 5, "phy-interface")) {
3333				if (cas_vpd_match(p + 19, "pcs")) {
3334					phy_type = CAS_PHY_SERDES;
3335					goto found_phy;
3336				}
3337			}
3338found_mac:
3339			found |= VPD_FOUND_MAC;
3340			goto next;
3341
3342found_phy:
3343			found |= VPD_FOUND_PHY;
3344
3345next:
3346			p += klen;
3347		}
3348		i += len + 3;
3349	}
3350
3351use_random_mac_addr:
3352	if (found & VPD_FOUND_MAC)
3353		goto done;
3354
3355#if defined(CONFIG_SPARC)
3356	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3357	if (addr != NULL) {
3358		memcpy(dev_addr, addr, 6);
3359		goto done;
3360	}
3361#endif
3362
3363	/* Sun MAC prefix then 3 random bytes. */
3364	pr_info("MAC address not found in ROM VPD\n");
3365	dev_addr[0] = 0x08;
3366	dev_addr[1] = 0x00;
3367	dev_addr[2] = 0x20;
3368	get_random_bytes(dev_addr + 3, 3);
3369
3370done:
3371	writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3372	return phy_type;
3373}
3374
3375/* check pci invariants */
3376static void cas_check_pci_invariants(struct cas *cp)
3377{
3378	struct pci_dev *pdev = cp->pdev;
3379
3380	cp->cas_flags = 0;
3381	if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3382	    (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3383		if (pdev->revision >= CAS_ID_REVPLUS)
3384			cp->cas_flags |= CAS_FLAG_REG_PLUS;
3385		if (pdev->revision < CAS_ID_REVPLUS02u)
3386			cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3387
3388		/* Original Cassini supports HW CSUM, but it's not
3389		 * enabled by default as it can trigger TX hangs.
3390		 */
3391		if (pdev->revision < CAS_ID_REV2)
3392			cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3393	} else {
3394		/* Only Sun has original cassini chips.  */
3395		cp->cas_flags |= CAS_FLAG_REG_PLUS;
3396
3397		/* We use a flag because the same phy might be externally
3398		 * connected.
3399		 */
3400		if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3401		    (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3402			cp->cas_flags |= CAS_FLAG_SATURN;
3403	}
3404}
3405
3406
3407static int cas_check_invariants(struct cas *cp)
3408{
3409	struct pci_dev *pdev = cp->pdev;
 
3410	u32 cfg;
3411	int i;
3412
3413	/* get page size for rx buffers. */
3414	cp->page_order = 0;
3415#ifdef USE_PAGE_ORDER
3416	if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3417		/* see if we can allocate larger pages */
3418		struct page *page = alloc_pages(GFP_ATOMIC,
3419						CAS_JUMBO_PAGE_SHIFT -
3420						PAGE_SHIFT);
3421		if (page) {
3422			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3423			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3424		} else {
3425			pr_info("MTU limited to %d bytes\n", CAS_MAX_MTU);
3426		}
3427	}
3428#endif
3429	cp->page_size = (PAGE_SIZE << cp->page_order);
3430
3431	/* Fetch the FIFO configurations. */
3432	cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3433	cp->rx_fifo_size = RX_FIFO_SIZE;
3434
3435	/* finish phy determination. MDIO1 takes precedence over MDIO0 if
3436	 * they're both connected.
3437	 */
3438	cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3439					PCI_SLOT(pdev->devfn));
3440	if (cp->phy_type & CAS_PHY_SERDES) {
3441		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3442		return 0; /* no more checking needed */
3443	}
3444
3445	/* MII */
3446	cfg = readl(cp->regs + REG_MIF_CFG);
3447	if (cfg & MIF_CFG_MDIO_1) {
3448		cp->phy_type = CAS_PHY_MII_MDIO1;
3449	} else if (cfg & MIF_CFG_MDIO_0) {
3450		cp->phy_type = CAS_PHY_MII_MDIO0;
3451	}
3452
3453	cas_mif_poll(cp, 0);
3454	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3455
3456	for (i = 0; i < 32; i++) {
3457		u32 phy_id;
3458		int j;
3459
3460		for (j = 0; j < 3; j++) {
3461			cp->phy_addr = i;
3462			phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3463			phy_id |= cas_phy_read(cp, MII_PHYSID2);
3464			if (phy_id && (phy_id != 0xFFFFFFFF)) {
3465				cp->phy_id = phy_id;
3466				goto done;
3467			}
3468		}
3469	}
3470	pr_err("MII phy did not respond [%08x]\n",
3471	       readl(cp->regs + REG_MIF_STATE_MACHINE));
3472	return -1;
3473
3474done:
3475	/* see if we can do gigabit */
3476	cfg = cas_phy_read(cp, MII_BMSR);
3477	if ((cfg & CAS_BMSR_1000_EXTEND) &&
3478	    cas_phy_read(cp, CAS_MII_1000_EXTEND))
3479		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3480	return 0;
3481}
3482
3483/* Must be invoked under cp->lock. */
3484static inline void cas_start_dma(struct cas *cp)
3485{
3486	int i;
3487	u32 val;
3488	int txfailed = 0;
3489
3490	/* enable dma */
3491	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3492	writel(val, cp->regs + REG_TX_CFG);
3493	val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3494	writel(val, cp->regs + REG_RX_CFG);
3495
3496	/* enable the mac */
3497	val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3498	writel(val, cp->regs + REG_MAC_TX_CFG);
3499	val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3500	writel(val, cp->regs + REG_MAC_RX_CFG);
3501
3502	i = STOP_TRIES;
3503	while (i-- > 0) {
3504		val = readl(cp->regs + REG_MAC_TX_CFG);
3505		if ((val & MAC_TX_CFG_EN))
3506			break;
3507		udelay(10);
3508	}
3509	if (i < 0)
		txfailed = 1;
3510	i = STOP_TRIES;
3511	while (i-- > 0) {
3512		val = readl(cp->regs + REG_MAC_RX_CFG);
3513		if ((val & MAC_RX_CFG_EN)) {
3514			if (txfailed) {
3515				netdev_err(cp->dev,
3516					   "enabling mac failed [tx:%08x:%08x]\n",
3517					   readl(cp->regs + REG_MIF_STATE_MACHINE),
3518					   readl(cp->regs + REG_MAC_STATE_MACHINE));
3519			}
3520			goto enable_rx_done;
3521		}
3522		udelay(10);
3523	}
3524	netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3525		   (txfailed ? "tx,rx" : "rx"),
3526		   readl(cp->regs + REG_MIF_STATE_MACHINE),
3527		   readl(cp->regs + REG_MAC_STATE_MACHINE));
3528
3529enable_rx_done:
3530	cas_unmask_intr(cp); /* enable interrupts */
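	/* prime the rx ring with all but the last 4 descriptors, which
	 * keeps the kick 4-aligned (see cas_post_page) and leaves the
	 * ring short of completely full.
	 */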
3531	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3532	writel(0, cp->regs + REG_RX_COMP_TAIL);
3533
3534	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3535		if (N_RX_DESC_RINGS > 1)
3536			writel(RX_DESC_RINGN_SIZE(1) - 4,
3537			       cp->regs + REG_PLUS_RX_KICK1);
3538
3539		for (i = 1; i < N_RX_COMP_RINGS; i++)
3540			writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3541	}
3542}
3543
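/* pause encoding used by the link-mode readers below: bit 0x01 means the
 * partner advertised symmetric pause, bit 0x10 asymmetric pause.
 */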
3544/* Must be invoked under cp->lock. */
3545static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3546				   int *pause)
3547{
3548	u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3549	*fd     = (val & PCS_MII_LPA_FD) ? 1 : 0;
3550	*pause  = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3551	if (val & PCS_MII_LPA_ASYM_PAUSE)
3552		*pause |= 0x10;
3553	*spd = 1000;
3554}
3555
3556/* Must be invoked under cp->lock. */
3557static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3558				   int *pause)
3559{
3560	u32 val;
3561
3562	*fd = 0;
3563	*spd = 10;
3564	*pause = 0;
3565
3566	/* use GMII registers */
3567	val = cas_phy_read(cp, MII_LPA);
3568	if (val & CAS_LPA_PAUSE)
3569		*pause = 0x01;
3570
3571	if (val & CAS_LPA_ASYM_PAUSE)
3572		*pause |= 0x10;
3573
3574	if (val & LPA_DUPLEX)
3575		*fd = 1;
3576	if (val & LPA_100)
3577		*spd = 100;
3578
3579	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3580		val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3581		if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3582			*spd = 1000;
3583		if (val & CAS_LPA_1000FULL)
3584			*fd = 1;
3585	}
3586}
3587
3588/* A link-up condition has occurred, initialize and enable the
3589 * rest of the chip.
3590 *
3591 * Must be invoked under cp->lock.
3592 */
3593static void cas_set_link_modes(struct cas *cp)
3594{
3595	u32 val;
3596	int full_duplex, speed, pause;
3597
3598	full_duplex = 0;
3599	speed = 10;
3600	pause = 0;
3601
3602	if (CAS_PHY_MII(cp->phy_type)) {
3603		cas_mif_poll(cp, 0);
3604		val = cas_phy_read(cp, MII_BMCR);
3605		if (val & BMCR_ANENABLE) {
3606			cas_read_mii_link_mode(cp, &full_duplex, &speed,
3607					       &pause);
3608		} else {
3609			if (val & BMCR_FULLDPLX)
3610				full_duplex = 1;
3611
3612			if (val & BMCR_SPEED100)
3613				speed = 100;
3614			else if (val & CAS_BMCR_SPEED1000)
3615				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3616					1000 : 100;
3617		}
3618		cas_mif_poll(cp, 1);
3619
3620	} else {
3621		val = readl(cp->regs + REG_PCS_MII_CTRL);
3622		cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3623		if ((val & PCS_MII_AUTONEG_EN) == 0) {
3624			if (val & PCS_MII_CTRL_DUPLEX)
3625				full_duplex = 1;
3626		}
3627	}
3628
3629	netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3630		   speed, full_duplex ? "full" : "half");
3631
3632	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3633	if (CAS_PHY_MII(cp->phy_type)) {
3634		val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3635		if (!full_duplex)
3636			val |= MAC_XIF_DISABLE_ECHO;
3637	}
3638	if (full_duplex)
3639		val |= MAC_XIF_FDPLX_LED;
3640	if (speed == 1000)
3641		val |= MAC_XIF_GMII_MODE;
3642	writel(val, cp->regs + REG_MAC_XIF_CFG);
3643
3644	/* deal with carrier and collision detect. */
3645	val = MAC_TX_CFG_IPG_EN;
3646	if (full_duplex) {
3647		val |= MAC_TX_CFG_IGNORE_CARRIER;
3648		val |= MAC_TX_CFG_IGNORE_COLL;
3649	} else {
3650#ifndef USE_CSMA_CD_PROTO
3651		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3652		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3653#endif
3654	}
3655	/* val now set up for REG_MAC_TX_CFG */
3656
3657	/* If gigabit and half-duplex, enable carrier extension
3658	 * mode.  increase slot time to 512 bytes as well.
3659	 * else, disable it and make sure slot time is 64 bytes.
3660	 * also activate checksum bug workaround
3661	 */
3662	if ((speed == 1000) && !full_duplex) {
3663		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3664		       cp->regs + REG_MAC_TX_CFG);
3665
3666		val = readl(cp->regs + REG_MAC_RX_CFG);
3667		val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
3668		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3669		       cp->regs + REG_MAC_RX_CFG);
3670
3671		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3672
3673		cp->crc_size = 4;
3674		/* minimum size gigabit frame at half duplex */
3675		cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3676
3677	} else {
3678		writel(val, cp->regs + REG_MAC_TX_CFG);
3679
3680		/* checksum bug workaround. don't strip FCS when in
3681		 * half-duplex mode
3682		 */
3683		val = readl(cp->regs + REG_MAC_RX_CFG);
3684		if (full_duplex) {
3685			val |= MAC_RX_CFG_STRIP_FCS;
3686			cp->crc_size = 0;
3687			cp->min_frame_size = CAS_MIN_MTU;
3688		} else {
3689			val &= ~MAC_RX_CFG_STRIP_FCS;
3690			cp->crc_size = 4;
3691			cp->min_frame_size = CAS_MIN_FRAME;
3692		}
3693		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3694		       cp->regs + REG_MAC_RX_CFG);
3695		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3696	}
3697
3698	if (netif_msg_link(cp)) {
3699		if (pause & 0x01) {
3700			netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3701				    cp->rx_fifo_size,
3702				    cp->rx_pause_off,
3703				    cp->rx_pause_on);
3704		} else if (pause & 0x10) {
3705			netdev_info(cp->dev, "TX pause enabled\n");
3706		} else {
3707			netdev_info(cp->dev, "Pause is disabled\n");
3708		}
3709	}
3710
3711	val = readl(cp->regs + REG_MAC_CTRL_CFG);
3712	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3713	if (pause) { /* symmetric or asymmetric pause */
3714		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3715		if (pause & 0x01) { /* symmetric pause */
3716			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3717		}
3718	}
3719	writel(val, cp->regs + REG_MAC_CTRL_CFG);
3720	cas_start_dma(cp);
3721}
3722
3723/* Must be invoked under cp->lock. */
3724static void cas_init_hw(struct cas *cp, int restart_link)
3725{
3726	if (restart_link)
3727		cas_phy_init(cp);
3728
3729	cas_init_pause_thresholds(cp);
3730	cas_init_mac(cp);
3731	cas_init_dma(cp);
3732
3733	if (restart_link) {
3734		/* Default aneg parameters */
3735		cp->timer_ticks = 0;
3736		cas_begin_auto_negotiation(cp, NULL);
3737	} else if (cp->lstate == link_up) {
3738		cas_set_link_modes(cp);
3739		netif_carrier_on(cp->dev);
3740	}
3741}
3742
3743/* Must be invoked under cp->lock. on earlier cassini boards,
3744 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
3745 * let it settle out, and then restore pci state.
3746 */
3747static void cas_hard_reset(struct cas *cp)
3748{
3749	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3750	udelay(20);
3751	pci_restore_state(cp->pdev);
3752}
3753
3754
3755static void cas_global_reset(struct cas *cp, int blkflag)
3756{
3757	int limit;
3758
3759	/* issue a global reset. don't use RSTOUT. */
3760	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3761		/* For PCS, when the blkflag is set, we should set the
3762		 * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of
3763		 * the last autonegotiation from being cleared.  We'll
3764		 * need some special handling if the chip is set into a
3765		 * loopback mode.
3766		 */
3767		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3768		       cp->regs + REG_SW_RESET);
3769	} else {
3770		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3771	}
3772
3773	/* need to wait at least 3ms before polling register */
3774	mdelay(3);
3775
3776	limit = STOP_TRIES;
3777	while (limit-- > 0) {
3778		u32 val = readl(cp->regs + REG_SW_RESET);
3779		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3780			goto done;
3781		udelay(10);
3782	}
3783	netdev_err(cp->dev, "sw reset failed\n");
3784
3785done:
3786	/* enable various BIM interrupts */
3787	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3788	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3789
3790	/* clear out pci error status mask for handled errors.
3791	 * we don't deal with DMA counter overflows as they happen
3792	 * all the time.
3793	 */
3794	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3795			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3796			       PCI_ERR_BIM_DMA_READ), cp->regs +
3797	       REG_PCI_ERR_STATUS_MASK);
3798
3799	/* set up for MII by default to address mac rx reset timeout
3800	 * issue
3801	 */
3802	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3803}
3804
3805static void cas_reset(struct cas *cp, int blkflag)
3806{
3807	u32 val;
3808
3809	cas_mask_intr(cp);
3810	cas_global_reset(cp, blkflag);
3811	cas_mac_reset(cp);
3812	cas_entropy_reset(cp);
3813
3814	/* disable dma engines. */
3815	val = readl(cp->regs + REG_TX_CFG);
3816	val &= ~TX_CFG_DMA_EN;
3817	writel(val, cp->regs + REG_TX_CFG);
3818
3819	val = readl(cp->regs + REG_RX_CFG);
3820	val &= ~RX_CFG_DMA_EN;
3821	writel(val, cp->regs + REG_RX_CFG);
3822
3823	/* program header parser */
3824	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3825	    (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3826		cas_load_firmware(cp, CAS_HP_FIRMWARE);
3827	} else {
3828		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3829	}
3830
3831	/* clear out error registers */
3832	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3833	cas_clear_mac_err(cp);
3834	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3835}
3836
3837/* Shut down the chip, must be called with pm_mutex held.  */
3838static void cas_shutdown(struct cas *cp)
3839{
3840	unsigned long flags;
3841
3842	/* Make us not-running to avoid timers respawning */
3843	cp->hw_running = 0;
3844
3845	del_timer_sync(&cp->link_timer);
3846
3847	/* Stop the reset task */
3848#if 0
3849	while (atomic_read(&cp->reset_task_pending_mtu) ||
3850	       atomic_read(&cp->reset_task_pending_spare) ||
3851	       atomic_read(&cp->reset_task_pending_all))
3852		schedule();
3853
3854#else
3855	while (atomic_read(&cp->reset_task_pending))
3856		schedule();
3857#endif
3858	/* Actually stop the chip */
3859	cas_lock_all_save(cp, flags);
3860	cas_reset(cp, 0);
3861	if (cp->cas_flags & CAS_FLAG_SATURN)
3862		cas_phy_powerdown(cp);
3863	cas_unlock_all_restore(cp, flags);
3864}
3865
3866static int cas_change_mtu(struct net_device *dev, int new_mtu)
3867{
3868	struct cas *cp = netdev_priv(dev);
3869
3870	if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
3871		return -EINVAL;
3872
3873	dev->mtu = new_mtu;
3874	if (!netif_running(dev) || !netif_device_present(dev))
3875		return 0;
3876
3877	/* let the reset task handle it */
3878#if 1
3879	atomic_inc(&cp->reset_task_pending);
3880	if ((cp->phy_type & CAS_PHY_SERDES)) {
3881		atomic_inc(&cp->reset_task_pending_all);
3882	} else {
3883		atomic_inc(&cp->reset_task_pending_mtu);
3884	}
3885	schedule_work(&cp->reset_task);
3886#else
3887	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3888		   CAS_RESET_ALL : CAS_RESET_MTU);
3889	pr_err("reset called in cas_change_mtu\n");
3890	schedule_work(&cp->reset_task);
3891#endif
3892
3893	flush_work_sync(&cp->reset_task);
3894	return 0;
3895}
3896
3897static void cas_clean_txd(struct cas *cp, int ring)
3898{
3899	struct cas_tx_desc *txd = cp->init_txds[ring];
3900	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3901	u64 daddr, dlen;
3902	int i, size;
3903
3904	size = TX_DESC_RINGN_SIZE(ring);
3905	for (i = 0; i < size; i++) {
3906		int frag;
3907
3908		if (skbs[i] == NULL)
3909			continue;
3910
3911		skb = skbs[i];
3912		skbs[i] = NULL;
3913
3914		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags;  frag++) {
3915			int ent = i & (size - 1);
3916
3917			/* first buffer is never a tiny buffer and so
3918			 * needs to be unmapped.
3919			 */
3920			daddr = le64_to_cpu(txd[ent].buffer);
3921			dlen  =  CAS_VAL(TX_DESC_BUFLEN,
3922					 le64_to_cpu(txd[ent].control));
3923			pci_unmap_page(cp->pdev, daddr, dlen,
3924				       PCI_DMA_TODEVICE);
3925
3926			if (frag != skb_shinfo(skb)->nr_frags) {
3927				i++;
3928
3929				/* next buffer might be a tiny buffer.
3930				 * skip past it.
3931				 */
3932				ent = i & (size - 1);
3933				if (cp->tx_tiny_use[ring][ent].used)
3934					i++;
3935			}
3936		}
3937		dev_kfree_skb_any(skb);
3938	}
3939
3940	/* zero out tiny buf usage */
3941	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3942}
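/* Note: the "ent = i & (size - 1)" wrap above relies on the ring size
 * being a power of two, so the AND is equivalent to i % size.
 */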
3943
3944/* freed on close */
3945static inline void cas_free_rx_desc(struct cas *cp, int ring)
3946{
3947	cas_page_t **page = cp->rx_pages[ring];
3948	int i, size;
3949
3950	size = RX_DESC_RINGN_SIZE(ring);
3951	for (i = 0; i < size; i++) {
3952		if (page[i]) {
3953			cas_page_free(cp, page[i]);
3954			page[i] = NULL;
3955		}
3956	}
3957}
3958
3959static void cas_free_rxds(struct cas *cp)
3960{
3961	int i;
3962
3963	for (i = 0; i < N_RX_DESC_RINGS; i++)
3964		cas_free_rx_desc(cp, i);
3965}
3966
3967/* Must be invoked under cp->lock. */
3968static void cas_clean_rings(struct cas *cp)
3969{
3970	int i;
3971
3972	/* need to clean all tx rings */
3973	memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3974	memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3975	for (i = 0; i < N_TX_RINGS; i++)
3976		cas_clean_txd(cp, i);
3977
3978	/* zero out init block */
3979	memset(cp->init_block, 0, sizeof(struct cas_init_block));
3980	cas_clean_rxds(cp);
3981	cas_clean_rxcs(cp);
3982}
3983
3984/* allocated on open */
3985static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3986{
3987	cas_page_t **page = cp->rx_pages[ring];
3988	int size, i = 0;
3989
3990	size = RX_DESC_RINGN_SIZE(ring);
3991	for (i = 0; i < size; i++) {
3992		if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3993			return -1;
3994	}
3995	return 0;
3996}
3997
3998static int cas_alloc_rxds(struct cas *cp)
3999{
4000	int i;
4001
4002	for (i = 0; i < N_RX_DESC_RINGS; i++) {
4003		if (cas_alloc_rx_desc(cp, i) < 0) {
4004			cas_free_rxds(cp);
4005			return -1;
4006		}
4007	}
4008	return 0;
4009}
4010
4011static void cas_reset_task(struct work_struct *work)
4012{
4013	struct cas *cp = container_of(work, struct cas, reset_task);
4014#if 0
4015	int pending = atomic_read(&cp->reset_task_pending);
4016#else
4017	int pending_all = atomic_read(&cp->reset_task_pending_all);
4018	int pending_spare = atomic_read(&cp->reset_task_pending_spare);
4019	int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
4020
4021	if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
4022		/* We can have more tasks scheduled than actually
4023		 * needed.
4024		 */
4025		atomic_dec(&cp->reset_task_pending);
4026		return;
4027	}
4028#endif
4029	/* The link went down, we reset the ring, but keep
4030	 * DMA stopped. Use this function for reset
4031	 * on error as well.
4032	 */
4033	if (cp->hw_running) {
4034		unsigned long flags;
4035
4036		/* Make sure we don't get interrupts or tx packets */
4037		netif_device_detach(cp->dev);
4038		cas_lock_all_save(cp, flags);
4039
4040		if (cp->opened) {
4041			/* We call cas_spare_recover when we call cas_open,
4042			 * but we do not initialize the lists cas_spare_recover
4043			 * uses until cas_open is called.
4044			 */
4045			cas_spare_recover(cp, GFP_ATOMIC);
4046		}
4047#if 1
4048		/* test => only pending_spare set */
4049		if (!pending_all && !pending_mtu)
4050			goto done;
4051#else
4052		if (pending == CAS_RESET_SPARE)
4053			goto done;
4054#endif
4055		/* when pending == CAS_RESET_ALL, the following
4056		 * call to cas_init_hw will restart auto negotiation.
4057		 * Setting the second argument of cas_reset to
4058		 * !(pending == CAS_RESET_ALL) will set this argument
4059		 * to 1 (avoiding reinitializing the PHY for the normal
4060		 * PCS case) when auto negotiation is not restarted.
4061		 */
4062#if 1
4063		cas_reset(cp, !(pending_all > 0));
4064		if (cp->opened)
4065			cas_clean_rings(cp);
4066		cas_init_hw(cp, (pending_all > 0));
4067#else
4068		cas_reset(cp, !(pending == CAS_RESET_ALL));
4069		if (cp->opened)
4070			cas_clean_rings(cp);
4071		cas_init_hw(cp, pending == CAS_RESET_ALL);
4072#endif
4073
4074done:
4075		cas_unlock_all_restore(cp, flags);
4076		netif_device_attach(cp->dev);
4077	}
4078#if 1
4079	atomic_sub(pending_all, &cp->reset_task_pending_all);
4080	atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4081	atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4082	atomic_dec(&cp->reset_task_pending);
4083#else
4084	atomic_set(&cp->reset_task_pending, 0);
4085#endif
4086}
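/* The atomic_sub() calls above subtract the snapshot taken at entry
 * rather than zeroing the counters, so any reset requests that arrived
 * while this task was running remain pending for the next invocation.
 */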
4087
4088static void cas_link_timer(unsigned long data)
4089{
4090	struct cas *cp = (struct cas *) data;
4091	int mask, pending = 0, reset = 0;
4092	unsigned long flags;
4093
4094	if (link_transition_timeout != 0 &&
4095	    cp->link_transition_jiffies_valid &&
4096	    ((jiffies - cp->link_transition_jiffies) >
4097	      (link_transition_timeout))) {
4098		/* One-second counter so link-down workaround doesn't
4099		 * cause resets to occur so fast as to fool the switch
4100		 * into thinking the link is down.
4101		 */
4102		cp->link_transition_jiffies_valid = 0;
4103	}
4104
4105	if (!cp->hw_running)
4106		return;
4107
4108	spin_lock_irqsave(&cp->lock, flags);
4109	cas_lock_tx(cp);
4110	cas_entropy_gather(cp);
4111
4112	/* If the link task is still pending, we just
4113	 * reschedule the link timer
4114	 */
4115#if 1
4116	if (atomic_read(&cp->reset_task_pending_all) ||
4117	    atomic_read(&cp->reset_task_pending_spare) ||
4118	    atomic_read(&cp->reset_task_pending_mtu))
4119		goto done;
4120#else
4121	if (atomic_read(&cp->reset_task_pending))
4122		goto done;
4123#endif
4124
4125	/* check for rx cleaning */
4126	if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4127		int i, rmask;
4128
4129		for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4130			rmask = CAS_FLAG_RXD_POST(i);
4131			if ((mask & rmask) == 0)
4132				continue;
4133
4134			/* post_rxds will do a mod_timer */
4135			if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4136				pending = 1;
4137				continue;
4138			}
4139			cp->cas_flags &= ~rmask;
4140		}
4141	}
4142
4143	if (CAS_PHY_MII(cp->phy_type)) {
4144		u16 bmsr;
4145		cas_mif_poll(cp, 0);
4146		bmsr = cas_phy_read(cp, MII_BMSR);
4147		/* WTZ: Solaris driver reads this twice, but that
4148		 * may be due to the PCS case and the use of a
4149		 * common implementation. Read it twice here to be
4150		 * safe.
4151		 */
4152		bmsr = cas_phy_read(cp, MII_BMSR);
4153		cas_mif_poll(cp, 1);
4154		readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
4155		reset = cas_mii_link_check(cp, bmsr);
4156	} else {
4157		reset = cas_pcs_link_check(cp);
4158	}
4159
4160	if (reset)
4161		goto done;
4162
4163	/* check for tx state machine confusion */
4164	if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4165		u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4166		u32 wptr, rptr;
4167		int tlm  = CAS_VAL(MAC_SM_TLM, val);
4168
4169		if (((tlm == 0x5) || (tlm == 0x3)) &&
4170		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4171			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4172				     "tx err: MAC_STATE[%08x]\n", val);
4173			reset = 1;
4174			goto done;
4175		}
4176
4177		val  = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4178		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4179		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4180		if ((val == 0) && (wptr != rptr)) {
4181			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4182				     "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4183				     val, wptr, rptr);
4184			reset = 1;
4185		}
4186
4187		if (reset)
4188			cas_hard_reset(cp);
4189	}
4190
4191done:
4192	if (reset) {
4193#if 1
4194		atomic_inc(&cp->reset_task_pending);
4195		atomic_inc(&cp->reset_task_pending_all);
4196		schedule_work(&cp->reset_task);
4197#else
4198		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4199		pr_err("reset called in cas_link_timer\n");
4200		schedule_work(&cp->reset_task);
4201#endif
4202	}
4203
4204	if (!pending)
4205		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4206	cas_unlock_tx(cp);
4207	spin_unlock_irqrestore(&cp->lock, flags);
4208}
4209
4210/* tiny buffers are used to avoid target abort issues with
4211 * older cassini chips
4212 */
4213static void cas_tx_tiny_free(struct cas *cp)
4214{
4215	struct pci_dev *pdev = cp->pdev;
4216	int i;
4217
4218	for (i = 0; i < N_TX_RINGS; i++) {
4219		if (!cp->tx_tiny_bufs[i])
4220			continue;
4221
4222		pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4223				    cp->tx_tiny_bufs[i],
4224				    cp->tx_tiny_dvma[i]);
4225		cp->tx_tiny_bufs[i] = NULL;
4226	}
4227}
4228
4229static int cas_tx_tiny_alloc(struct cas *cp)
4230{
4231	struct pci_dev *pdev = cp->pdev;
4232	int i;
4233
4234	for (i = 0; i < N_TX_RINGS; i++) {
4235		cp->tx_tiny_bufs[i] =
4236			pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4237					     &cp->tx_tiny_dvma[i]);
4238		if (!cp->tx_tiny_bufs[i]) {
4239			cas_tx_tiny_free(cp);
4240			return -1;
4241		}
4242	}
4243	return 0;
4244}
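/* The tiny buffers live in coherent DMA memory from
 * pci_alloc_consistent() and stay mapped for the device's lifetime,
 * which is why cas_clean_txd() only unmaps regular streaming buffers
 * and merely steps over tiny-buffer entries.
 */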
4245
4246
4247static int cas_open(struct net_device *dev)
4248{
4249	struct cas *cp = netdev_priv(dev);
4250	int hw_was_up, err;
4251	unsigned long flags;
4252
4253	mutex_lock(&cp->pm_mutex);
4254
4255	hw_was_up = cp->hw_running;
4256
4257	/* The power-management mutex protects the hw_running
4258	 * etc. state so it is safe to do this bit without cp->lock
4259	 */
4260	if (!cp->hw_running) {
4261		/* Reset the chip */
4262		cas_lock_all_save(cp, flags);
4263		/* We set the second arg to cas_reset to zero
4264		 * because cas_init_hw below will have its second
4265		 * argument set to non-zero, which will force
4266		 * autonegotiation to start.
4267		 */
4268		cas_reset(cp, 0);
4269		cp->hw_running = 1;
4270		cas_unlock_all_restore(cp, flags);
4271	}
4272
4273	err = -ENOMEM;
4274	if (cas_tx_tiny_alloc(cp) < 0)
4275		goto err_unlock;
4276
4277	/* alloc rx descriptors */
4278	if (cas_alloc_rxds(cp) < 0)
4279		goto err_tx_tiny;
4280
4281	/* allocate spares */
4282	cas_spare_init(cp);
4283	cas_spare_recover(cp, GFP_KERNEL);
4284
4285	/* We can now request the interrupt as we know it's masked
4286	 * on the controller. cassini+ has up to 4 interrupts
4287	 * that can be used, but you need to do explicit pci interrupt
4288	 * mapping to expose them
4289	 */
4290	if (request_irq(cp->pdev->irq, cas_interrupt,
4291			IRQF_SHARED, dev->name, (void *) dev)) {
4292		netdev_err(cp->dev, "failed to request irq!\n");
4293		err = -EAGAIN;
4294		goto err_spare;
4295	}
4296
4297#ifdef USE_NAPI
4298	napi_enable(&cp->napi);
4299#endif
4300	/* init hw */
4301	cas_lock_all_save(cp, flags);
4302	cas_clean_rings(cp);
4303	cas_init_hw(cp, !hw_was_up);
4304	cp->opened = 1;
4305	cas_unlock_all_restore(cp, flags);
4306
4307	netif_start_queue(dev);
4308	mutex_unlock(&cp->pm_mutex);
4309	return 0;
4310
4311err_spare:
4312	cas_spare_free(cp);
4313	cas_free_rxds(cp);
4314err_tx_tiny:
4315	cas_tx_tiny_free(cp);
4316err_unlock:
4317	mutex_unlock(&cp->pm_mutex);
4318	return err;
4319}
4320
4321static int cas_close(struct net_device *dev)
4322{
4323	unsigned long flags;
4324	struct cas *cp = netdev_priv(dev);
4325
4326#ifdef USE_NAPI
4327	napi_disable(&cp->napi);
4328#endif
4329	/* Make sure we don't get distracted by suspend/resume */
4330	mutex_lock(&cp->pm_mutex);
4331
4332	netif_stop_queue(dev);
4333
4334	/* Stop traffic, mark us closed */
4335	cas_lock_all_save(cp, flags);
4336	cp->opened = 0;
4337	cas_reset(cp, 0);
4338	cas_phy_init(cp);
4339	cas_begin_auto_negotiation(cp, NULL);
4340	cas_clean_rings(cp);
4341	cas_unlock_all_restore(cp, flags);
4342
4343	free_irq(cp->pdev->irq, (void *) dev);
4344	cas_spare_free(cp);
4345	cas_free_rxds(cp);
4346	cas_tx_tiny_free(cp);
4347	mutex_unlock(&cp->pm_mutex);
4348	return 0;
4349}
4350
4351static struct {
4352	const char name[ETH_GSTRING_LEN];
4353} ethtool_cassini_statnames[] = {
4354	{"collisions"},
4355	{"rx_bytes"},
4356	{"rx_crc_errors"},
4357	{"rx_dropped"},
4358	{"rx_errors"},
4359	{"rx_fifo_errors"},
4360	{"rx_frame_errors"},
4361	{"rx_length_errors"},
4362	{"rx_over_errors"},
4363	{"rx_packets"},
4364	{"tx_aborted_errors"},
4365	{"tx_bytes"},
4366	{"tx_dropped"},
4367	{"tx_errors"},
4368	{"tx_fifo_errors"},
4369	{"tx_packets"}
4370};
4371#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
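/* These names are what userland sees, in this order, from
 * "ethtool -S <iface>".
 */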
4372
4373static struct {
4374	const int offsets;	/* neg. values for 2nd arg to cas_read_phy */
4375} ethtool_register_table[] = {
4376	{-MII_BMSR},
4377	{-MII_BMCR},
4378	{REG_CAWR},
4379	{REG_INF_BURST},
4380	{REG_BIM_CFG},
4381	{REG_RX_CFG},
4382	{REG_HP_CFG},
4383	{REG_MAC_TX_CFG},
4384	{REG_MAC_RX_CFG},
4385	{REG_MAC_CTRL_CFG},
4386	{REG_MAC_XIF_CFG},
4387	{REG_MIF_CFG},
4388	{REG_PCS_CFG},
4389	{REG_SATURN_PCFG},
4390	{REG_PCS_MII_STATUS},
4391	{REG_PCS_STATE_MACHINE},
4392	{REG_MAC_COLL_EXCESS},
4393	{REG_MAC_COLL_LATE}
4394};
4395#define CAS_REG_LEN 	ARRAY_SIZE(ethtool_register_table)
4396#define CAS_MAX_REGS 	(sizeof(u32) * CAS_REG_LEN)
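/* Userland retrieves this dump with "ethtool -d <iface>".  Negative
 * offsets in the table denote PHY registers read via cas_phy_read();
 * cas_read_regs() below does the decoding.
 */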
4397
4398static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4399{
4400	u8 *p;
4401	int i;
4402	unsigned long flags;
4403
4404	spin_lock_irqsave(&cp->lock, flags);
4405	for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
4406		u16 hval;
4407		u32 val;
4408		if (ethtool_register_table[i].offsets < 0) {
4409			hval = cas_phy_read(cp,
4410				    -ethtool_register_table[i].offsets);
4411			val = hval;
4412		} else {
4413			val = readl(cp->regs + ethtool_register_table[i].offsets);
4414		}
4415		memcpy(p, (u8 *)&val, sizeof(u32));
4416	}
4417	spin_unlock_irqrestore(&cp->lock, flags);
4418}
4419
4420static struct net_device_stats *cas_get_stats(struct net_device *dev)
4421{
4422	struct cas *cp = netdev_priv(dev);
4423	struct net_device_stats *stats = cp->net_stats;
4424	unsigned long flags;
4425	int i;
4426	unsigned long tmp;
4427
4428	/* we collate all of the stats into net_stats[N_TX_RING] */
4429	if (!cp->hw_running)
4430		return stats + N_TX_RINGS;
4431
4432	/* collect outstanding stats */
4433	/* WTZ: the Cassini spec gives these as 16-bit counters but
4434	 * stored in 32-bit words.  Added a mask of 0xffff to be safe,
4435	 * in case the chip somehow puts any garbage in the other bits.
4436	 * Also, counter usage didn't seem to match what Adrian did
4437	 * in the parts of the code that set these quantities. Made
4438	 * that consistent.
4439	 */
4440	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4441	stats[N_TX_RINGS].rx_crc_errors +=
4442	  readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4443	stats[N_TX_RINGS].rx_frame_errors +=
4444		readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
4445	stats[N_TX_RINGS].rx_length_errors +=
4446		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4447#if 1
4448	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4449		(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4450	stats[N_TX_RINGS].tx_aborted_errors += tmp;
4451	stats[N_TX_RINGS].collisions +=
4452	  tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4453#else
4454	stats[N_TX_RINGS].tx_aborted_errors +=
4455		readl(cp->regs + REG_MAC_COLL_EXCESS);
4456	stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4457		readl(cp->regs + REG_MAC_COLL_LATE);
4458#endif
4459	cas_clear_mac_err(cp);
4460
4461	/* saved bits that are unique to ring 0 */
4462	spin_lock(&cp->stat_lock[0]);
4463	stats[N_TX_RINGS].collisions        += stats[0].collisions;
4464	stats[N_TX_RINGS].rx_over_errors    += stats[0].rx_over_errors;
4465	stats[N_TX_RINGS].rx_frame_errors   += stats[0].rx_frame_errors;
4466	stats[N_TX_RINGS].rx_fifo_errors    += stats[0].rx_fifo_errors;
4467	stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4468	stats[N_TX_RINGS].tx_fifo_errors    += stats[0].tx_fifo_errors;
4469	spin_unlock(&cp->stat_lock[0]);
4470
4471	for (i = 0; i < N_TX_RINGS; i++) {
4472		spin_lock(&cp->stat_lock[i]);
4473		stats[N_TX_RINGS].rx_length_errors +=
4474			stats[i].rx_length_errors;
4475		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4476		stats[N_TX_RINGS].rx_packets    += stats[i].rx_packets;
4477		stats[N_TX_RINGS].tx_packets    += stats[i].tx_packets;
4478		stats[N_TX_RINGS].rx_bytes      += stats[i].rx_bytes;
4479		stats[N_TX_RINGS].tx_bytes      += stats[i].tx_bytes;
4480		stats[N_TX_RINGS].rx_errors     += stats[i].rx_errors;
4481		stats[N_TX_RINGS].tx_errors     += stats[i].tx_errors;
4482		stats[N_TX_RINGS].rx_dropped    += stats[i].rx_dropped;
4483		stats[N_TX_RINGS].tx_dropped    += stats[i].tx_dropped;
4484		memset(stats + i, 0, sizeof(struct net_device_stats));
4485		spin_unlock(&cp->stat_lock[i]);
4486	}
4487	spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4488	return stats + N_TX_RINGS;
4489}
4490
4491
4492static void cas_set_multicast(struct net_device *dev)
4493{
4494	struct cas *cp = netdev_priv(dev);
4495	u32 rxcfg, rxcfg_new;
4496	unsigned long flags;
4497	int limit = STOP_TRIES;
4498
4499	if (!cp->hw_running)
4500		return;
4501
4502	spin_lock_irqsave(&cp->lock, flags);
4503	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4504
4505	/* disable RX MAC and wait for completion */
4506	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4507	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4508		if (!limit--)
4509			break;
4510		udelay(10);
4511	}
4512
4513	/* disable hash filter and wait for completion */
4514	limit = STOP_TRIES;
4515	rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4516	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4517	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4518		if (!limit--)
4519			break;
4520		udelay(10);
4521	}
4522
4523	/* program hash filters */
4524	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4525	rxcfg |= rxcfg_new;
4526	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4527	spin_unlock_irqrestore(&cp->lock, flags);
4528}
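/* The two wait loops above follow a common bounded-poll idiom.  A
 * minimal sketch of the same pattern (illustrative only; this helper
 * does not exist in the driver):
 */
#if 0
static int cas_wait_bit_clear(void __iomem *reg, u32 bit, int tries)
{
	while (readl(reg) & bit) {
		if (!tries--)
			return -ETIMEDOUT;	/* gave up waiting */
		udelay(10);
	}
	return 0;
}
#endif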
4529
4530static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4531{
4532	struct cas *cp = netdev_priv(dev);
4533	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4534	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4535	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4536	info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
4537		cp->casreg_len : CAS_MAX_REGS;
4538	info->n_stats = CAS_NUM_STAT_KEYS;
4539}
4540
4541static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4542{
4543	struct cas *cp = netdev_priv(dev);
4544	u16 bmcr;
4545	int full_duplex, speed, pause;
4546	unsigned long flags;
4547	enum link_state linkstate = link_up;
4548
4549	cmd->advertising = 0;
4550	cmd->supported = SUPPORTED_Autoneg;
4551	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4552		cmd->supported |= SUPPORTED_1000baseT_Full;
4553		cmd->advertising |= ADVERTISED_1000baseT_Full;
4554	}
4555
4556	/* Record PHY settings if HW is on. */
4557	spin_lock_irqsave(&cp->lock, flags);
4558	bmcr = 0;
4559	linkstate = cp->lstate;
4560	if (CAS_PHY_MII(cp->phy_type)) {
4561		cmd->port = PORT_MII;
4562		cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
4563			XCVR_INTERNAL : XCVR_EXTERNAL;
4564		cmd->phy_address = cp->phy_addr;
4565		cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4566			ADVERTISED_10baseT_Half |
4567			ADVERTISED_10baseT_Full |
4568			ADVERTISED_100baseT_Half |
4569			ADVERTISED_100baseT_Full;
4570
4571		cmd->supported |=
4572			(SUPPORTED_10baseT_Half |
4573			 SUPPORTED_10baseT_Full |
4574			 SUPPORTED_100baseT_Half |
4575			 SUPPORTED_100baseT_Full |
4576			 SUPPORTED_TP | SUPPORTED_MII);
4577
4578		if (cp->hw_running) {
4579			cas_mif_poll(cp, 0);
4580			bmcr = cas_phy_read(cp, MII_BMCR);
4581			cas_read_mii_link_mode(cp, &full_duplex,
4582					       &speed, &pause);
4583			cas_mif_poll(cp, 1);
4584		}
4585
4586	} else {
4587		cmd->port = PORT_FIBRE;
4588		cmd->transceiver = XCVR_INTERNAL;
4589		cmd->phy_address = 0;
4590		cmd->supported   |= SUPPORTED_FIBRE;
4591		cmd->advertising |= ADVERTISED_FIBRE;
4592
4593		if (cp->hw_running) {
4594			/* pcs uses the same bits as mii */
4595			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4596			cas_read_pcs_link_mode(cp, &full_duplex,
4597					       &speed, &pause);
4598		}
4599	}
4600	spin_unlock_irqrestore(&cp->lock, flags);
4601
4602	if (bmcr & BMCR_ANENABLE) {
4603		cmd->advertising |= ADVERTISED_Autoneg;
4604		cmd->autoneg = AUTONEG_ENABLE;
4605		ethtool_cmd_speed_set(cmd, ((speed == 10) ?
4606					    SPEED_10 :
4607					    ((speed == 1000) ?
4608					     SPEED_1000 : SPEED_100)));
4609		cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4610	} else {
4611		cmd->autoneg = AUTONEG_DISABLE;
4612		ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
4613					    SPEED_1000 :
4614					    ((bmcr & BMCR_SPEED100) ?
4615					     SPEED_100 : SPEED_10)));
4616		cmd->duplex =
4617			(bmcr & BMCR_FULLDPLX) ?
4618			DUPLEX_FULL : DUPLEX_HALF;
4619	}
4620	if (linkstate != link_up) {
4621		/* Force these to "unknown" if the link is not up and
4622		 * autonegotiation is enabled. We can set the link
4623		 * speed to 0, but not cmd->duplex,
4624		 * because its legal values are 0 and 1.  Ethtool will
4625		 * print the value reported in parentheses after the
4626		 * word "Unknown" for unrecognized values.
4627		 *
4628		 * If in forced mode, we report the speed and duplex
4629		 * settings that we configured.
4630		 */
4631		if (cp->link_cntl & BMCR_ANENABLE) {
4632			ethtool_cmd_speed_set(cmd, 0);
4633			cmd->duplex = 0xff;
4634		} else {
4635			ethtool_cmd_speed_set(cmd, SPEED_10);
4636			if (cp->link_cntl & BMCR_SPEED100) {
4637				ethtool_cmd_speed_set(cmd, SPEED_100);
4638			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4639				ethtool_cmd_speed_set(cmd, SPEED_1000);
4640			}
4641			cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
4642				DUPLEX_FULL : DUPLEX_HALF;
4643		}
4644	}
4645	return 0;
4646}
4647
4648static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4649{
4650	struct cas *cp = netdev_priv(dev);
4651	unsigned long flags;
4652	u32 speed = ethtool_cmd_speed(cmd);
4653
4654	/* Verify the settings we care about. */
4655	if (cmd->autoneg != AUTONEG_ENABLE &&
4656	    cmd->autoneg != AUTONEG_DISABLE)
4657		return -EINVAL;
4658
4659	if (cmd->autoneg == AUTONEG_DISABLE &&
4660	    ((speed != SPEED_1000 &&
4661	      speed != SPEED_100 &&
4662	      speed != SPEED_10) ||
4663	     (cmd->duplex != DUPLEX_HALF &&
4664	      cmd->duplex != DUPLEX_FULL)))
4665		return -EINVAL;
4666
4667	/* Apply settings and restart link process. */
4668	spin_lock_irqsave(&cp->lock, flags);
4669	cas_begin_auto_negotiation(cp, cmd);
4670	spin_unlock_irqrestore(&cp->lock, flags);
4671	return 0;
4672}
4673
4674static int cas_nway_reset(struct net_device *dev)
4675{
4676	struct cas *cp = netdev_priv(dev);
4677	unsigned long flags;
4678
4679	if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4680		return -EINVAL;
4681
4682	/* Restart link process. */
4683	spin_lock_irqsave(&cp->lock, flags);
4684	cas_begin_auto_negotiation(cp, NULL);
4685	spin_unlock_irqrestore(&cp->lock, flags);
4686
4687	return 0;
4688}
4689
4690static u32 cas_get_link(struct net_device *dev)
4691{
4692	struct cas *cp = netdev_priv(dev);
4693	return cp->lstate == link_up;
4694}
4695
4696static u32 cas_get_msglevel(struct net_device *dev)
4697{
4698	struct cas *cp = netdev_priv(dev);
4699	return cp->msg_enable;
4700}
4701
4702static void cas_set_msglevel(struct net_device *dev, u32 value)
4703{
4704	struct cas *cp = netdev_priv(dev);
4705	cp->msg_enable = value;
4706}
4707
4708static int cas_get_regs_len(struct net_device *dev)
4709{
4710	struct cas *cp = netdev_priv(dev);
4711	return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
4712}
4713
4714static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4715			     void *p)
4716{
4717	struct cas *cp = netdev_priv(dev);
4718	regs->version = 0;
4719	/* cas_read_regs handles locks (cp->lock).  */
4720	cas_read_regs(cp, p, regs->len / sizeof(u32));
4721}
4722
4723static int cas_get_sset_count(struct net_device *dev, int sset)
4724{
4725	switch (sset) {
4726	case ETH_SS_STATS:
4727		return CAS_NUM_STAT_KEYS;
4728	default:
4729		return -EOPNOTSUPP;
4730	}
4731}
4732
4733static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4734{
4735	memcpy(data, &ethtool_cassini_statnames,
4736	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4737}
4738
4739static void cas_get_ethtool_stats(struct net_device *dev,
4740				      struct ethtool_stats *estats, u64 *data)
4741{
4742	struct cas *cp = netdev_priv(dev);
4743	struct net_device_stats *stats = cas_get_stats(cp->dev);
4744	int i = 0;
4745	data[i++] = stats->collisions;
4746	data[i++] = stats->rx_bytes;
4747	data[i++] = stats->rx_crc_errors;
4748	data[i++] = stats->rx_dropped;
4749	data[i++] = stats->rx_errors;
4750	data[i++] = stats->rx_fifo_errors;
4751	data[i++] = stats->rx_frame_errors;
4752	data[i++] = stats->rx_length_errors;
4753	data[i++] = stats->rx_over_errors;
4754	data[i++] = stats->rx_packets;
4755	data[i++] = stats->tx_aborted_errors;
4756	data[i++] = stats->tx_bytes;
4757	data[i++] = stats->tx_dropped;
4758	data[i++] = stats->tx_errors;
4759	data[i++] = stats->tx_fifo_errors;
4760	data[i++] = stats->tx_packets;
4761	BUG_ON(i != CAS_NUM_STAT_KEYS);
4762}
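/* The fill order above must match ethtool_cassini_statnames exactly;
 * the BUG_ON() asserts only that the entry count agrees.
 */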
4763
4764static const struct ethtool_ops cas_ethtool_ops = {
4765	.get_drvinfo		= cas_get_drvinfo,
4766	.get_settings		= cas_get_settings,
4767	.set_settings		= cas_set_settings,
4768	.nway_reset		= cas_nway_reset,
4769	.get_link		= cas_get_link,
4770	.get_msglevel		= cas_get_msglevel,
4771	.set_msglevel		= cas_set_msglevel,
4772	.get_regs_len		= cas_get_regs_len,
4773	.get_regs		= cas_get_regs,
4774	.get_sset_count		= cas_get_sset_count,
4775	.get_strings		= cas_get_strings,
4776	.get_ethtool_stats	= cas_get_ethtool_stats,
4777};
4778
4779static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4780{
4781	struct cas *cp = netdev_priv(dev);
4782	struct mii_ioctl_data *data = if_mii(ifr);
4783	unsigned long flags;
4784	int rc = -EOPNOTSUPP;
4785
4786	/* Hold the PM mutex while doing ioctl's or we may collide
4787	 * with open/close and power management and oops.
4788	 */
4789	mutex_lock(&cp->pm_mutex);
4790	switch (cmd) {
4791	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
4792		data->phy_id = cp->phy_addr;
4793		/* Fallthrough... */
4794
4795	case SIOCGMIIREG:		/* Read MII PHY register. */
4796		spin_lock_irqsave(&cp->lock, flags);
4797		cas_mif_poll(cp, 0);
4798		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4799		cas_mif_poll(cp, 1);
4800		spin_unlock_irqrestore(&cp->lock, flags);
4801		rc = 0;
4802		break;
4803
4804	case SIOCSMIIREG:		/* Write MII PHY register. */
4805		spin_lock_irqsave(&cp->lock, flags);
4806		cas_mif_poll(cp, 0);
4807		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4808		cas_mif_poll(cp, 1);
4809		spin_unlock_irqrestore(&cp->lock, flags);
4810		break;
4811	default:
4812		break;
4813	}
4814
4815	mutex_unlock(&cp->pm_mutex);
4816	return rc;
4817}
4818
4819/* When this chip sits underneath an Intel 31154 bridge, it is the
4820 * only subordinate device and we can tweak the bridge settings to
4821 * reflect that fact.
4822 */
4823static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
4824{
4825	struct pci_dev *pdev = cas_pdev->bus->self;
4826	u32 val;
4827
4828	if (!pdev)
4829		return;
4830
4831	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4832		return;
4833
4834	/* Clear bit 10 (Bus Parking Control) in the Secondary
4835	 * Arbiter Control/Status Register which lives at offset
4836	 * 0x41.  Using a 32-bit word read/modify/write at 0x40
4837	 * is much simpler so that's how we do this.
4838	 */
4839	pci_read_config_dword(pdev, 0x40, &val);
4840	val &= ~0x00040000;
4841	pci_write_config_dword(pdev, 0x40, val);
4842
4843	/* Max out the Multi-Transaction Timer settings since
4844	 * Cassini is the only device present.
4845	 *
4846	 * The register is 16-bit and lives at 0x50.  When the
4847	 * settings are enabled, it extends the GRANT# signal
4848	 * for a requestor after a transaction is complete.  This
4849	 * allows the next request to run without first needing
4850	 * to negotiate the GRANT# signal back.
4851	 *
4852	 * Bits 12:10 define the grant duration:
4853	 *
4854	 *	1	--	16 clocks
4855	 *	2	--	32 clocks
4856	 *	3	--	64 clocks
4857	 *	4	--	128 clocks
4858	 *	5	--	256 clocks
4859	 *
4860	 * All other values are illegal.
4861	 *
4862	 * Bits 09:00 define which REQ/GNT signal pairs get the
4863	 * GRANT# signal treatment.  We set them all.
4864	 */
4865	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
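	/* i.e. 0x17ff: 256-clock grant duration, all ten REQ/GNT pairs */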
4866
4867	/* The Read Prefetch Policy register is 16-bit and sits at
4868	 * offset 0x52.  It enables a "smart" pre-fetch policy.  We
4869	 * enable it and max out all of the settings since only one
4870	 * device is sitting underneath and thus bandwidth sharing is
4871	 * not an issue.
4872	 *
4873	 * The register has several 3-bit fields, each indicating a
4874	 * multiplier applied to the base amount of prefetching the
4875	 * chip would do.  These fields are at:
4876	 *
4877	 *	15:13	---	ReRead Primary Bus
4878	 *	12:10	---	FirstRead Primary Bus
4879	 *	09:07	---	ReRead Secondary Bus
4880	 *	06:04	---	FirstRead Secondary Bus
4881	 *
4882	 * Bits 03:00 control which REQ/GNT pairs the prefetch settings
4883	 * get enabled on.  Bit 3 is a grouped enabler which controls
4884	 * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
4885	 * the individual REQ/GNT pairs [2:0].
4886	 */
4887	pci_write_config_word(pdev, 0x52,
4888			      (0x7 << 13) |
4889			      (0x7 << 10) |
4890			      (0x7 <<  7) |
4891			      (0x7 <<  4) |
4892			      (0xf <<  0));
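	/* The OR of these fields is 0xffff: every prefetch multiplier at
	 * its maximum and all REQ/GNT enables set.
	 */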
4893
4894	/* Force cacheline size to 0x8 */
4895	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4896
4897	/* Force latency timer to maximum setting so Cassini can
4898	 * sit on the bus as long as it likes.
4899	 */
4900	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4901}
4902
4903static const struct net_device_ops cas_netdev_ops = {
4904	.ndo_open		= cas_open,
4905	.ndo_stop		= cas_close,
4906	.ndo_start_xmit		= cas_start_xmit,
4907	.ndo_get_stats 		= cas_get_stats,
4908	.ndo_set_rx_mode	= cas_set_multicast,
4909	.ndo_do_ioctl		= cas_ioctl,
4910	.ndo_tx_timeout		= cas_tx_timeout,
4911	.ndo_change_mtu		= cas_change_mtu,
4912	.ndo_set_mac_address	= eth_mac_addr,
4913	.ndo_validate_addr	= eth_validate_addr,
4914#ifdef CONFIG_NET_POLL_CONTROLLER
4915	.ndo_poll_controller	= cas_netpoll,
4916#endif
4917};
4918
4919static int __devinit cas_init_one(struct pci_dev *pdev,
4920				  const struct pci_device_id *ent)
4921{
4922	static int cas_version_printed = 0;
4923	unsigned long casreg_len;
4924	struct net_device *dev;
4925	struct cas *cp;
4926	int i, err, pci_using_dac;
4927	u16 pci_cmd;
4928	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4929
4930	if (cas_version_printed++ == 0)
4931		pr_info("%s", version);
4932
4933	err = pci_enable_device(pdev);
4934	if (err) {
4935		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4936		return err;
4937	}
4938
4939	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4940		dev_err(&pdev->dev, "Cannot find proper PCI device "
4941		       "base address, aborting\n");
4942		err = -ENODEV;
4943		goto err_out_disable_pdev;
4944	}
4945
4946	dev = alloc_etherdev(sizeof(*cp));
4947	if (!dev) {
4948		err = -ENOMEM;
4949		goto err_out_disable_pdev;
4950	}
4951	SET_NETDEV_DEV(dev, &pdev->dev);
4952
4953	err = pci_request_regions(pdev, dev->name);
4954	if (err) {
4955		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4956		goto err_out_free_netdev;
4957	}
4958	pci_set_master(pdev);
4959
4960	/* we must always turn on parity response or else parity
4961	 * doesn't get generated properly. disable SERR/PERR as well.
4962	 * in addition, we want to turn MWI on.
4963	 */
4964	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4965	pci_cmd &= ~PCI_COMMAND_SERR;
4966	pci_cmd |= PCI_COMMAND_PARITY;
4967	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4968	if (pci_try_set_mwi(pdev))
4969		pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
4970
4971	cas_program_bridge(pdev);
4972
4973	/*
4974	 * On some architectures, the default cache line size set
4975	 * by pci_try_set_mwi reduces performance.  We have to increase
4976	 * it for this case.  To start, we read back the current
4977	 * setting.
4978	 */
4979#if 1
4980	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4981			     &orig_cacheline_size);
4982	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4983		cas_cacheline_size =
4984			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4985			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4986		if (pci_write_config_byte(pdev,
4987					  PCI_CACHE_LINE_SIZE,
4988					  cas_cacheline_size)) {
4989			dev_err(&pdev->dev, "Could not set PCI cache "
4990			       "line size\n");
4991			goto err_write_cacheline;
4992		}
4993	}
4994#endif
4995
4996
4997	/* Configure DMA attributes. */
4998	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4999		pci_using_dac = 1;
5000		err = pci_set_consistent_dma_mask(pdev,
5001						  DMA_BIT_MASK(64));
5002		if (err < 0) {
5003			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
5004			       "for consistent allocations\n");
5005			goto err_out_free_res;
5006		}
5007
5008	} else {
5009		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5010		if (err) {
5011			dev_err(&pdev->dev, "No usable DMA configuration, "
5012			       "aborting\n");
5013			goto err_out_free_res;
5014		}
5015		pci_using_dac = 0;
5016	}
5017
5018	casreg_len = pci_resource_len(pdev, 0);
5019
5020	cp = netdev_priv(dev);
5021	cp->pdev = pdev;
5022#if 1
5023	/* A value of 0 indicates we never explicitly set it */
5024	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
5025#endif
5026	cp->dev = dev;
5027	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5028	  cassini_debug;
5029
5030#if defined(CONFIG_SPARC)
5031	cp->of_node = pci_device_to_OF_node(pdev);
5032#endif
5033
5034	cp->link_transition = LINK_TRANSITION_UNKNOWN;
5035	cp->link_transition_jiffies_valid = 0;
5036
5037	spin_lock_init(&cp->lock);
5038	spin_lock_init(&cp->rx_inuse_lock);
5039	spin_lock_init(&cp->rx_spare_lock);
5040	for (i = 0; i < N_TX_RINGS; i++) {
5041		spin_lock_init(&cp->stat_lock[i]);
5042		spin_lock_init(&cp->tx_lock[i]);
5043	}
5044	spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
5045	mutex_init(&cp->pm_mutex);
5046
5047	init_timer(&cp->link_timer);
5048	cp->link_timer.function = cas_link_timer;
5049	cp->link_timer.data = (unsigned long) cp;
5050
5051#if 1
5052	/* Just in case the implementation of atomic operations
5053	 * change so that an explicit initialization is necessary.
5054	 */
5055	atomic_set(&cp->reset_task_pending, 0);
5056	atomic_set(&cp->reset_task_pending_all, 0);
5057	atomic_set(&cp->reset_task_pending_spare, 0);
5058	atomic_set(&cp->reset_task_pending_mtu, 0);
5059#endif
5060	INIT_WORK(&cp->reset_task, cas_reset_task);
5061
5062	/* Default link parameters */
5063	if (link_mode >= 0 && link_mode < 6)
5064		cp->link_cntl = link_modes[link_mode];
5065	else
5066		cp->link_cntl = BMCR_ANENABLE;
5067	cp->lstate = link_down;
5068	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
5069	netif_carrier_off(cp->dev);
5070	cp->timer_ticks = 0;
5071
5072	/* give us access to cassini registers */
5073	cp->regs = pci_iomap(pdev, 0, casreg_len);
5074	if (!cp->regs) {
5075		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5076		goto err_out_free_res;
5077	}
5078	cp->casreg_len = casreg_len;
5079
5080	pci_save_state(pdev);
5081	cas_check_pci_invariants(cp);
5082	cas_hard_reset(cp);
5083	cas_reset(cp, 0);
5084	if (cas_check_invariants(cp))
5085		goto err_out_iounmap;
5086	if (cp->cas_flags & CAS_FLAG_SATURN)
5087		if (cas_saturn_firmware_init(cp))
5088			goto err_out_iounmap;
5089
5090	cp->init_block = (struct cas_init_block *)
5091		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5092				     &cp->block_dvma);
5093	if (!cp->init_block) {
5094		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5095		goto err_out_iounmap;
5096	}
5097
5098	for (i = 0; i < N_TX_RINGS; i++)
5099		cp->init_txds[i] = cp->init_block->txds[i];
5100
5101	for (i = 0; i < N_RX_DESC_RINGS; i++)
5102		cp->init_rxds[i] = cp->init_block->rxds[i];
5103
5104	for (i = 0; i < N_RX_COMP_RINGS; i++)
5105		cp->init_rxcs[i] = cp->init_block->rxcs[i];
5106
5107	for (i = 0; i < N_RX_FLOWS; i++)
5108		skb_queue_head_init(&cp->rx_flows[i]);
5109
5110	dev->netdev_ops = &cas_netdev_ops;
5111	dev->ethtool_ops = &cas_ethtool_ops;
5112	dev->watchdog_timeo = CAS_TX_TIMEOUT;
5113
5114#ifdef USE_NAPI
5115	netif_napi_add(dev, &cp->napi, cas_poll, 64);
5116#endif
5117	dev->irq = pdev->irq;
5118	dev->dma = 0;
5119
5120	/* Cassini features. */
5121	if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5122		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5123
5124	if (pci_using_dac)
5125		dev->features |= NETIF_F_HIGHDMA;
5126
5127	if (register_netdev(dev)) {
5128		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5129		goto err_out_free_consistent;
5130	}
5131
5132	i = readl(cp->regs + REG_BIM_CFG);
5133	netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5134		    (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5135		    (i & BIM_CFG_32BIT) ? "32" : "64",
5136		    (i & BIM_CFG_66MHZ) ? "66" : "33",
5137		    (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5138		    dev->dev_addr);
5139
5140	pci_set_drvdata(pdev, dev);
5141	cp->hw_running = 1;
5142	cas_entropy_reset(cp);
5143	cas_phy_init(cp);
5144	cas_begin_auto_negotiation(cp, NULL);
5145	return 0;
5146
5147err_out_free_consistent:
5148	pci_free_consistent(pdev, sizeof(struct cas_init_block),
5149			    cp->init_block, cp->block_dvma);
5150
5151err_out_iounmap:
5152	mutex_lock(&cp->pm_mutex);
5153	if (cp->hw_running)
5154		cas_shutdown(cp);
5155	mutex_unlock(&cp->pm_mutex);
5156
5157	pci_iounmap(pdev, cp->regs);
5158
5159
5160err_out_free_res:
5161	pci_release_regions(pdev);
5162
5163err_write_cacheline:
5164	/* Try to restore it in case the error occurred after we
5165	 * set it.
5166	 */
5167	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5168
5169err_out_free_netdev:
5170	free_netdev(dev);
5171
5172err_out_disable_pdev:
5173	pci_disable_device(pdev);
5174	pci_set_drvdata(pdev, NULL);
5175	return -ENODEV;
5176}
5177
5178static void __devexit cas_remove_one(struct pci_dev *pdev)
5179{
5180	struct net_device *dev = pci_get_drvdata(pdev);
5181	struct cas *cp;
5182	if (!dev)
5183		return;
5184
5185	cp = netdev_priv(dev);
5186	unregister_netdev(dev);
5187
5188	if (cp->fw_data)
5189		vfree(cp->fw_data);
5190
5191	mutex_lock(&cp->pm_mutex);
5192	cancel_work_sync(&cp->reset_task);
5193	if (cp->hw_running)
5194		cas_shutdown(cp);
5195	mutex_unlock(&cp->pm_mutex);
5196
5197#if 1
5198	if (cp->orig_cacheline_size) {
5199		/* Restore the cache line size if we had modified
5200		 * it.
5201		 */
5202		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5203				      cp->orig_cacheline_size);
5204	}
5205#endif
5206	pci_free_consistent(pdev, sizeof(struct cas_init_block),
5207			    cp->init_block, cp->block_dvma);
5208	pci_iounmap(pdev, cp->regs);
5209	free_netdev(dev);
5210	pci_release_regions(pdev);
5211	pci_disable_device(pdev);
5212	pci_set_drvdata(pdev, NULL);
5213}
5214
5215#ifdef CONFIG_PM
5216static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5217{
5218	struct net_device *dev = pci_get_drvdata(pdev);
5219	struct cas *cp = netdev_priv(dev);
5220	unsigned long flags;
5221
5222	mutex_lock(&cp->pm_mutex);
5223
5224	/* If the driver is opened, we stop the DMA */
5225	if (cp->opened) {
5226		netif_device_detach(dev);
5227
5228		cas_lock_all_save(cp, flags);
5229
5230		/* We can set the second arg of cas_reset to 0
5231		 * because on resume, we'll call cas_init_hw with
5232		 * its second arg set so that autonegotiation is
5233		 * restarted.
5234		 */
5235		cas_reset(cp, 0);
5236		cas_clean_rings(cp);
5237		cas_unlock_all_restore(cp, flags);
5238	}
5239
5240	if (cp->hw_running)
5241		cas_shutdown(cp);
5242	mutex_unlock(&cp->pm_mutex);
5243
5244	return 0;
5245}
5246
5247static int cas_resume(struct pci_dev *pdev)
5248{
5249	struct net_device *dev = pci_get_drvdata(pdev);
5250	struct cas *cp = netdev_priv(dev);
5251
5252	netdev_info(dev, "resuming\n");
5253
5254	mutex_lock(&cp->pm_mutex);
5255	cas_hard_reset(cp);
5256	if (cp->opened) {
5257		unsigned long flags;
5258		cas_lock_all_save(cp, flags);
5259		cas_reset(cp, 0);
5260		cp->hw_running = 1;
5261		cas_clean_rings(cp);
5262		cas_init_hw(cp, 1);
5263		cas_unlock_all_restore(cp, flags);
5264
5265		netif_device_attach(dev);
5266	}
5267	mutex_unlock(&cp->pm_mutex);
5268	return 0;
5269}
5270#endif /* CONFIG_PM */
5271
5272static struct pci_driver cas_driver = {
5273	.name		= DRV_MODULE_NAME,
5274	.id_table	= cas_pci_tbl,
5275	.probe		= cas_init_one,
5276	.remove		= __devexit_p(cas_remove_one),
5277#ifdef CONFIG_PM
5278	.suspend	= cas_suspend,
5279	.resume		= cas_resume
5280#endif
5281};
5282
5283static int __init cas_init(void)
5284{
5285	if (linkdown_timeout > 0)
5286		link_transition_timeout = linkdown_timeout * HZ;
5287	else
5288		link_transition_timeout = 0;
5289
5290	return pci_register_driver(&cas_driver);
5291}
5292
5293static void __exit cas_cleanup(void)
5294{
5295	pci_unregister_driver(&cas_driver);
5296}
5297
5298module_init(cas_init);
5299module_exit(cas_cleanup);