   1/*	tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
   2
   3	Copyright 2000,2001  The Linux Kernel Team
   4	Written/copyright 1994-2001 by Donald Becker.
   5
   6	This software may be used and distributed according to the terms
   7	of the GNU General Public License, incorporated herein by reference.
   8
   9	Please submit bugs to http://bugzilla.kernel.org/ .
  10*/
  11
  12#define pr_fmt(fmt) "tulip: " fmt
  13
  14#define DRV_NAME	"tulip"
  15
  16#include <linux/module.h>
  17#include <linux/pci.h>
  18#include <linux/slab.h>
  19#include "tulip.h"
  20#include <linux/init.h>
  21#include <linux/interrupt.h>
  22#include <linux/etherdevice.h>
  23#include <linux/delay.h>
  24#include <linux/mii.h>
  25#include <linux/crc32.h>
  26#include <asm/unaligned.h>
  27#include <linux/uaccess.h>
  28
  29#ifdef CONFIG_SPARC
  30#include <asm/prom.h>
  31#endif
  32
  33/* A few user-configurable values. */
  34
  35/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
  36static unsigned int max_interrupt_work = 25;
  37
  38#define MAX_UNITS 8
  39/* Used to pass the full-duplex flag, etc. */
  40static int full_duplex[MAX_UNITS];
  41static int options[MAX_UNITS];
  42static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
  43
  44/*  The possible media types that can be set in options[] are: */
  45const char * const medianame[32] = {
  46	"10baseT", "10base2", "AUI", "100baseTx",
  47	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
  48	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
  49	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
  50	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
  51	"","","","", "","","","",  "","","","Transceiver reset",
  52};
  53
  54/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
  55#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
  56	defined(CONFIG_SPARC) || defined(__ia64__) || \
  57	defined(__sh__) || defined(__mips__)
  58static int rx_copybreak = 1518;
  59#else
  60static int rx_copybreak = 100;
  61#endif
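/* Sketch of the usual copybreak convention (the Rx path that consumes
 * this value lives in interrupt.c): frames shorter than rx_copybreak
 * are copied into a freshly allocated skb so the full-sized receive
 * buffer can be handed straight back to the chip; 1518 copies every
 * frame, which presumably suits the architectures above that dislike
 * the unaligned accesses a shared buffer would otherwise cause.
 */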
  62
  63/*
  64  Set the bus performance register.
  65	Typical: Set 16 longword cache alignment, no burst limit.
  66	Cache alignment bits 15:14	     Burst length 13:8
  67		0000	No alignment  0x00000000 unlimited		0800 8 longwords
  68		4000	8  longwords		0100 1 longword		1000 16 longwords
  69		8000	16 longwords		0200 2 longwords	2000 32 longwords
  70		C000	32  longwords		0400 4 longwords
  71	Warning: many older 486 systems are broken and require setting 0x00A04800
  72	   8 longword cache alignment, 8 longword burst.
  73	ToDo: Non-Intel setting could be better.
  74*/
  75
  76#if defined(__alpha__) || defined(__ia64__)
  77static int csr0 = 0x01A00000 | 0xE000;
  78#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
  79static int csr0 = 0x01A00000 | 0x8000;
  80#elif defined(CONFIG_SPARC) || defined(__hppa__)
  81/* The UltraSparc PCI controllers will disconnect at every 64-byte
  82 * crossing anyways so it makes no sense to tell Tulip to burst
  83 * any more than that.
  84 */
  85static int csr0 = 0x01A00000 | 0x9000;
  86#elif defined(__arm__) || defined(__sh__)
  87static int csr0 = 0x01A00000 | 0x4800;
  88#elif defined(__mips__)
  89static int csr0 = 0x00200000 | 0x4000;
  90#else
  91static int csr0;
  92#endif
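/* Worked example against the table above, assuming the usual tulip.h
 * bit names (MWI/MRL/MRM at bits 24/23/21, cache alignment at 15:14,
 * burst length at 13:8): the x86 default 0x01A00000 | 0x8000 requests
 * MWI+MRL+MRM, 16-longword cache alignment and an unlimited burst;
 * SPARC/HPPA's 0x9000 instead caps bursts at 16 longwords, and the
 * ARM/SH value 0x4800 is the 8-longword/8-longword setting, i.e. the
 * low half of the 0x00A04800 workaround mentioned in the warning.
 */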
  93
  94/* Operational parameters that usually are not changed. */
  95/* Time in jiffies before concluding the transmitter is hung. */
  96#define TX_TIMEOUT  (4*HZ)
  97
  98
  99MODULE_AUTHOR("The Linux Kernel Team");
 100MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
 101MODULE_LICENSE("GPL");
 102module_param(tulip_debug, int, 0);
 103module_param(max_interrupt_work, int, 0);
 104module_param(rx_copybreak, int, 0);
 105module_param(csr0, int, 0);
 106module_param_array(options, int, NULL, 0);
 107module_param_array(full_duplex, int, NULL, 0);
 108
 109#ifdef TULIP_DEBUG
 110int tulip_debug = TULIP_DEBUG;
 111#else
 112int tulip_debug = 1;
 113#endif
 114
 115static void tulip_timer(struct timer_list *t)
 116{
 117	struct tulip_private *tp = from_timer(tp, t, timer);
 118	struct net_device *dev = tp->dev;
 119
 120	if (netif_running(dev))
 121		schedule_work(&tp->media_work);
 122}
 123
 124/*
  125 * This table is used during operation for capabilities and the media timer.
 126 *
 127 * It is indexed via the values in 'enum chips'
 128 */
 129
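/* Positional fields, as consumed elsewhere in this file: chip name,
 * CSR region size (checked against the PCI BAR in tulip_init_one),
 * the valid-interrupt mask written to CSR5/CSR7 in tulip_up, feature
 * flags, the media timer callback, and the media-task work handler.
 */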
 130const struct tulip_chip_table tulip_tbl[] = {
 131  { }, /* placeholder for array, slot unused currently */
 132  { }, /* placeholder for array, slot unused currently */
 133
 134  /* DC21140 */
 135  { "Digital DS21140 Tulip", 128, 0x0001ebef,
 136	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
 137	tulip_media_task },
 138
 139  /* DC21142, DC21143 */
 140  { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
 141	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
 142	| HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
 143
 144  /* LC82C168 */
 145  { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
 146	HAS_MII | HAS_PNICNWAY, pnic_timer, },
 147
 148  /* MX98713 */
 149  { "Macronix 98713 PMAC", 128, 0x0001ebef,
 150	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
 151
 152  /* MX98715 */
 153  { "Macronix 98715 PMAC", 256, 0x0001ebef,
 154	HAS_MEDIA_TABLE, mxic_timer, },
 155
 156  /* MX98725 */
 157  { "Macronix 98725 PMAC", 256, 0x0001ebef,
 158	HAS_MEDIA_TABLE, mxic_timer, },
 159
 160  /* AX88140 */
 161  { "ASIX AX88140", 128, 0x0001fbff,
 162	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
 163	| IS_ASIX, tulip_timer, tulip_media_task },
 164
 165  /* PNIC2 */
 166  { "Lite-On PNIC-II", 256, 0x0801fbff,
 167	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
 168
 169  /* COMET */
 170  { "ADMtek Comet", 256, 0x0001abef,
 171	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
 172
 173  /* COMPEX9881 */
 174  { "Compex 9881 PMAC", 128, 0x0001ebef,
 175	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
 176
 177  /* I21145 */
 178  { "Intel DS21145 Tulip", 128, 0x0801fbff,
 179	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
 180	| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
 181
 182  /* DM910X */
 183#ifdef CONFIG_TULIP_DM910X
 184  { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
 185	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
 186	tulip_timer, tulip_media_task },
 187#else
 188  { NULL },
 189#endif
 190
 191  /* RS7112 */
 192  { "Conexant LANfinity", 256, 0x0001ebef,
 193	HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
 194
 195};
 196
 197
 198static const struct pci_device_id tulip_pci_tbl[] = {
 199	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
 200	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
 201	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
 202	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
 203	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
 204/*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
 205	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
 206	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
 207	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 208	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 209	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 210	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 211	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 212	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 213	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 214	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 215	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 216	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 217	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
 218	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
 219#ifdef CONFIG_TULIP_DM910X
 220	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
 221	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
 222#endif
 223	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 224	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
 225	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 226	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 227	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 228	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 229	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
 230	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 231	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 232	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 233	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 234	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
 235	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
 236	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
 237	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 238	{ } /* terminate list */
 239};
 240MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
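/* The last field (driver_data) of each entry above is an index into
 * tulip_tbl[] via 'enum chips'; tulip_init_one() reads it back as
 * chip_idx from ent->driver_data.
 */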
 241
 242
 243/* A full-duplex map for media types. */
 244const char tulip_media_cap[32] =
 245{0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
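/* Despite the name, each entry looks like a small capability bitmask
 * rather than a plain duplex flag: judging by the MediaIsFD,
 * MediaAlwaysFD, MediaIsMII and MediaIs100 tests below, bit 0 marks
 * full duplex, bit 1 "always full duplex", bit 2 an MII transceiver,
 * bit 3 fiber and bit 4 100 Mb/s operation.
 */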
 246
 247static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue);
 248static void tulip_init_ring(struct net_device *dev);
 249static void tulip_free_ring(struct net_device *dev);
 250static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
 251					  struct net_device *dev);
 252static int tulip_open(struct net_device *dev);
 253static int tulip_close(struct net_device *dev);
 254static void tulip_up(struct net_device *dev);
 255static void tulip_down(struct net_device *dev);
 256static struct net_device_stats *tulip_get_stats(struct net_device *dev);
 257static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 258static void set_rx_mode(struct net_device *dev);
 259static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
 260#ifdef CONFIG_NET_POLL_CONTROLLER
 261static void poll_tulip(struct net_device *dev);
 262#endif
 263
 264static void tulip_set_power_state (struct tulip_private *tp,
 265				   int sleep, int snooze)
 266{
 267	if (tp->flags & HAS_ACPI) {
 268		u32 tmp, newtmp;
 269		pci_read_config_dword (tp->pdev, CFDD, &tmp);
 270		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
 271		if (sleep)
 272			newtmp |= CFDD_Sleep;
 273		else if (snooze)
 274			newtmp |= CFDD_Snooze;
 275		if (tmp != newtmp)
 276			pci_write_config_dword (tp->pdev, CFDD, newtmp);
 277	}
 278
 279}
 280
 281
 282static void tulip_up(struct net_device *dev)
 283{
 284	struct tulip_private *tp = netdev_priv(dev);
 285	void __iomem *ioaddr = tp->base_addr;
 286	int next_tick = 3*HZ;
 287	u32 reg;
 288	int i;
 289
 290#ifdef CONFIG_TULIP_NAPI
 291	napi_enable(&tp->napi);
 292#endif
 293
 294	/* Wake the chip from sleep/snooze mode. */
 295	tulip_set_power_state (tp, 0, 0);
 296
 297	/* Disable all WOL events */
 298	pci_enable_wake(tp->pdev, PCI_D3hot, 0);
 299	pci_enable_wake(tp->pdev, PCI_D3cold, 0);
 300	tulip_set_wolopts(tp->pdev, 0);
 301
 302	/* On some chip revs we must set the MII/SYM port before the reset!? */
 303	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
 304		iowrite32(0x00040000, ioaddr + CSR6);
 305
 306	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
 307	iowrite32(0x00000001, ioaddr + CSR0);
 308	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
 309	udelay(100);
 310
 311	/* Deassert reset.
 312	   Wait the specified 50 PCI cycles after a reset by initializing
 313	   Tx and Rx queues and the address filter list. */
 314	iowrite32(tp->csr0, ioaddr + CSR0);
 315	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
 316	udelay(100);
 317
 318	if (tulip_debug > 1)
 319		netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
 320
 321	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
 322	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
 323	tp->cur_rx = tp->cur_tx = 0;
 324	tp->dirty_rx = tp->dirty_tx = 0;
 325
 326	if (tp->flags & MC_HASH_ONLY) {
 327		u32 addr_low = get_unaligned_le32(dev->dev_addr);
 328		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
 329		if (tp->chip_id == AX88140) {
 330			iowrite32(0, ioaddr + CSR13);
 331			iowrite32(addr_low,  ioaddr + CSR14);
 332			iowrite32(1, ioaddr + CSR13);
 333			iowrite32(addr_high, ioaddr + CSR14);
 334		} else if (tp->flags & COMET_MAC_ADDR) {
 335			iowrite32(addr_low,  ioaddr + 0xA4);
 336			iowrite32(addr_high, ioaddr + 0xA8);
 337			iowrite32(0, ioaddr + CSR27);
 338			iowrite32(0, ioaddr + CSR28);
 339		}
 340	} else {
 341		/* This is set_rx_mode(), but without starting the transmitter. */
 342		const u16 *eaddrs = (const u16 *)dev->dev_addr;
 343		u16 *setup_frm = &tp->setup_frame[15*6];
 344		dma_addr_t mapping;
 345
 346		/* 21140 bug: you must add the broadcast address. */
 347		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
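		/* Layout sketch: the 192-byte setup frame holds 16
		 * perfect-filter entries of 12 bytes each, every 16-bit
		 * word of an address stored twice (hence the doubled
		 * stores below); the 0xff fill leaves the unused entries
		 * as broadcast addresses. */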
 348		/* Fill the final entry of the table with our physical address. */
 349		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
 350		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
 351		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
 352
 353		mapping = dma_map_single(&tp->pdev->dev, tp->setup_frame,
 354					 sizeof(tp->setup_frame),
 355					 DMA_TO_DEVICE);
 356		tp->tx_buffers[tp->cur_tx].skb = NULL;
 357		tp->tx_buffers[tp->cur_tx].mapping = mapping;
 358
 359		/* Put the setup frame on the Tx list. */
 360		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
 361		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
 362		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
 363
 364		tp->cur_tx++;
 365	}
 366
 367	tp->saved_if_port = dev->if_port;
 368	if (dev->if_port == 0)
 369		dev->if_port = tp->default_port;
 370
 371	/* Allow selecting a default media. */
 372	i = 0;
 373	if (tp->mtable == NULL)
 374		goto media_picked;
 375	if (dev->if_port) {
 376		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
 377			(dev->if_port == 12 ? 0 : dev->if_port);
 378		for (i = 0; i < tp->mtable->leafcount; i++)
 379			if (tp->mtable->mleaf[i].media == looking_for) {
 380				dev_info(&dev->dev,
 381					 "Using user-specified media %s\n",
 382					 medianame[dev->if_port]);
 383				goto media_picked;
 384			}
 385	}
 386	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
 387		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
 388		for (i = 0; i < tp->mtable->leafcount; i++)
 389			if (tp->mtable->mleaf[i].media == looking_for) {
 390				dev_info(&dev->dev,
 391					 "Using EEPROM-set media %s\n",
 392					 medianame[looking_for]);
 393				goto media_picked;
 394			}
 395	}
 396	/* Start sensing first non-full-duplex media. */
 397	for (i = tp->mtable->leafcount - 1;
 398		 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
 399		;
 400media_picked:
 401
 402	tp->csr6 = 0;
 403	tp->cur_index = i;
 404	tp->nwayset = 0;
 405
 406	if (dev->if_port) {
 407		if (tp->chip_id == DC21143  &&
 408		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
 409			/* We must reset the media CSRs when we force-select MII mode. */
 410			iowrite32(0x0000, ioaddr + CSR13);
 411			iowrite32(0x0000, ioaddr + CSR14);
 412			iowrite32(0x0008, ioaddr + CSR15);
 413		}
 414		tulip_select_media(dev, 1);
 415	} else if (tp->chip_id == DC21142) {
 416		if (tp->mii_cnt) {
 417			tulip_select_media(dev, 1);
 418			if (tulip_debug > 1)
 419				dev_info(&dev->dev,
 420					 "Using MII transceiver %d, status %04x\n",
 421					 tp->phys[0],
 422					 tulip_mdio_read(dev, tp->phys[0], 1));
 423			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
 424			tp->csr6 = csr6_mask_hdcap;
 425			dev->if_port = 11;
 426			iowrite32(0x0000, ioaddr + CSR13);
 427			iowrite32(0x0000, ioaddr + CSR14);
 428		} else
 429			t21142_start_nway(dev);
 430	} else if (tp->chip_id == PNIC2) {
 431	        /* for initial startup advertise 10/100 Full and Half */
 432	        tp->sym_advertise = 0x01E0;
 433                /* enable autonegotiate end interrupt */
 434	        iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
 435	        iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
 436		pnic2_start_nway(dev);
 437	} else if (tp->chip_id == LC82C168  &&  ! tp->medialock) {
 438		if (tp->mii_cnt) {
 439			dev->if_port = 11;
 440			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
 441			iowrite32(0x0001, ioaddr + CSR15);
 442		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
 443			pnic_do_nway(dev);
 444		else {
 445			/* Start with 10mbps to do autonegotiation. */
 446			iowrite32(0x32, ioaddr + CSR12);
 447			tp->csr6 = 0x00420000;
 448			iowrite32(0x0001B078, ioaddr + 0xB8);
 449			iowrite32(0x0201B078, ioaddr + 0xB8);
 450			next_tick = 1*HZ;
 451		}
 452	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
 453		   ! tp->medialock) {
 454		dev->if_port = 0;
 455		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
 456		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
 457	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
 458		/* Provided by BOLO, Macronix - 12/10/1998. */
 459		dev->if_port = 0;
 460		tp->csr6 = 0x01a80200;
 461		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
 462		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
 463	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
 464		/* Enable automatic Tx underrun recovery. */
 465		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
 466		dev->if_port = tp->mii_cnt ? 11 : 0;
 467		tp->csr6 = 0x00040000;
 468	} else if (tp->chip_id == AX88140) {
 469		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
 470	} else
 471		tulip_select_media(dev, 1);
 472
 473	/* Start the chip's Tx to process setup frame. */
 474	tulip_stop_rxtx(tp);
 475	barrier();
 476	udelay(5);
 477	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
 478
 479	/* Enable interrupts by setting the interrupt mask. */
 480	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
 481	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
 482	tulip_start_rxtx(tp);
 483	iowrite32(0, ioaddr + CSR2);		/* Rx poll demand */
 484
 485	if (tulip_debug > 2) {
 486		netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
 487			   ioread32(ioaddr + CSR0),
 488			   ioread32(ioaddr + CSR5),
 489			   ioread32(ioaddr + CSR6));
 490	}
 491
  492	/* Set the timer to check for link beat and perhaps switch to an
  493	   alternate media type. */
 494	tp->timer.expires = RUN_AT(next_tick);
 495	add_timer(&tp->timer);
 496#ifdef CONFIG_TULIP_NAPI
 497	timer_setup(&tp->oom_timer, oom_timer, 0);
 498#endif
 499}
 500
 501static int
 502tulip_open(struct net_device *dev)
 503{
 504	struct tulip_private *tp = netdev_priv(dev);
 505	int retval;
 506
 507	tulip_init_ring (dev);
 508
 509	retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
 510			     dev->name, dev);
 511	if (retval)
 512		goto free_ring;
 513
 514	tulip_up (dev);
 515
 516	netif_start_queue (dev);
 517
 518	return 0;
 519
 520free_ring:
 521	tulip_free_ring (dev);
 522	return retval;
 523}
 524
 525
 526static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue)
 527{
 528	struct tulip_private *tp = netdev_priv(dev);
 529	void __iomem *ioaddr = tp->base_addr;
 530	unsigned long flags;
 531
 532	spin_lock_irqsave (&tp->lock, flags);
 533
 534	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
 535		/* Do nothing -- the media monitor should handle this. */
 536		if (tulip_debug > 1)
 537			dev_warn(&dev->dev,
 538				 "Transmit timeout using MII device\n");
 539	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
 540		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
 541		   tp->chip_id == DM910X) {
 542		dev_warn(&dev->dev,
 543			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
 544			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
 545			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
 546			 ioread32(ioaddr + CSR15));
 547		tp->timeout_recovery = 1;
 548		schedule_work(&tp->media_work);
 549		goto out_unlock;
 550	} else if (tp->chip_id == PNIC2) {
 551		dev_warn(&dev->dev,
 552			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
 553			 (int)ioread32(ioaddr + CSR5),
 554			 (int)ioread32(ioaddr + CSR6),
 555			 (int)ioread32(ioaddr + CSR7),
 556			 (int)ioread32(ioaddr + CSR12));
 557	} else {
 558		dev_warn(&dev->dev,
 559			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
 560			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
 561		dev->if_port = 0;
 562	}
 563
 564#if defined(way_too_many_messages)
 565	if (tulip_debug > 3) {
 566		int i;
 567		for (i = 0; i < RX_RING_SIZE; i++) {
 568			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
 569			int j;
 570			printk(KERN_DEBUG
 571			       "%2d: %08x %08x %08x %08x  %02x %02x %02x\n",
 572			       i,
 573			       (unsigned int)tp->rx_ring[i].status,
 574			       (unsigned int)tp->rx_ring[i].length,
 575			       (unsigned int)tp->rx_ring[i].buffer1,
 576			       (unsigned int)tp->rx_ring[i].buffer2,
 577			       buf[0], buf[1], buf[2]);
 578			for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
 579				if (j < 100)
 580					pr_cont(" %02x", buf[j]);
 581			pr_cont(" j=%d\n", j);
 582		}
 583		printk(KERN_DEBUG "  Rx ring %p: ", tp->rx_ring);
 584		for (i = 0; i < RX_RING_SIZE; i++)
 585			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
 586		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
 587		for (i = 0; i < TX_RING_SIZE; i++)
 588			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
 589		pr_cont("\n");
 590	}
 591#endif
 592
 593	tulip_tx_timeout_complete(tp, ioaddr);
 594
 595out_unlock:
 596	spin_unlock_irqrestore (&tp->lock, flags);
 597	netif_trans_update(dev); /* prevent tx timeout */
 598	netif_wake_queue (dev);
 599}
 600
 601
 602/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 603static void tulip_init_ring(struct net_device *dev)
 604{
 605	struct tulip_private *tp = netdev_priv(dev);
 606	int i;
 607
 608	tp->susp_rx = 0;
 609	tp->ttimer = 0;
 610	tp->nir = 0;
 611
 612	for (i = 0; i < RX_RING_SIZE; i++) {
 613		tp->rx_ring[i].status = 0x00000000;
 614		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
 615		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
 616		tp->rx_buffers[i].skb = NULL;
 617		tp->rx_buffers[i].mapping = 0;
 618	}
 619	/* Mark the last entry as wrapping the ring. */
 620	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
 621	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
 622
 623	for (i = 0; i < RX_RING_SIZE; i++) {
 624		dma_addr_t mapping;
 625
 626		/* Note the receive buffer must be longword aligned.
 627		   netdev_alloc_skb() provides 16 byte alignment.  But do *not*
 628		   use skb_reserve() to align the IP header! */
 629		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
 630		tp->rx_buffers[i].skb = skb;
 631		if (skb == NULL)
 632			break;
 633		mapping = dma_map_single(&tp->pdev->dev, skb->data,
 634					 PKT_BUF_SZ, DMA_FROM_DEVICE);
 635		tp->rx_buffers[i].mapping = mapping;
 636		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
 637		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
 638	}
 639	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 640
 641	/* The Tx buffer descriptor is filled in as needed, but we
 642	   do need to clear the ownership bit. */
 643	for (i = 0; i < TX_RING_SIZE; i++) {
 644		tp->tx_buffers[i].skb = NULL;
 645		tp->tx_buffers[i].mapping = 0;
 646		tp->tx_ring[i].status = 0x00000000;
 647		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
 648	}
 649	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
 650}
 651
 652static netdev_tx_t
 653tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 654{
 655	struct tulip_private *tp = netdev_priv(dev);
 656	int entry;
 657	u32 flag;
 658	dma_addr_t mapping;
 659	unsigned long flags;
 660
 661	spin_lock_irqsave(&tp->lock, flags);
 662
 663	/* Calculate the next Tx descriptor entry. */
 664	entry = tp->cur_tx % TX_RING_SIZE;
 665
 666	tp->tx_buffers[entry].skb = skb;
 667	mapping = dma_map_single(&tp->pdev->dev, skb->data, skb->len,
 668				 DMA_TO_DEVICE);
 669	tp->tx_buffers[entry].mapping = mapping;
 670	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
 671
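	/* Descriptor-flag sketch: 0x60000000 marks a single-buffer frame
	 * (first + last segment); 0xe0000000 additionally requests a
	 * Tx-done interrupt, which is only asked for at certain fill
	 * levels so the chip doesn't interrupt on every packet.
	 * DESC_RING_WRAP tells the chip to wrap back to the first
	 * descriptor. */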
 672	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
 673		flag = 0x60000000; /* No interrupt */
 674	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
 675		flag = 0xe0000000; /* Tx-done intr. */
 676	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
 677		flag = 0x60000000; /* No Tx-done intr. */
 678	} else {		/* Leave room for set_rx_mode() to fill entries. */
 679		flag = 0xe0000000; /* Tx-done intr. */
 680		netif_stop_queue(dev);
 681	}
 682	if (entry == TX_RING_SIZE-1)
 683		flag = 0xe0000000 | DESC_RING_WRAP;
 684
 685	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
 686	/* if we were using Transmit Automatic Polling, we would need a
 687	 * wmb() here. */
 688	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
 689	wmb();
 690
 691	tp->cur_tx++;
 692
 693	/* Trigger an immediate transmit demand. */
 694	iowrite32(0, tp->base_addr + CSR1);
 695
 696	spin_unlock_irqrestore(&tp->lock, flags);
 697
 698	return NETDEV_TX_OK;
 699}
 700
 701static void tulip_clean_tx_ring(struct tulip_private *tp)
 702{
 703	unsigned int dirty_tx;
 704
 705	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
 706		dirty_tx++) {
 707		int entry = dirty_tx % TX_RING_SIZE;
 708		int status = le32_to_cpu(tp->tx_ring[entry].status);
 709
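		/* A negative status means DescOwned (bit 31) is still set,
		 * i.e. the chip never completed this descriptor. */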
 710		if (status < 0) {
 711			tp->dev->stats.tx_errors++;	/* It wasn't Txed */
 712			tp->tx_ring[entry].status = 0;
 713		}
 714
 715		/* Check for Tx filter setup frames. */
 716		if (tp->tx_buffers[entry].skb == NULL) {
 717			/* test because dummy frames not mapped */
 718			if (tp->tx_buffers[entry].mapping)
 719				dma_unmap_single(&tp->pdev->dev,
 720						 tp->tx_buffers[entry].mapping,
 721						 sizeof(tp->setup_frame),
 722						 DMA_TO_DEVICE);
 723			continue;
 724		}
 725
 726		dma_unmap_single(&tp->pdev->dev,
 727				 tp->tx_buffers[entry].mapping,
 728				 tp->tx_buffers[entry].skb->len,
 729				 DMA_TO_DEVICE);
 730
 731		/* Free the original skb. */
 732		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
 733		tp->tx_buffers[entry].skb = NULL;
 734		tp->tx_buffers[entry].mapping = 0;
 735	}
 736}
 737
 738static void tulip_down (struct net_device *dev)
 739{
 740	struct tulip_private *tp = netdev_priv(dev);
 741	void __iomem *ioaddr = tp->base_addr;
 742	unsigned long flags;
 743
 744	cancel_work_sync(&tp->media_work);
 745
 746#ifdef CONFIG_TULIP_NAPI
 747	napi_disable(&tp->napi);
 748#endif
 749
 750	del_timer_sync (&tp->timer);
 751#ifdef CONFIG_TULIP_NAPI
 752	del_timer_sync (&tp->oom_timer);
 753#endif
 754	spin_lock_irqsave (&tp->lock, flags);
 755
 756	/* Disable interrupts by clearing the interrupt mask. */
 757	iowrite32 (0x00000000, ioaddr + CSR7);
 758
 759	/* Stop the Tx and Rx processes. */
 760	tulip_stop_rxtx(tp);
 761
 762	/* prepare receive buffers */
 763	tulip_refill_rx(dev);
 764
 765	/* release any unconsumed transmit buffers */
 766	tulip_clean_tx_ring(tp);
 767
 768	if (ioread32(ioaddr + CSR6) != 0xffffffff)
 769		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
 770
 771	spin_unlock_irqrestore (&tp->lock, flags);
 772
 773	timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
 774
 775	dev->if_port = tp->saved_if_port;
 776
 777	/* Leave the driver in snooze, not sleep, mode. */
 778	tulip_set_power_state (tp, 0, 1);
 779}
 780
 781static void tulip_free_ring (struct net_device *dev)
 782{
 783	struct tulip_private *tp = netdev_priv(dev);
 784	int i;
 785
 786	/* Free all the skbuffs in the Rx queue. */
 787	for (i = 0; i < RX_RING_SIZE; i++) {
 788		struct sk_buff *skb = tp->rx_buffers[i].skb;
 789		dma_addr_t mapping = tp->rx_buffers[i].mapping;
 790
 791		tp->rx_buffers[i].skb = NULL;
 792		tp->rx_buffers[i].mapping = 0;
 793
 794		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
 795		tp->rx_ring[i].length = 0;
 796		/* An invalid address. */
 797		tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
 798		if (skb) {
 799			dma_unmap_single(&tp->pdev->dev, mapping, PKT_BUF_SZ,
 800					 DMA_FROM_DEVICE);
 801			dev_kfree_skb (skb);
 802		}
 803	}
 804
 805	for (i = 0; i < TX_RING_SIZE; i++) {
 806		struct sk_buff *skb = tp->tx_buffers[i].skb;
 807
 808		if (skb != NULL) {
 809			dma_unmap_single(&tp->pdev->dev,
 810					 tp->tx_buffers[i].mapping, skb->len,
 811					 DMA_TO_DEVICE);
 812			dev_kfree_skb (skb);
 813		}
 814		tp->tx_buffers[i].skb = NULL;
 815		tp->tx_buffers[i].mapping = 0;
 816	}
 817}
 818
 819static int tulip_close (struct net_device *dev)
 820{
 821	struct tulip_private *tp = netdev_priv(dev);
 822	void __iomem *ioaddr = tp->base_addr;
 823
 824	netif_stop_queue (dev);
 825
 826	tulip_down (dev);
 827
 828	if (tulip_debug > 1)
 829		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
 830			   ioread32 (ioaddr + CSR5));
 831
 832	free_irq (tp->pdev->irq, dev);
 833
 834	tulip_free_ring (dev);
 835
 836	return 0;
 837}
 838
 839static struct net_device_stats *tulip_get_stats(struct net_device *dev)
 840{
 841	struct tulip_private *tp = netdev_priv(dev);
 842	void __iomem *ioaddr = tp->base_addr;
 843
 844	if (netif_running(dev)) {
 845		unsigned long flags;
 846
 847		spin_lock_irqsave (&tp->lock, flags);
 848
 849		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
 850
 851		spin_unlock_irqrestore(&tp->lock, flags);
 852	}
 853
 854	return &dev->stats;
 855}
 856
 857
 858static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 859{
 860	struct tulip_private *np = netdev_priv(dev);
 861	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
 862	strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 863}
 864
 865
 866static int tulip_ethtool_set_wol(struct net_device *dev,
 867				 struct ethtool_wolinfo *wolinfo)
 868{
 869	struct tulip_private *tp = netdev_priv(dev);
 870
 871	if (wolinfo->wolopts & (~tp->wolinfo.supported))
 872		   return -EOPNOTSUPP;
 873
 874	tp->wolinfo.wolopts = wolinfo->wolopts;
 875	device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
 876	return 0;
 877}
 878
 879static void tulip_ethtool_get_wol(struct net_device *dev,
 880				  struct ethtool_wolinfo *wolinfo)
 881{
 882	struct tulip_private *tp = netdev_priv(dev);
 883
 884	wolinfo->supported = tp->wolinfo.supported;
 885	wolinfo->wolopts = tp->wolinfo.wolopts;
 886	return;
 887}
 888
 889
 890static const struct ethtool_ops ops = {
 891	.get_drvinfo = tulip_get_drvinfo,
 892	.set_wol     = tulip_ethtool_set_wol,
 893	.get_wol     = tulip_ethtool_get_wol,
 894};
 895
 896/* Provide ioctl() calls to examine the MII xcvr state. */
 897static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
 898{
 899	struct tulip_private *tp = netdev_priv(dev);
 900	void __iomem *ioaddr = tp->base_addr;
 901	struct mii_ioctl_data *data = if_mii(rq);
 902	const unsigned int phy_idx = 0;
 903	int phy = tp->phys[phy_idx] & 0x1f;
 904	unsigned int regnum = data->reg_num;
 905
 906	switch (cmd) {
 907	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
 908		if (tp->mii_cnt)
 909			data->phy_id = phy;
 910		else if (tp->flags & HAS_NWAY)
 911			data->phy_id = 32;
 912		else if (tp->chip_id == COMET)
 913			data->phy_id = 1;
 914		else
 915			return -ENODEV;
 916		fallthrough;
 917
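	/* Chips flagged HAS_NWAY have no real MII PHY behind their NWay
	 * logic, so pseudo-PHY address 32 is reported above and the basic
	 * MII registers are synthesized from CSR12/CSR14 in the read case
	 * below. */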
 918	case SIOCGMIIREG:		/* Read MII PHY register. */
 919		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
 920			int csr12 = ioread32 (ioaddr + CSR12);
 921			int csr14 = ioread32 (ioaddr + CSR14);
 922			switch (regnum) {
 923			case 0:
 924                                if (((csr14<<5) & 0x1000) ||
 925                                        (dev->if_port == 5 && tp->nwayset))
 926                                        data->val_out = 0x1000;
 927                                else
 928                                        data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
 929                                                | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
 930				break;
 931			case 1:
 932                                data->val_out =
 933					0x1848 +
 934					((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
 935					((csr12&0x06) == 6 ? 0 : 4);
 936                                data->val_out |= 0x6048;
 937				break;
 938			case 4:
 939                                /* Advertised value, bogus 10baseTx-FD value from CSR6. */
 940                                data->val_out =
 941					((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
 942					((csr14 >> 1) & 0x20) + 1;
 943                                data->val_out |= ((csr14 >> 9) & 0x03C0);
 944				break;
 945			case 5: data->val_out = tp->lpar; break;
 946			default: data->val_out = 0; break;
 947			}
 948		} else {
 949			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
 950		}
 951		return 0;
 952
 953	case SIOCSMIIREG:		/* Write MII PHY register. */
 954		if (regnum & ~0x1f)
 955			return -EINVAL;
 956		if (data->phy_id == phy) {
 957			u16 value = data->val_in;
 958			switch (regnum) {
 959			case 0:	/* Check for autonegotiation on or reset. */
 960				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
 961				if (tp->full_duplex_lock)
 962					tp->full_duplex = (value & 0x0100) ? 1 : 0;
 963				break;
 964			case 4:
 965				tp->advertising[phy_idx] =
 966				tp->mii_advertise = data->val_in;
 967				break;
 968			}
 969		}
 970		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
 971			u16 value = data->val_in;
 972			if (regnum == 0) {
 973			  if ((value & 0x1200) == 0x1200) {
 974			    if (tp->chip_id == PNIC2) {
 975                                   pnic2_start_nway (dev);
 976                            } else {
 977				   t21142_start_nway (dev);
 978                            }
 979			  }
 980			} else if (regnum == 4)
 981				tp->sym_advertise = value;
 982		} else {
 983			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
 984		}
 985		return 0;
 986	default:
 987		return -EOPNOTSUPP;
 988	}
 989
 990	return -EOPNOTSUPP;
 991}
 992
 993
 994/* Set or clear the multicast filter for this adaptor.
 995   Note that we only use exclusion around actually queueing the
 996   new frame, not around filling tp->setup_frame.  This is non-deterministic
 997   when re-entered but still correct. */
 998
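/* Hash-filter sketch: the imperfect filter is a 512-bit table, and each
 * multicast address sets the bit
 *
 *	index = ether_crc_le(ETH_ALEN, addr) & 0x1ff;
 *
 * i.e. the low 9 bits of the little-endian CRC-32.  As in the perfect
 * filter, every 16-bit word is written to the setup frame twice, and the
 * station's own address is placed where the hash-filtering frame format
 * expects it.
 */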
 999static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1000{
1001	struct tulip_private *tp = netdev_priv(dev);
1002	u16 hash_table[32];
1003	struct netdev_hw_addr *ha;
1004	const u16 *eaddrs;
1005	int i;
1006
1007	memset(hash_table, 0, sizeof(hash_table));
1008	__set_bit_le(255, hash_table);			/* Broadcast entry */
1009	/* This should work on big-endian machines as well. */
1010	netdev_for_each_mc_addr(ha, dev) {
1011		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1012
1013		__set_bit_le(index, hash_table);
1014	}
1015	for (i = 0; i < 32; i++) {
1016		*setup_frm++ = hash_table[i];
1017		*setup_frm++ = hash_table[i];
1018	}
1019	setup_frm = &tp->setup_frame[13*6];
1020
1021	/* Fill the final entry with our physical address. */
1022	eaddrs = (const u16 *)dev->dev_addr;
1023	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1024	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1025	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1026}
1027
1028static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1029{
1030	struct tulip_private *tp = netdev_priv(dev);
1031	struct netdev_hw_addr *ha;
1032	const u16 *eaddrs;
1033
1034	/* We have <= 14 addresses so we can use the wonderful
1035	   16 address perfect filtering of the Tulip. */
1036	netdev_for_each_mc_addr(ha, dev) {
1037		eaddrs = (u16 *) ha->addr;
1038		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1039		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1040		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1041	}
1042	/* Fill the unused entries with the broadcast address. */
1043	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1044	setup_frm = &tp->setup_frame[15*6];
1045
1046	/* Fill the final entry with our physical address. */
1047	eaddrs = (const u16 *)dev->dev_addr;
1048	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1049	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1050	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1051}
1052
1053
1054static void set_rx_mode(struct net_device *dev)
1055{
1056	struct tulip_private *tp = netdev_priv(dev);
1057	void __iomem *ioaddr = tp->base_addr;
1058	int csr6;
1059
1060	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1061
1062	tp->csr6 &= ~0x00D5;
1063	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1064		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1065		csr6 |= AcceptAllMulticast | AcceptAllPhys;
1066	} else if ((netdev_mc_count(dev) > 1000) ||
1067		   (dev->flags & IFF_ALLMULTI)) {
1068		/* Too many to filter well -- accept all multicasts. */
1069		tp->csr6 |= AcceptAllMulticast;
1070		csr6 |= AcceptAllMulticast;
1071	} else	if (tp->flags & MC_HASH_ONLY) {
1072		/* Some work-alikes have only a 64-entry hash filter table. */
1073		/* Should verify correctness on big-endian/__powerpc__ */
1074		struct netdev_hw_addr *ha;
1075		if (netdev_mc_count(dev) > 64) {
1076			/* Arbitrary non-effective limit. */
1077			tp->csr6 |= AcceptAllMulticast;
1078			csr6 |= AcceptAllMulticast;
1079		} else {
1080			u32 mc_filter[2] = {0, 0};		 /* Multicast hash filter */
1081			int filterbit;
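			/* 64-bin hash: Comet-style parts use the low 6 bits
			 * of the little-endian CRC, the others the top 6
			 * bits of the big-endian CRC; the result indexes one
			 * bit in the two 32-bit filter registers written
			 * below. */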
1082			netdev_for_each_mc_addr(ha, dev) {
1083				if (tp->flags & COMET_MAC_ADDR)
1084					filterbit = ether_crc_le(ETH_ALEN,
1085								 ha->addr);
1086				else
1087					filterbit = ether_crc(ETH_ALEN,
1088							      ha->addr) >> 26;
1089				filterbit &= 0x3f;
1090				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1091				if (tulip_debug > 2)
1092					dev_info(&dev->dev,
1093						 "Added filter for %pM  %08x bit %d\n",
1094						 ha->addr,
1095						 ether_crc(ETH_ALEN, ha->addr),
1096						 filterbit);
1097			}
1098			if (mc_filter[0] == tp->mc_filter[0]  &&
1099				mc_filter[1] == tp->mc_filter[1])
1100				;				/* No change. */
1101			else if (tp->flags & IS_ASIX) {
1102				iowrite32(2, ioaddr + CSR13);
1103				iowrite32(mc_filter[0], ioaddr + CSR14);
1104				iowrite32(3, ioaddr + CSR13);
1105				iowrite32(mc_filter[1], ioaddr + CSR14);
1106			} else if (tp->flags & COMET_MAC_ADDR) {
1107				iowrite32(mc_filter[0], ioaddr + CSR27);
1108				iowrite32(mc_filter[1], ioaddr + CSR28);
1109			}
1110			tp->mc_filter[0] = mc_filter[0];
1111			tp->mc_filter[1] = mc_filter[1];
1112		}
1113	} else {
1114		unsigned long flags;
1115		u32 tx_flags = 0x08000000 | 192;
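		/* Sketch of the length-word bits: 192 is the setup frame
		 * size, bit 27 (0x08000000) flags the descriptor as a setup
		 * frame, and the extra 0x00400000 used below for the hash
		 * case appears to switch the chip from perfect to imperfect
		 * (hash) filtering. */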
1116
1117		/* Note that only the low-address shortword of setup_frame is valid!
1118		   The values are doubled for big-endian architectures. */
1119		if (netdev_mc_count(dev) > 14) {
1120			/* Must use a multicast hash table. */
1121			build_setup_frame_hash(tp->setup_frame, dev);
1122			tx_flags = 0x08400000 | 192;
1123		} else {
1124			build_setup_frame_perfect(tp->setup_frame, dev);
1125		}
1126
1127		spin_lock_irqsave(&tp->lock, flags);
1128
1129		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1130			/* Same setup recently queued, we need not add it. */
1131		} else {
1132			unsigned int entry;
1133			int dummy = -1;
1134
1135			/* Now add this frame to the Tx list. */
1136
1137			entry = tp->cur_tx++ % TX_RING_SIZE;
1138
1139			if (entry != 0) {
 1140				/* Avoid a chip erratum by prefixing a dummy entry. */
1141				tp->tx_buffers[entry].skb = NULL;
1142				tp->tx_buffers[entry].mapping = 0;
1143				tp->tx_ring[entry].length =
1144					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1145				tp->tx_ring[entry].buffer1 = 0;
1146				/* Must set DescOwned later to avoid race with chip */
1147				dummy = entry;
1148				entry = tp->cur_tx++ % TX_RING_SIZE;
1149
1150			}
1151
1152			tp->tx_buffers[entry].skb = NULL;
1153			tp->tx_buffers[entry].mapping =
1154				dma_map_single(&tp->pdev->dev,
1155					       tp->setup_frame,
1156					       sizeof(tp->setup_frame),
1157					       DMA_TO_DEVICE);
1158			/* Put the setup frame on the Tx list. */
1159			if (entry == TX_RING_SIZE-1)
1160				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
1161			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1162			tp->tx_ring[entry].buffer1 =
1163				cpu_to_le32(tp->tx_buffers[entry].mapping);
1164			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1165			if (dummy >= 0)
1166				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1167			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1168				netif_stop_queue(dev);
1169
1170			/* Trigger an immediate transmit demand. */
1171			iowrite32(0, ioaddr + CSR1);
1172		}
1173
1174		spin_unlock_irqrestore(&tp->lock, flags);
1175	}
1176
1177	iowrite32(csr6, ioaddr + CSR6);
1178}
1179
1180#ifdef CONFIG_TULIP_MWI
1181static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
1182{
1183	struct tulip_private *tp = netdev_priv(dev);
1184	u8 cache;
1185	u16 pci_command;
1186	u32 csr0;
1187
1188	if (tulip_debug > 3)
1189		netdev_dbg(dev, "tulip_mwi_config()\n");
1190
1191	tp->csr0 = csr0 = 0;
1192
1193	/* if we have any cache line size at all, we can do MRM and MWI */
1194	csr0 |= MRM | MWI;
1195
1196	/* Enable MWI in the standard PCI command bit.
1197	 * Check for the case where MWI is desired but not available
1198	 */
1199	pci_try_set_mwi(pdev);
1200
1201	/* read result from hardware (in case bit refused to enable) */
1202	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1203	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1204		csr0 &= ~MWI;
1205
1206	/* if cache line size hardwired to zero, no MWI */
1207	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1208	if ((csr0 & MWI) && (cache == 0)) {
1209		csr0 &= ~MWI;
1210		pci_clear_mwi(pdev);
1211	}
1212
1213	/* assign per-cacheline-size cache alignment and
1214	 * burst length values
1215	 */
1216	switch (cache) {
1217	case 8:
1218		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1219		break;
1220	case 16:
1221		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1222		break;
1223	case 32:
1224		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1225		break;
1226	default:
1227		cache = 0;
1228		break;
1229	}
1230
1231	/* if we have a good cache line size, we by now have a good
1232	 * csr0, so save it and exit
1233	 */
1234	if (cache)
1235		goto out;
1236
1237	/* we don't have a good csr0 or cache line size, disable MWI */
1238	if (csr0 & MWI) {
1239		pci_clear_mwi(pdev);
1240		csr0 &= ~MWI;
1241	}
1242
1243	/* sane defaults for burst length and cache alignment
1244	 * originally from de4x5 driver
1245	 */
1246	csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1247
1248out:
1249	tp->csr0 = csr0;
1250	if (tulip_debug > 2)
1251		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1252			   cache, csr0);
1253}
1254#endif
1255
1256/*
1257 *	Chips that have the MRM/reserved bit quirk and the burst quirk. That
1258 *	is the DM910X and the on chip ULi devices
1259 */
1260
1261static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1262{
1263	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1264		return 1;
1265	return 0;
1266}
1267
1268static const struct net_device_ops tulip_netdev_ops = {
1269	.ndo_open		= tulip_open,
1270	.ndo_start_xmit		= tulip_start_xmit,
1271	.ndo_tx_timeout		= tulip_tx_timeout,
1272	.ndo_stop		= tulip_close,
1273	.ndo_get_stats		= tulip_get_stats,
1274	.ndo_eth_ioctl		= private_ioctl,
1275	.ndo_set_rx_mode	= set_rx_mode,
1276	.ndo_set_mac_address	= eth_mac_addr,
1277	.ndo_validate_addr	= eth_validate_addr,
1278#ifdef CONFIG_NET_POLL_CONTROLLER
1279	.ndo_poll_controller	 = poll_tulip,
1280#endif
1281};
1282
1283static const struct pci_device_id early_486_chipsets[] = {
1284	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1285	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1286	{ },
1287};
1288
1289static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1290{
1291	struct tulip_private *tp;
1292	/* See note below on the multiport cards. */
1293	static unsigned char last_phys_addr[ETH_ALEN] = {
1294		0x00, 'L', 'i', 'n', 'u', 'x'
1295	};
1296#if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
1297	static int last_irq;
1298#endif
1299	int i, irq;
1300	unsigned short sum;
1301	unsigned char *ee_data;
1302	struct net_device *dev;
1303	void __iomem *ioaddr;
1304	static int board_idx = -1;
1305	int chip_idx = ent->driver_data;
1306	const char *chip_name = tulip_tbl[chip_idx].chip_name;
1307	unsigned int eeprom_missing = 0;
1308	u8 addr[ETH_ALEN] __aligned(2);
1309	unsigned int force_csr0 = 0;
1310
1311	board_idx++;
1312
1313	/*
 1314	 *	LMC (LAN Media) cards wire a tulip chip to a WAN interface.
 1315	 *	They need a very different driver (the lmc WAN driver).
1316	 */
1317
1318        if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1319		pr_err("skipping LMC card\n");
1320		return -ENODEV;
1321	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1322		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1323		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1324		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1325		pr_err("skipping SBE T3E3 port\n");
1326		return -ENODEV;
1327	}
1328
1329	/*
1330	 *	DM910x chips should be handled by the dmfe driver, except
1331	 *	on-board chips on SPARC systems.  Also, early DM9100s need
1332	 *	software CRC which only the dmfe driver supports.
1333	 */
1334
1335#ifdef CONFIG_TULIP_DM910X
1336	if (chip_idx == DM910X) {
1337		struct device_node *dp;
1338
1339		if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1340		    pdev->revision < 0x30) {
1341			pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1342			return -ENODEV;
1343		}
1344
1345		dp = pci_device_to_OF_node(pdev);
1346		if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1347			pr_info("skipping DM910x expansion card (use dmfe)\n");
1348			return -ENODEV;
1349		}
1350	}
1351#endif
1352
1353	/*
 1354	 *	Look for early PCI chipsets where people report hangs
 1355	 *	unless the workarounds are enabled.
1356	 */
1357
1358	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1359	      aligned.  Aries might need this too. The Saturn errata are not
1360	      pretty reading but thankfully it's an old 486 chipset.
1361
1362	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
1363	      Saturn.
1364	*/
1365
1366	if (pci_dev_present(early_486_chipsets)) {
1367		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1368		force_csr0 = 1;
1369	}
1370
1371	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
1372	if (chip_idx == AX88140) {
1373		if ((csr0 & 0x3f00) == 0)
1374			csr0 |= 0x2000;
1375	}
1376
1377	/* PNIC doesn't have MWI/MRL/MRM... */
1378	if (chip_idx == LC82C168)
1379		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1380
 1381	/* DM9102A has trouble with MRM; also clear reserved bits 24:22, 20, 16, 7:1 */
1382	if (tulip_uli_dm_quirk(pdev)) {
1383		csr0 &= ~0x01f100ff;
1384#if defined(CONFIG_SPARC)
1385                csr0 = (csr0 & ~0xff00) | 0xe000;
1386#endif
1387	}
1388	/*
1389	 *	And back to business
1390	 */
1391
1392	i = pcim_enable_device(pdev);
1393	if (i) {
1394		pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1395		return i;
1396	}
1397
1398	irq = pdev->irq;
1399
1400	/* alloc_etherdev ensures aligned and zeroed private structures */
1401	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp));
1402	if (!dev)
1403		return -ENOMEM;
1404
1405	SET_NETDEV_DEV(dev, &pdev->dev);
1406	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1407		pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1408		       pci_name(pdev),
1409		       (unsigned long long)pci_resource_len (pdev, 0),
1410		       (unsigned long long)pci_resource_start (pdev, 0));
1411		return -ENODEV;
1412	}
1413
1414	/* grab all resources from both PIO and MMIO regions, as we
1415	 * don't want anyone else messing around with our hardware */
1416	if (pci_request_regions(pdev, DRV_NAME))
1417		return -ENODEV;
1418
1419	ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1420
1421	if (!ioaddr)
1422		return -ENODEV;
1423
1424	/*
1425	 * initialize private data structure 'tp'
1426	 * it is zeroed and aligned in alloc_etherdev
1427	 */
1428	tp = netdev_priv(dev);
1429	tp->dev = dev;
1430
1431	tp->rx_ring = dmam_alloc_coherent(&pdev->dev,
1432					  sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1433					  sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1434					  &tp->rx_ring_dma, GFP_KERNEL);
1435	if (!tp->rx_ring)
1436		return -ENODEV;
1437	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1438	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1439
1440	tp->chip_id = chip_idx;
1441	tp->flags = tulip_tbl[chip_idx].flags;
1442
1443	tp->wolinfo.supported = 0;
1444	tp->wolinfo.wolopts = 0;
1445	/* COMET: Enable power management only for AN983B */
1446	if (chip_idx == COMET ) {
1447		u32 sig;
1448		pci_read_config_dword (pdev, 0x80, &sig);
1449		if (sig == 0x09811317) {
1450			tp->flags |= COMET_PM;
1451			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1452			pr_info("%s: Enabled WOL support for AN983B\n",
1453				__func__);
1454		}
1455	}
1456	tp->pdev = pdev;
1457	tp->base_addr = ioaddr;
1458	tp->revision = pdev->revision;
1459	tp->csr0 = csr0;
1460	spin_lock_init(&tp->lock);
1461	spin_lock_init(&tp->mii_lock);
1462	timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
1463
1464	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1465
1466#ifdef CONFIG_TULIP_MWI
1467	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1468		tulip_mwi_config (pdev, dev);
1469#endif
1470
1471	/* Stop the chip's Tx and Rx processes. */
1472	tulip_stop_rxtx(tp);
1473
1474	pci_set_master(pdev);
1475
1476#ifdef CONFIG_GSC
1477	if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1478		switch (pdev->subsystem_device) {
1479		default:
1480			break;
1481		case 0x1061:
1482		case 0x1062:
1483		case 0x1063:
1484		case 0x1098:
1485		case 0x1099:
1486		case 0x10EE:
1487			tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1488			chip_name = "GSC DS21140 Tulip";
1489		}
1490	}
1491#endif
1492
1493	/* Clear the missed-packet counter. */
1494	ioread32(ioaddr + CSR8);
1495
1496	/* The station address ROM is read byte serially.  The register must
1497	   be polled, waiting for the value to be read bit serially from the
1498	   EEPROM.
1499	   */
1500	ee_data = tp->eeprom;
1501	memset(ee_data, 0, sizeof(tp->eeprom));
1502	sum = 0;
1503	if (chip_idx == LC82C168) {
1504		for (i = 0; i < 3; i++) {
1505			int value, boguscnt = 100000;
1506			iowrite32(0x600 | i, ioaddr + 0x98);
1507			do {
1508				value = ioread32(ioaddr + CSR9);
1509			} while (value < 0  && --boguscnt > 0);
1510			put_unaligned_le16(value, ((__le16 *)addr) + i);
1511			sum += value & 0xffff;
1512		}
1513		eth_hw_addr_set(dev, addr);
1514	} else if (chip_idx == COMET) {
1515		/* No need to read the EEPROM. */
1516		put_unaligned_le32(ioread32(ioaddr + 0xA4), addr);
1517		put_unaligned_le16(ioread32(ioaddr + 0xA8), addr + 4);
1518		eth_hw_addr_set(dev, addr);
1519		for (i = 0; i < 6; i ++)
1520			sum += dev->dev_addr[i];
1521	} else {
1522		/* A serial EEPROM interface, we read now and sort it out later. */
1523		int sa_offset = 0;
1524		int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1525		int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1526
1527		if (ee_max_addr > sizeof(tp->eeprom))
1528			ee_max_addr = sizeof(tp->eeprom);
1529
1530		for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1531			u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1532			ee_data[i] = data & 0xff;
1533			ee_data[i + 1] = data >> 8;
1534		}
1535
1536		/* DEC now has a specification (see Notes) but early board makers
1537		   just put the address in the first EEPROM locations. */
1538		/* This does  memcmp(ee_data, ee_data+16, 8) */
1539		for (i = 0; i < 8; i ++)
1540			if (ee_data[i] != ee_data[16+i])
1541				sa_offset = 20;
1542		if (chip_idx == CONEXANT) {
1543			/* Check that the tuple type and length is correct. */
1544			if (ee_data[0x198] == 0x04  &&  ee_data[0x199] == 6)
1545				sa_offset = 0x19A;
1546		} else if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&
1547				   ee_data[2] == 0) {
1548			sa_offset = 2;		/* Grrr, damn Matrox boards. */
1549		}
1550#ifdef CONFIG_MIPS_COBALT
1551               if ((pdev->bus->number == 0) &&
1552                   ((PCI_SLOT(pdev->devfn) == 7) ||
1553                    (PCI_SLOT(pdev->devfn) == 12))) {
1554                       /* Cobalt MAC address in first EEPROM locations. */
1555                       sa_offset = 0;
 1556		       /* Ensure our media table fixup gets applied */
1557		       memcpy(ee_data + 16, ee_data, 8);
1558               }
1559#endif
1560#ifdef CONFIG_GSC
1561		/* Check to see if we have a broken srom */
1562		if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1563			/* pci_vendor_id and subsystem_id are swapped */
1564			ee_data[0] = ee_data[2];
1565			ee_data[1] = ee_data[3];
1566			ee_data[2] = 0x61;
1567			ee_data[3] = 0x10;
1568
 1569			/* HSC-PCI boards need to be byte-swapped and shifted
1570			 * up 1 word.  This shift needs to happen at the end
1571			 * of the MAC first because of the 2 byte overlap.
1572			 */
1573			for (i = 4; i >= 0; i -= 2) {
1574				ee_data[17 + i + 3] = ee_data[17 + i];
1575				ee_data[16 + i + 5] = ee_data[16 + i];
1576			}
1577		}
1578#endif
1579
1580		for (i = 0; i < 6; i ++) {
1581			addr[i] = ee_data[i + sa_offset];
1582			sum += ee_data[i + sa_offset];
1583		}
1584		eth_hw_addr_set(dev, addr);
1585	}
1586	/* Lite-On boards have the address byte-swapped. */
1587	if ((dev->dev_addr[0] == 0xA0 ||
1588	     dev->dev_addr[0] == 0xC0 ||
1589	     dev->dev_addr[0] == 0x02) &&
1590	    dev->dev_addr[1] == 0x00) {
1591		for (i = 0; i < 6; i+=2) {
1592			addr[i] = dev->dev_addr[i+1];
1593			addr[i+1] = dev->dev_addr[i];
1594		}
1595		eth_hw_addr_set(dev, addr);
1596	}
1597
1598	/* On the Zynx 315 Etherarray and other multiport boards only the
1599	   first Tulip has an EEPROM.
1600	   On Sparc systems the mac address is held in the OBP property
1601	   "local-mac-address".
1602	   The addresses of the subsequent ports are derived from the first.
1603	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1604	   that here as well. */
1605	if (sum == 0  || sum == 6*0xff) {
1606#if defined(CONFIG_SPARC)
1607		struct device_node *dp = pci_device_to_OF_node(pdev);
1608		const unsigned char *addr2;
1609		int len;
1610#endif
1611		eeprom_missing = 1;
1612		for (i = 0; i < 5; i++)
1613			addr[i] = last_phys_addr[i];
1614		addr[i] = last_phys_addr[i] + 1;
1615		eth_hw_addr_set(dev, addr);
1616#if defined(CONFIG_SPARC)
1617		addr2 = of_get_property(dp, "local-mac-address", &len);
1618		if (addr2 && len == ETH_ALEN)
1619			eth_hw_addr_set(dev, addr2);
1620#endif
1621#if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
1622		if (last_irq)
1623			irq = last_irq;
1624#endif
1625	}
1626
1627	for (i = 0; i < 6; i++)
1628		last_phys_addr[i] = dev->dev_addr[i];
1629#if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
1630	last_irq = irq;
1631#endif
1632
1633	/* The lower four bits are the media type. */
1634	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
1635		if (options[board_idx] & MEDIA_MASK)
1636			tp->default_port = options[board_idx] & MEDIA_MASK;
1637		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1638			tp->full_duplex = 1;
1639		if (mtu[board_idx] > 0)
1640			dev->mtu = mtu[board_idx];
1641	}
1642	if (dev->mem_start & MEDIA_MASK)
1643		tp->default_port = dev->mem_start & MEDIA_MASK;
1644	if (tp->default_port) {
1645		pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1646			board_idx, medianame[tp->default_port & MEDIA_MASK]);
1647		tp->medialock = 1;
1648		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1649			tp->full_duplex = 1;
1650	}
1651	if (tp->full_duplex)
1652		tp->full_duplex_lock = 1;
1653
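	/* Media indices 9..15 in medianame[] are the MII entries; preload the
	   corresponding MII advertisement bits for a forced MII media type. */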
1654	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1655		static const u16 media2advert[] = {
1656			0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1657		};
1658		tp->mii_advertise = media2advert[tp->default_port - 9];
1659		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1660	}
1661
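	/* tulip_parse_eeprom() logs through dev->name, which still holds the
	   "eth%d" template at this point, hence the temporary name below. */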
1662	if (tp->flags & HAS_MEDIA_TABLE) {
1663		sprintf(dev->name, DRV_NAME "%d", board_idx);	/* hack */
1664		tulip_parse_eeprom(dev);
1665		strcpy(dev->name, "eth%d");			/* un-hack */
1666	}
1667
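	/* If the SROM media table has an MII leaf (media type 11), select it
	   first so that the PHY probe below can reach the MII management
	   interface. */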
1668	if ((tp->flags & ALWAYS_CHECK_MII) ||
1669	    (tp->mtable && tp->mtable->has_mii) ||
1670	    (!tp->mtable && (tp->flags & HAS_MII))) {
1671		if (tp->mtable  &&  tp->mtable->has_mii) {
1672			for (i = 0; i < tp->mtable->leafcount; i++)
1673				if (tp->mtable->mleaf[i].media == 11) {
1674					tp->cur_index = i;
1675					tp->saved_if_port = dev->if_port;
1676					tulip_select_media(dev, 2);
1677					dev->if_port = tp->saved_if_port;
1678					break;
1679				}
1680		}
1681
1682		/* Find the connected MII xcvrs.
1683		   Doing this in open() would allow detecting external xcvrs
1684		   later, but takes much time. */
1685		tulip_find_mii (dev, board_idx);
1686	}
1687
1688	/* The Tulip-specific entries in the device structure. */
1689	dev->netdev_ops = &tulip_netdev_ops;
1690	dev->watchdog_timeo = TX_TIMEOUT;
1691#ifdef CONFIG_TULIP_NAPI
1692	netif_napi_add_weight(dev, &tp->napi, tulip_poll, 16);
1693#endif
1694	dev->ethtool_ops = &ops;
1695
1696	i = register_netdev(dev);
1697	if (i)
1698		goto err_out_free_ring;
1699
1700	pci_set_drvdata(pdev, dev);
1701
1702	dev_info(&dev->dev,
1703#ifdef CONFIG_TULIP_MMIO
1704		 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1705#else
1706		 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1707#endif
1708		 chip_name, pdev->revision,
1709		 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1710		 eeprom_missing ? " EEPROM not present," : "",
1711		 dev->dev_addr, irq);
1712
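	/* Choose the link-change handler that the interrupt path calls when
	   the chip signals a link event. */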
1713	if (tp->chip_id == PNIC2)
1714		tp->link_change = pnic2_lnk_change;
1715	else if (tp->flags & HAS_NWAY)
1716		tp->link_change = t21142_lnk_change;
1717	else if (tp->flags & HAS_PNICNWAY)
1718		tp->link_change = pnic_lnk_change;
1719
1720	/* Reset the xcvr interface and turn on heartbeat. */
1721	switch (chip_idx) {
1722	case DC21140:
1723	case DM910X:
1724	default:
1725		if (tp->mtable)
1726			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1727		break;
1728	case DC21142:
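		/* With an MII PHY attached, quiesce the SIA and select the MII
		   port; otherwise start the 21142/43 internal NWay negotiation. */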
1729		if (tp->mii_cnt  ||  tulip_media_cap[dev->if_port] & MediaIsMII) {
1730			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1731			iowrite32(0x0000, ioaddr + CSR13);
1732			iowrite32(0x0000, ioaddr + CSR14);
1733			iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1734		} else
1735			t21142_start_nway(dev);
1736		break;
1737	case PNIC2:
1738	        /* just do a reset for sanity's sake */
1739		iowrite32(0x0000, ioaddr + CSR13);
1740		iowrite32(0x0000, ioaddr + CSR14);
1741		break;
1742	case LC82C168:
1743		if (!tp->mii_cnt) {
1744			tp->nway = 1;
1745			tp->nwayset = 0;
1746			iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1747			iowrite32(0x30, ioaddr + CSR12);
1748			iowrite32(0x0001F078, ioaddr + CSR6);
1749			iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1750		}
1751		break;
1752	case MX98713:
1753	case COMPEX9881:
1754		iowrite32(0x00000000, ioaddr + CSR6);
1755		iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1756		iowrite32(0x00000001, ioaddr + CSR13);
1757		break;
1758	case MX98715:
1759	case MX98725:
1760		iowrite32(0x01a80000, ioaddr + CSR6);
1761		iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1762		iowrite32(0x00001000, ioaddr + CSR12);
1763		break;
1764	case COMET:
1765		/* No initialization necessary. */
1766		break;
1767	}
1768
1769	/* put the chip in snooze mode until opened */
1770	tulip_set_power_state (tp, 0, 1);
1771
1772	return 0;

err_out_free_ring:
	dma_free_coherent(&pdev->dev,
			  sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
			  sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
			  tp->rx_ring, tp->rx_ring_dma);

err_out_mtable:
	kfree(tp->mtable);
	pci_iounmap(pdev, ioaddr);

err_out_free_res:
	pci_release_regions(pdev);

err_out_free_netdev:
	free_netdev(dev);
	return -ENODEV;
1773}
1774
1775
1776/* set the registers according to the given wolopts */
1777static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1778{
1779	struct net_device *dev = pci_get_drvdata(pdev);
1780	struct tulip_private *tp = netdev_priv(dev);
1781	void __iomem *ioaddr = tp->base_addr;
1782
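	/* Only Comet parts flagged COMET_PM (the ADMtek AN983B) expose these
	   wake-up registers; for other chips this is a no-op. */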
1783	if (tp->flags & COMET_PM) {
1784		unsigned int tmp;
1785
1786		tmp = ioread32(ioaddr + CSR18);
1787		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1788		tmp |= comet_csr18_pm_mode;
1789		iowrite32(tmp, ioaddr + CSR18);
1790
1791		/* Set the Wake-up Control/Status Register to the given WOL options */
1792		tmp = ioread32(ioaddr + CSR13);
1793		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1794		if (wolopts & WAKE_MAGIC)
1795			tmp |= comet_csr13_mpre;
1796		if (wolopts & WAKE_PHY)
1797			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1798		/* Clear the event flags */
1799		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1800		iowrite32(tmp, ioaddr + CSR13);
1801	}
1802}
1803
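/*
 * Suspend: if the interface is running, stop it and release its IRQ, then
 * program the wake-up logic from the cached ethtool WOL settings.  The PCI
 * core performs the actual power-state transition through dev_pm_ops.
 */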
1804static int __maybe_unused tulip_suspend(struct device *dev_d)
1805{
1806	struct net_device *dev = dev_get_drvdata(dev_d);
1807	struct tulip_private *tp = netdev_priv(dev);
1808
1809	if (!dev)
1810		return -EINVAL;
1811
1812	if (!netif_running(dev))
1813		goto save_state;
1814
1815	tulip_down(dev);
1816
1817	netif_device_detach(dev);
1818	/* FIXME: freeing the IRQ here needlessly adds an error path (resume must re-request it). */
1819	free_irq(tp->pdev->irq, dev);
1820
1821save_state:
1822	tulip_set_wolopts(to_pci_dev(dev_d), tp->wolinfo.wolopts);
1823	device_set_wakeup_enable(dev_d, !!tp->wolinfo.wolopts);
1824
1825	return 0;
1826}
1827
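/*
 * Resume: re-request the IRQ, clear any pending Comet power-management event
 * and disarm the wake-up sources, then reattach the device and restart it.
 */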
1828static int __maybe_unused tulip_resume(struct device *dev_d)
1829{
1830	struct pci_dev *pdev = to_pci_dev(dev_d);
1831	struct net_device *dev = dev_get_drvdata(dev_d);
1832	struct tulip_private *tp = netdev_priv(dev);
1833	void __iomem *ioaddr = tp->base_addr;
1834	unsigned int tmp;
1835	int retval = 0;
1836
1837	if (!dev)
1838		return -EINVAL;
1839
1840	if (!netif_running(dev))
1841		return 0;
1842
1843	retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1844			     dev->name, dev);
1845	if (retval) {
1846		pr_err("request_irq failed in resume\n");
1847		return retval;
1848	}
1849
1850	if (tp->flags & COMET_PM) {
1851		device_set_wakeup_enable(dev_d, 0);
1852
1853		/* Clear the PMES flag */
1854		tmp = ioread32(ioaddr + CSR20);
1855		tmp |= comet_csr20_pmes;
1856		iowrite32(tmp, ioaddr + CSR20);
1857
1858		/* Disable all wake-up events */
1859		tulip_set_wolopts(pdev, 0);
1860	}
1861	netif_device_attach(dev);
1862
1863	if (netif_running(dev))
1864		tulip_up(dev);
1865
1866	return 0;
1867}
1868
1869static void tulip_remove_one(struct pci_dev *pdev)
1870{
1871	struct net_device *dev = pci_get_drvdata (pdev);
	struct tulip_private *tp;
1872
1873	if (!dev)
1874		return;
1875
	tp = netdev_priv(dev);
1876	unregister_netdev(dev);
	dma_free_coherent(&pdev->dev,
			  sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
			  sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
			  tp->rx_ring, tp->rx_ring_dma);
	kfree(tp->mtable);
	pci_iounmap(pdev, tp->base_addr);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
1877}
1878
1879#ifdef CONFIG_NET_POLL_CONTROLLER
1880/*
1881 * Polling 'interrupt' - used by things like netconsole to send skbs
1882 * without having to re-enable interrupts. It's not called while
1883 * the interrupt routine is executing.
1884 */
1885
1886static void poll_tulip (struct net_device *dev)
1887{
1888	struct tulip_private *tp = netdev_priv(dev);
1889	const int irq = tp->pdev->irq;
1890
1891	/* disable_irq here is not very nice, but with the lockless
1892	   interrupt handler we have no other choice. */
1893	disable_irq(irq);
1894	tulip_interrupt (irq, dev);
1895	enable_irq(irq);
1896}
1897#endif
1898
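/* The PM callbacks are wired up through dev_pm_ops rather than the legacy PCI
 * .suspend/.resume hooks; SIMPLE_DEV_PM_OPS compiles them out when
 * CONFIG_PM_SLEEP is disabled, which is why the handlers above are marked
 * __maybe_unused. */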
1899static SIMPLE_DEV_PM_OPS(tulip_pm_ops, tulip_suspend, tulip_resume);
1900
1901static struct pci_driver tulip_driver = {
1902	.name		= DRV_NAME,
1903	.id_table	= tulip_pci_tbl,
1904	.probe		= tulip_init_one,
1905	.remove		= tulip_remove_one,
1906	.driver.pm	= &tulip_pm_ops,
1907};
1908
1909
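/* Illustrative usage (assumed, not taken from this file): loading the module
 * with "modprobe tulip options=5 full_duplex=1" forces the first board to
 * 100baseTx-FDX (index 5 in medianame[]). */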
1910static int __init tulip_init (void)
1911{
1912	if (!csr0) {
1913		pr_warn("unknown CPU architecture, using default csr0\n");
1914		/* default to 8 longword cache line alignment */
1915		csr0 = 0x00A00000 | 0x4800;
1916	}
1917
1918	/* copy module parms into globals */
1919	tulip_rx_copybreak = rx_copybreak;
1920	tulip_max_interrupt_work = max_interrupt_work;
1921
1922	/* probe for and init boards */
1923	return pci_register_driver(&tulip_driver);
1924}
1925
1926
1927static void __exit tulip_cleanup (void)
1928{
1929	pci_unregister_driver (&tulip_driver);
1930}
1931
1932
1933module_init(tulip_init);
1934module_exit(tulip_cleanup);
   1/*	tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
   2
   3	Copyright 2000,2001  The Linux Kernel Team
   4	Written/copyright 1994-2001 by Donald Becker.
   5
   6	This software may be used and distributed according to the terms
   7	of the GNU General Public License, incorporated herein by reference.
   8
   9	Please submit bugs to http://bugzilla.kernel.org/ .
  10*/
  11
  12#define pr_fmt(fmt) "tulip: " fmt
  13
  14#define DRV_NAME	"tulip"
  15#ifdef CONFIG_TULIP_NAPI
  16#define DRV_VERSION    "1.1.15-NAPI" /* Keep at least for test */
  17#else
  18#define DRV_VERSION	"1.1.15"
  19#endif
  20#define DRV_RELDATE	"Feb 27, 2007"
  21
  22
  23#include <linux/module.h>
  24#include <linux/pci.h>
  25#include <linux/slab.h>
  26#include "tulip.h"
  27#include <linux/init.h>
  28#include <linux/interrupt.h>
  29#include <linux/etherdevice.h>
  30#include <linux/delay.h>
  31#include <linux/mii.h>
  32#include <linux/crc32.h>
  33#include <asm/unaligned.h>
  34#include <asm/uaccess.h>
  35
  36#ifdef CONFIG_SPARC
  37#include <asm/prom.h>
  38#endif
  39
  40static char version[] =
  41	"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
  42
  43/* A few user-configurable values. */
  44
  45/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
  46static unsigned int max_interrupt_work = 25;
  47
  48#define MAX_UNITS 8
  49/* Used to pass the full-duplex flag, etc. */
  50static int full_duplex[MAX_UNITS];
  51static int options[MAX_UNITS];
  52static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
  53
  54/*  The possible media types that can be set in options[] are: */
  55const char * const medianame[32] = {
  56	"10baseT", "10base2", "AUI", "100baseTx",
  57	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
  58	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
  59	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
  60	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
  61	"","","","", "","","","",  "","","","Transceiver reset",
  62};
  63
  64/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
  65#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
  66	defined(CONFIG_SPARC) || defined(__ia64__) || \
  67	defined(__sh__) || defined(__mips__)
  68static int rx_copybreak = 1518;
  69#else
  70static int rx_copybreak = 100;
  71#endif
  72
  73/*
  74  Set the bus performance register.
  75	Typical: Set 16 longword cache alignment, no burst limit.
  76	Cache alignment bits 15:14	     Burst length 13:8
  77		0000	No alignment  0x00000000 unlimited		0800 8 longwords
  78		4000	8  longwords		0100 1 longword		1000 16 longwords
  79		8000	16 longwords		0200 2 longwords	2000 32 longwords
  80		C000	32  longwords		0400 4 longwords
  81	Warning: many older 486 systems are broken and require setting 0x00A04800
  82	   8 longword cache alignment, 8 longword burst.
  83	ToDo: Non-Intel setting could be better.
  84*/
  85
  86#if defined(__alpha__) || defined(__ia64__)
  87static int csr0 = 0x01A00000 | 0xE000;
  88#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
  89static int csr0 = 0x01A00000 | 0x8000;
  90#elif defined(CONFIG_SPARC) || defined(__hppa__)
  91/* The UltraSparc PCI controllers will disconnect at every 64-byte
  92 * crossing anyways so it makes no sense to tell Tulip to burst
  93 * any more than that.
  94 */
  95static int csr0 = 0x01A00000 | 0x9000;
  96#elif defined(__arm__) || defined(__sh__)
  97static int csr0 = 0x01A00000 | 0x4800;
  98#elif defined(__mips__)
  99static int csr0 = 0x00200000 | 0x4000;
 100#else
 101static int csr0;
 102#endif
 103
 104/* Operational parameters that usually are not changed. */
 105/* Time in jiffies before concluding the transmitter is hung. */
 106#define TX_TIMEOUT  (4*HZ)
 107
 108
 109MODULE_AUTHOR("The Linux Kernel Team");
 110MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
 111MODULE_LICENSE("GPL");
 112MODULE_VERSION(DRV_VERSION);
 113module_param(tulip_debug, int, 0);
 114module_param(max_interrupt_work, int, 0);
 115module_param(rx_copybreak, int, 0);
 116module_param(csr0, int, 0);
 117module_param_array(options, int, NULL, 0);
 118module_param_array(full_duplex, int, NULL, 0);
 119
 120#ifdef TULIP_DEBUG
 121int tulip_debug = TULIP_DEBUG;
 122#else
 123int tulip_debug = 1;
 124#endif
 125
 126static void tulip_timer(unsigned long data)
 127{
 128	struct net_device *dev = (struct net_device *)data;
 129	struct tulip_private *tp = netdev_priv(dev);
 130
 131	if (netif_running(dev))
 132		schedule_work(&tp->media_work);
 133}
 134
 135/*
 136 * This table use during operation for capabilities and media timer.
 137 *
 138 * It is indexed via the values in 'enum chips'
 139 */
 140
 141struct tulip_chip_table tulip_tbl[] = {
 142  { }, /* placeholder for array, slot unused currently */
 143  { }, /* placeholder for array, slot unused currently */
 144
 145  /* DC21140 */
 146  { "Digital DS21140 Tulip", 128, 0x0001ebef,
 147	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
 148	tulip_media_task },
 149
 150  /* DC21142, DC21143 */
 151  { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
 152	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
 153	| HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
 154
 155  /* LC82C168 */
 156  { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
 157	HAS_MII | HAS_PNICNWAY, pnic_timer, },
 158
 159  /* MX98713 */
 160  { "Macronix 98713 PMAC", 128, 0x0001ebef,
 161	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
 162
 163  /* MX98715 */
 164  { "Macronix 98715 PMAC", 256, 0x0001ebef,
 165	HAS_MEDIA_TABLE, mxic_timer, },
 166
 167  /* MX98725 */
 168  { "Macronix 98725 PMAC", 256, 0x0001ebef,
 169	HAS_MEDIA_TABLE, mxic_timer, },
 170
 171  /* AX88140 */
 172  { "ASIX AX88140", 128, 0x0001fbff,
 173	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
 174	| IS_ASIX, tulip_timer, tulip_media_task },
 175
 176  /* PNIC2 */
 177  { "Lite-On PNIC-II", 256, 0x0801fbff,
 178	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
 179
 180  /* COMET */
 181  { "ADMtek Comet", 256, 0x0001abef,
 182	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
 183
 184  /* COMPEX9881 */
 185  { "Compex 9881 PMAC", 128, 0x0001ebef,
 186	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
 187
 188  /* I21145 */
 189  { "Intel DS21145 Tulip", 128, 0x0801fbff,
 190	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
 191	| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
 192
 193  /* DM910X */
 194#ifdef CONFIG_TULIP_DM910X
 195  { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
 196	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
 197	tulip_timer, tulip_media_task },
 198#else
 199  { NULL },
 200#endif
 201
 202  /* RS7112 */
 203  { "Conexant LANfinity", 256, 0x0001ebef,
 204	HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
 205
 206};
 207
 208
 209static const struct pci_device_id tulip_pci_tbl[] = {
 210	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
 211	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
 212	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
 213	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
 214	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
 215/*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
 216	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
 217	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
 218	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 219	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 220	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 221	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 222	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 223	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 224	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 225	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 226	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 227	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 228	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
 229	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
 230#ifdef CONFIG_TULIP_DM910X
 231	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
 232	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
 233#endif
 234	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 235	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
 236	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 237	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 238	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 239	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 240	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
 241	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 242	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 243	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 244	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 245	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
 246	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
 247	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
 248	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 249	{ } /* terminate list */
 250};
 251MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
 252
 253
 254/* A full-duplex map for media types. */
 255const char tulip_media_cap[32] =
 256{0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
 257
 258static void tulip_tx_timeout(struct net_device *dev);
 259static void tulip_init_ring(struct net_device *dev);
 260static void tulip_free_ring(struct net_device *dev);
 261static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
 262					  struct net_device *dev);
 263static int tulip_open(struct net_device *dev);
 264static int tulip_close(struct net_device *dev);
 265static void tulip_up(struct net_device *dev);
 266static void tulip_down(struct net_device *dev);
 267static struct net_device_stats *tulip_get_stats(struct net_device *dev);
 268static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 269static void set_rx_mode(struct net_device *dev);
 270static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
 271#ifdef CONFIG_NET_POLL_CONTROLLER
 272static void poll_tulip(struct net_device *dev);
 273#endif
 274
 275static void tulip_set_power_state (struct tulip_private *tp,
 276				   int sleep, int snooze)
 277{
 278	if (tp->flags & HAS_ACPI) {
 279		u32 tmp, newtmp;
 280		pci_read_config_dword (tp->pdev, CFDD, &tmp);
 281		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
 282		if (sleep)
 283			newtmp |= CFDD_Sleep;
 284		else if (snooze)
 285			newtmp |= CFDD_Snooze;
 286		if (tmp != newtmp)
 287			pci_write_config_dword (tp->pdev, CFDD, newtmp);
 288	}
 289
 290}
 291
 292
 293static void tulip_up(struct net_device *dev)
 294{
 295	struct tulip_private *tp = netdev_priv(dev);
 296	void __iomem *ioaddr = tp->base_addr;
 297	int next_tick = 3*HZ;
 298	u32 reg;
 299	int i;
 300
 301#ifdef CONFIG_TULIP_NAPI
 302	napi_enable(&tp->napi);
 303#endif
 304
 305	/* Wake the chip from sleep/snooze mode. */
 306	tulip_set_power_state (tp, 0, 0);
 307
 308	/* Disable all WOL events */
 309	pci_enable_wake(tp->pdev, PCI_D3hot, 0);
 310	pci_enable_wake(tp->pdev, PCI_D3cold, 0);
 311	tulip_set_wolopts(tp->pdev, 0);
 312
 313	/* On some chip revs we must set the MII/SYM port before the reset!? */
 314	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
 315		iowrite32(0x00040000, ioaddr + CSR6);
 316
 317	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
 318	iowrite32(0x00000001, ioaddr + CSR0);
 319	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
 320	udelay(100);
 321
 322	/* Deassert reset.
 323	   Wait the specified 50 PCI cycles after a reset by initializing
 324	   Tx and Rx queues and the address filter list. */
 325	iowrite32(tp->csr0, ioaddr + CSR0);
 326	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
 327	udelay(100);
 328
 329	if (tulip_debug > 1)
 330		netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
 331
 332	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
 333	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
 334	tp->cur_rx = tp->cur_tx = 0;
 335	tp->dirty_rx = tp->dirty_tx = 0;
 336
 337	if (tp->flags & MC_HASH_ONLY) {
 338		u32 addr_low = get_unaligned_le32(dev->dev_addr);
 339		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
 340		if (tp->chip_id == AX88140) {
 341			iowrite32(0, ioaddr + CSR13);
 342			iowrite32(addr_low,  ioaddr + CSR14);
 343			iowrite32(1, ioaddr + CSR13);
 344			iowrite32(addr_high, ioaddr + CSR14);
 345		} else if (tp->flags & COMET_MAC_ADDR) {
 346			iowrite32(addr_low,  ioaddr + 0xA4);
 347			iowrite32(addr_high, ioaddr + 0xA8);
 348			iowrite32(0, ioaddr + CSR27);
 349			iowrite32(0, ioaddr + CSR28);
 350		}
 351	} else {
 352		/* This is set_rx_mode(), but without starting the transmitter. */
 353		u16 *eaddrs = (u16 *)dev->dev_addr;
 354		u16 *setup_frm = &tp->setup_frame[15*6];
 355		dma_addr_t mapping;
 356
 357		/* 21140 bug: you must add the broadcast address. */
 358		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
 359		/* Fill the final entry of the table with our physical address. */
 360		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
 361		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
 362		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
 363
 364		mapping = pci_map_single(tp->pdev, tp->setup_frame,
 365					 sizeof(tp->setup_frame),
 366					 PCI_DMA_TODEVICE);
 367		tp->tx_buffers[tp->cur_tx].skb = NULL;
 368		tp->tx_buffers[tp->cur_tx].mapping = mapping;
 369
 370		/* Put the setup frame on the Tx list. */
 371		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
 372		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
 373		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
 374
 375		tp->cur_tx++;
 376	}
 377
 378	tp->saved_if_port = dev->if_port;
 379	if (dev->if_port == 0)
 380		dev->if_port = tp->default_port;
 381
 382	/* Allow selecting a default media. */
 383	i = 0;
 384	if (tp->mtable == NULL)
 385		goto media_picked;
 386	if (dev->if_port) {
 387		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
 388			(dev->if_port == 12 ? 0 : dev->if_port);
 389		for (i = 0; i < tp->mtable->leafcount; i++)
 390			if (tp->mtable->mleaf[i].media == looking_for) {
 391				dev_info(&dev->dev,
 392					 "Using user-specified media %s\n",
 393					 medianame[dev->if_port]);
 394				goto media_picked;
 395			}
 396	}
 397	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
 398		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
 399		for (i = 0; i < tp->mtable->leafcount; i++)
 400			if (tp->mtable->mleaf[i].media == looking_for) {
 401				dev_info(&dev->dev,
 402					 "Using EEPROM-set media %s\n",
 403					 medianame[looking_for]);
 404				goto media_picked;
 405			}
 406	}
 407	/* Start sensing first non-full-duplex media. */
 408	for (i = tp->mtable->leafcount - 1;
 409		 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
 410		;
 411media_picked:
 412
 413	tp->csr6 = 0;
 414	tp->cur_index = i;
 415	tp->nwayset = 0;
 416
 417	if (dev->if_port) {
 418		if (tp->chip_id == DC21143  &&
 419		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
 420			/* We must reset the media CSRs when we force-select MII mode. */
 421			iowrite32(0x0000, ioaddr + CSR13);
 422			iowrite32(0x0000, ioaddr + CSR14);
 423			iowrite32(0x0008, ioaddr + CSR15);
 424		}
 425		tulip_select_media(dev, 1);
 426	} else if (tp->chip_id == DC21142) {
 427		if (tp->mii_cnt) {
 428			tulip_select_media(dev, 1);
 429			if (tulip_debug > 1)
 430				dev_info(&dev->dev,
 431					 "Using MII transceiver %d, status %04x\n",
 432					 tp->phys[0],
 433					 tulip_mdio_read(dev, tp->phys[0], 1));
 434			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
 435			tp->csr6 = csr6_mask_hdcap;
 436			dev->if_port = 11;
 437			iowrite32(0x0000, ioaddr + CSR13);
 438			iowrite32(0x0000, ioaddr + CSR14);
 439		} else
 440			t21142_start_nway(dev);
 441	} else if (tp->chip_id == PNIC2) {
 442	        /* for initial startup advertise 10/100 Full and Half */
 443	        tp->sym_advertise = 0x01E0;
 444                /* enable autonegotiate end interrupt */
 445	        iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
 446	        iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
 447		pnic2_start_nway(dev);
 448	} else if (tp->chip_id == LC82C168  &&  ! tp->medialock) {
 449		if (tp->mii_cnt) {
 450			dev->if_port = 11;
 451			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
 452			iowrite32(0x0001, ioaddr + CSR15);
 453		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
 454			pnic_do_nway(dev);
 455		else {
 456			/* Start with 10mbps to do autonegotiation. */
 457			iowrite32(0x32, ioaddr + CSR12);
 458			tp->csr6 = 0x00420000;
 459			iowrite32(0x0001B078, ioaddr + 0xB8);
 460			iowrite32(0x0201B078, ioaddr + 0xB8);
 461			next_tick = 1*HZ;
 462		}
 463	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
 464		   ! tp->medialock) {
 465		dev->if_port = 0;
 466		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
 467		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
 468	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
 469		/* Provided by BOLO, Macronix - 12/10/1998. */
 470		dev->if_port = 0;
 471		tp->csr6 = 0x01a80200;
 472		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
 473		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
 474	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
 475		/* Enable automatic Tx underrun recovery. */
 476		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
 477		dev->if_port = tp->mii_cnt ? 11 : 0;
 478		tp->csr6 = 0x00040000;
 479	} else if (tp->chip_id == AX88140) {
 480		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
 481	} else
 482		tulip_select_media(dev, 1);
 483
 484	/* Start the chip's Tx to process setup frame. */
 485	tulip_stop_rxtx(tp);
 486	barrier();
 487	udelay(5);
 488	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
 489
 490	/* Enable interrupts by setting the interrupt mask. */
 491	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
 492	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
 493	tulip_start_rxtx(tp);
 494	iowrite32(0, ioaddr + CSR2);		/* Rx poll demand */
 495
 496	if (tulip_debug > 2) {
 497		netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
 498			   ioread32(ioaddr + CSR0),
 499			   ioread32(ioaddr + CSR5),
 500			   ioread32(ioaddr + CSR6));
 501	}
 502
 503	/* Set the timer to switch to check for link beat and perhaps switch
 504	   to an alternate media type. */
 505	tp->timer.expires = RUN_AT(next_tick);
 506	add_timer(&tp->timer);
 507#ifdef CONFIG_TULIP_NAPI
 508	setup_timer(&tp->oom_timer, oom_timer, (unsigned long)dev);
 509#endif
 510}
 511
 512static int
 513tulip_open(struct net_device *dev)
 514{
 515	struct tulip_private *tp = netdev_priv(dev);
 516	int retval;
 517
 518	tulip_init_ring (dev);
 519
 520	retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
 521			     dev->name, dev);
 522	if (retval)
 523		goto free_ring;
 524
 525	tulip_up (dev);
 526
 527	netif_start_queue (dev);
 528
 529	return 0;
 530
 531free_ring:
 532	tulip_free_ring (dev);
 533	return retval;
 534}
 535
 536
 537static void tulip_tx_timeout(struct net_device *dev)
 538{
 539	struct tulip_private *tp = netdev_priv(dev);
 540	void __iomem *ioaddr = tp->base_addr;
 541	unsigned long flags;
 542
 543	spin_lock_irqsave (&tp->lock, flags);
 544
 545	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
 546		/* Do nothing -- the media monitor should handle this. */
 547		if (tulip_debug > 1)
 548			dev_warn(&dev->dev,
 549				 "Transmit timeout using MII device\n");
 550	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
 551		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
 552		   tp->chip_id == DM910X) {
 553		dev_warn(&dev->dev,
 554			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
 555			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
 556			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
 557			 ioread32(ioaddr + CSR15));
 558		tp->timeout_recovery = 1;
 559		schedule_work(&tp->media_work);
 560		goto out_unlock;
 561	} else if (tp->chip_id == PNIC2) {
 562		dev_warn(&dev->dev,
 563			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
 564			 (int)ioread32(ioaddr + CSR5),
 565			 (int)ioread32(ioaddr + CSR6),
 566			 (int)ioread32(ioaddr + CSR7),
 567			 (int)ioread32(ioaddr + CSR12));
 568	} else {
 569		dev_warn(&dev->dev,
 570			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
 571			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
 572		dev->if_port = 0;
 573	}
 574
 575#if defined(way_too_many_messages)
 576	if (tulip_debug > 3) {
 577		int i;
 578		for (i = 0; i < RX_RING_SIZE; i++) {
 579			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
 580			int j;
 581			printk(KERN_DEBUG
 582			       "%2d: %08x %08x %08x %08x  %02x %02x %02x\n",
 583			       i,
 584			       (unsigned int)tp->rx_ring[i].status,
 585			       (unsigned int)tp->rx_ring[i].length,
 586			       (unsigned int)tp->rx_ring[i].buffer1,
 587			       (unsigned int)tp->rx_ring[i].buffer2,
 588			       buf[0], buf[1], buf[2]);
 589			for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
 590				if (j < 100)
 591					pr_cont(" %02x", buf[j]);
 592			pr_cont(" j=%d\n", j);
 593		}
 594		printk(KERN_DEBUG "  Rx ring %p: ", tp->rx_ring);
 595		for (i = 0; i < RX_RING_SIZE; i++)
 596			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
 597		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
 598		for (i = 0; i < TX_RING_SIZE; i++)
 599			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
 600		pr_cont("\n");
 601	}
 602#endif
 603
 604	tulip_tx_timeout_complete(tp, ioaddr);
 605
 606out_unlock:
 607	spin_unlock_irqrestore (&tp->lock, flags);
 608	dev->trans_start = jiffies; /* prevent tx timeout */
 609	netif_wake_queue (dev);
 610}
 611
 612
 613/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 614static void tulip_init_ring(struct net_device *dev)
 615{
 616	struct tulip_private *tp = netdev_priv(dev);
 617	int i;
 618
 619	tp->susp_rx = 0;
 620	tp->ttimer = 0;
 621	tp->nir = 0;
 622
 623	for (i = 0; i < RX_RING_SIZE; i++) {
 624		tp->rx_ring[i].status = 0x00000000;
 625		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
 626		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
 627		tp->rx_buffers[i].skb = NULL;
 628		tp->rx_buffers[i].mapping = 0;
 629	}
 630	/* Mark the last entry as wrapping the ring. */
 631	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
 632	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
 633
 634	for (i = 0; i < RX_RING_SIZE; i++) {
 635		dma_addr_t mapping;
 636
 637		/* Note the receive buffer must be longword aligned.
 638		   netdev_alloc_skb() provides 16 byte alignment.  But do *not*
 639		   use skb_reserve() to align the IP header! */
 640		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
 641		tp->rx_buffers[i].skb = skb;
 642		if (skb == NULL)
 643			break;
 644		mapping = pci_map_single(tp->pdev, skb->data,
 645					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 646		tp->rx_buffers[i].mapping = mapping;
 647		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
 648		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
 649	}
 650	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 651
 652	/* The Tx buffer descriptor is filled in as needed, but we
 653	   do need to clear the ownership bit. */
 654	for (i = 0; i < TX_RING_SIZE; i++) {
 655		tp->tx_buffers[i].skb = NULL;
 656		tp->tx_buffers[i].mapping = 0;
 657		tp->tx_ring[i].status = 0x00000000;
 658		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
 659	}
 660	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
 661}
 662
 663static netdev_tx_t
 664tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 665{
 666	struct tulip_private *tp = netdev_priv(dev);
 667	int entry;
 668	u32 flag;
 669	dma_addr_t mapping;
 670	unsigned long flags;
 671
 672	spin_lock_irqsave(&tp->lock, flags);
 673
 674	/* Calculate the next Tx descriptor entry. */
 675	entry = tp->cur_tx % TX_RING_SIZE;
 676
 677	tp->tx_buffers[entry].skb = skb;
 678	mapping = pci_map_single(tp->pdev, skb->data,
 679				 skb->len, PCI_DMA_TODEVICE);
 680	tp->tx_buffers[entry].mapping = mapping;
 681	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
 682
 683	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
 684		flag = 0x60000000; /* No interrupt */
 685	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
 686		flag = 0xe0000000; /* Tx-done intr. */
 687	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
 688		flag = 0x60000000; /* No Tx-done intr. */
 689	} else {		/* Leave room for set_rx_mode() to fill entries. */
 690		flag = 0xe0000000; /* Tx-done intr. */
 691		netif_stop_queue(dev);
 692	}
 693	if (entry == TX_RING_SIZE-1)
 694		flag = 0xe0000000 | DESC_RING_WRAP;
 695
 696	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
 697	/* if we were using Transmit Automatic Polling, we would need a
 698	 * wmb() here. */
 699	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
 700	wmb();
 701
 702	tp->cur_tx++;
 703
 704	/* Trigger an immediate transmit demand. */
 705	iowrite32(0, tp->base_addr + CSR1);
 706
 707	spin_unlock_irqrestore(&tp->lock, flags);
 708
 709	return NETDEV_TX_OK;
 710}
 711
 712static void tulip_clean_tx_ring(struct tulip_private *tp)
 713{
 714	unsigned int dirty_tx;
 715
 716	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
 717		dirty_tx++) {
 718		int entry = dirty_tx % TX_RING_SIZE;
 719		int status = le32_to_cpu(tp->tx_ring[entry].status);
 720
 721		if (status < 0) {
 722			tp->dev->stats.tx_errors++;	/* It wasn't Txed */
 723			tp->tx_ring[entry].status = 0;
 724		}
 725
 726		/* Check for Tx filter setup frames. */
 727		if (tp->tx_buffers[entry].skb == NULL) {
 728			/* test because dummy frames not mapped */
 729			if (tp->tx_buffers[entry].mapping)
 730				pci_unmap_single(tp->pdev,
 731					tp->tx_buffers[entry].mapping,
 732					sizeof(tp->setup_frame),
 733					PCI_DMA_TODEVICE);
 734			continue;
 735		}
 736
 737		pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
 738				tp->tx_buffers[entry].skb->len,
 739				PCI_DMA_TODEVICE);
 
 740
 741		/* Free the original skb. */
 742		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
 743		tp->tx_buffers[entry].skb = NULL;
 744		tp->tx_buffers[entry].mapping = 0;
 745	}
 746}
 747
 748static void tulip_down (struct net_device *dev)
 749{
 750	struct tulip_private *tp = netdev_priv(dev);
 751	void __iomem *ioaddr = tp->base_addr;
 752	unsigned long flags;
 753
 754	cancel_work_sync(&tp->media_work);
 755
 756#ifdef CONFIG_TULIP_NAPI
 757	napi_disable(&tp->napi);
 758#endif
 759
 760	del_timer_sync (&tp->timer);
 761#ifdef CONFIG_TULIP_NAPI
 762	del_timer_sync (&tp->oom_timer);
 763#endif
 764	spin_lock_irqsave (&tp->lock, flags);
 765
 766	/* Disable interrupts by clearing the interrupt mask. */
 767	iowrite32 (0x00000000, ioaddr + CSR7);
 768
 769	/* Stop the Tx and Rx processes. */
 770	tulip_stop_rxtx(tp);
 771
 772	/* prepare receive buffers */
 773	tulip_refill_rx(dev);
 774
 775	/* release any unconsumed transmit buffers */
 776	tulip_clean_tx_ring(tp);
 777
 778	if (ioread32(ioaddr + CSR6) != 0xffffffff)
 779		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
 780
 781	spin_unlock_irqrestore (&tp->lock, flags);
 782
 783	setup_timer(&tp->timer, tulip_tbl[tp->chip_id].media_timer,
 784		    (unsigned long)dev);
 785
 786	dev->if_port = tp->saved_if_port;
 787
 788	/* Leave the driver in snooze, not sleep, mode. */
 789	tulip_set_power_state (tp, 0, 1);
 790}
 791
 792static void tulip_free_ring (struct net_device *dev)
 793{
 794	struct tulip_private *tp = netdev_priv(dev);
 795	int i;
 796
 797	/* Free all the skbuffs in the Rx queue. */
 798	for (i = 0; i < RX_RING_SIZE; i++) {
 799		struct sk_buff *skb = tp->rx_buffers[i].skb;
 800		dma_addr_t mapping = tp->rx_buffers[i].mapping;
 801
 802		tp->rx_buffers[i].skb = NULL;
 803		tp->rx_buffers[i].mapping = 0;
 804
 805		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
 806		tp->rx_ring[i].length = 0;
 807		/* An invalid address. */
 808		tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
 809		if (skb) {
 810			pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
 811					 PCI_DMA_FROMDEVICE);
 812			dev_kfree_skb (skb);
 813		}
 814	}
 815
 816	for (i = 0; i < TX_RING_SIZE; i++) {
 817		struct sk_buff *skb = tp->tx_buffers[i].skb;
 818
 819		if (skb != NULL) {
 820			pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
 821					 skb->len, PCI_DMA_TODEVICE);
 
 822			dev_kfree_skb (skb);
 823		}
 824		tp->tx_buffers[i].skb = NULL;
 825		tp->tx_buffers[i].mapping = 0;
 826	}
 827}
 828
 829static int tulip_close (struct net_device *dev)
 830{
 831	struct tulip_private *tp = netdev_priv(dev);
 832	void __iomem *ioaddr = tp->base_addr;
 833
 834	netif_stop_queue (dev);
 835
 836	tulip_down (dev);
 837
 838	if (tulip_debug > 1)
 839		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
 840			   ioread32 (ioaddr + CSR5));
 841
 842	free_irq (tp->pdev->irq, dev);
 843
 844	tulip_free_ring (dev);
 845
 846	return 0;
 847}
 848
 849static struct net_device_stats *tulip_get_stats(struct net_device *dev)
 850{
 851	struct tulip_private *tp = netdev_priv(dev);
 852	void __iomem *ioaddr = tp->base_addr;
 853
 854	if (netif_running(dev)) {
 855		unsigned long flags;
 856
 857		spin_lock_irqsave (&tp->lock, flags);
 858
 859		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
 860
 861		spin_unlock_irqrestore(&tp->lock, flags);
 862	}
 863
 864	return &dev->stats;
 865}
 866
 867
 868static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 869{
 870	struct tulip_private *np = netdev_priv(dev);
 871	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 872	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 873	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 874}
 875
 876
 877static int tulip_ethtool_set_wol(struct net_device *dev,
 878				 struct ethtool_wolinfo *wolinfo)
 879{
 880	struct tulip_private *tp = netdev_priv(dev);
 881
 882	if (wolinfo->wolopts & (~tp->wolinfo.supported))
 883		   return -EOPNOTSUPP;
 884
 885	tp->wolinfo.wolopts = wolinfo->wolopts;
 886	device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
 887	return 0;
 888}
 889
 890static void tulip_ethtool_get_wol(struct net_device *dev,
 891				  struct ethtool_wolinfo *wolinfo)
 892{
 893	struct tulip_private *tp = netdev_priv(dev);
 894
 895	wolinfo->supported = tp->wolinfo.supported;
 896	wolinfo->wolopts = tp->wolinfo.wolopts;
 897	return;
 898}
 899
 900
 901static const struct ethtool_ops ops = {
 902	.get_drvinfo = tulip_get_drvinfo,
 903	.set_wol     = tulip_ethtool_set_wol,
 904	.get_wol     = tulip_ethtool_get_wol,
 905};
 906
 907/* Provide ioctl() calls to examine the MII xcvr state. */
 908static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
 909{
 910	struct tulip_private *tp = netdev_priv(dev);
 911	void __iomem *ioaddr = tp->base_addr;
 912	struct mii_ioctl_data *data = if_mii(rq);
 913	const unsigned int phy_idx = 0;
 914	int phy = tp->phys[phy_idx] & 0x1f;
 915	unsigned int regnum = data->reg_num;
 916
 917	switch (cmd) {
 918	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
 919		if (tp->mii_cnt)
 920			data->phy_id = phy;
 921		else if (tp->flags & HAS_NWAY)
 922			data->phy_id = 32;
 923		else if (tp->chip_id == COMET)
 924			data->phy_id = 1;
 925		else
 926			return -ENODEV;
 
 927
 928	case SIOCGMIIREG:		/* Read MII PHY register. */
 929		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
 930			int csr12 = ioread32 (ioaddr + CSR12);
 931			int csr14 = ioread32 (ioaddr + CSR14);
 932			switch (regnum) {
 933			case 0:
 934                                if (((csr14<<5) & 0x1000) ||
 935                                        (dev->if_port == 5 && tp->nwayset))
 936                                        data->val_out = 0x1000;
 937                                else
 938                                        data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
 939                                                | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
 940				break;
 941			case 1:
 942                                data->val_out =
 943					0x1848 +
 944					((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
 945					((csr12&0x06) == 6 ? 0 : 4);
 946                                data->val_out |= 0x6048;
 947				break;
 948			case 4:
 949                                /* Advertised value, bogus 10baseTx-FD value from CSR6. */
 950                                data->val_out =
 951					((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
 952					((csr14 >> 1) & 0x20) + 1;
 953                                data->val_out |= ((csr14 >> 9) & 0x03C0);
 954				break;
 955			case 5: data->val_out = tp->lpar; break;
 956			default: data->val_out = 0; break;
 957			}
 958		} else {
 959			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
 960		}
 961		return 0;
 962
 963	case SIOCSMIIREG:		/* Write MII PHY register. */
 964		if (regnum & ~0x1f)
 965			return -EINVAL;
 966		if (data->phy_id == phy) {
 967			u16 value = data->val_in;
 968			switch (regnum) {
 969			case 0:	/* Check for autonegotiation on or reset. */
 970				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
 971				if (tp->full_duplex_lock)
 972					tp->full_duplex = (value & 0x0100) ? 1 : 0;
 973				break;
 974			case 4:
 975				tp->advertising[phy_idx] =
 976				tp->mii_advertise = data->val_in;
 977				break;
 978			}
 979		}
 980		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
 981			u16 value = data->val_in;
 982			if (regnum == 0) {
 983			  if ((value & 0x1200) == 0x1200) {
 984			    if (tp->chip_id == PNIC2) {
 985                                   pnic2_start_nway (dev);
 986                            } else {
 987				   t21142_start_nway (dev);
 988                            }
 989			  }
 990			} else if (regnum == 4)
 991				tp->sym_advertise = value;
 992		} else {
 993			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
 994		}
 995		return 0;
 996	default:
 997		return -EOPNOTSUPP;
 998	}
 999
1000	return -EOPNOTSUPP;
1001}
1002
1003
1004/* Set or clear the multicast filter for this adaptor.
1005   Note that we only use exclusion around actually queueing the
1006   new frame, not around filling tp->setup_frame.  This is non-deterministic
1007   when re-entered but still correct. */
1008
1009static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1010{
1011	struct tulip_private *tp = netdev_priv(dev);
1012	u16 hash_table[32];
1013	struct netdev_hw_addr *ha;
 
1014	int i;
1015	u16 *eaddrs;
1016
1017	memset(hash_table, 0, sizeof(hash_table));
1018	__set_bit_le(255, hash_table);			/* Broadcast entry */
1019	/* This should work on big-endian machines as well. */
1020	netdev_for_each_mc_addr(ha, dev) {
1021		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1022
1023		__set_bit_le(index, hash_table);
1024	}
1025	for (i = 0; i < 32; i++) {
1026		*setup_frm++ = hash_table[i];
1027		*setup_frm++ = hash_table[i];
1028	}
1029	setup_frm = &tp->setup_frame[13*6];
1030
1031	/* Fill the final entry with our physical address. */
1032	eaddrs = (u16 *)dev->dev_addr;
1033	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1034	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1035	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1036}
1037
1038static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1039{
1040	struct tulip_private *tp = netdev_priv(dev);
1041	struct netdev_hw_addr *ha;
1042	u16 *eaddrs;
1043
1044	/* We have <= 14 addresses so we can use the wonderful
1045	   16 address perfect filtering of the Tulip. */
1046	netdev_for_each_mc_addr(ha, dev) {
1047		eaddrs = (u16 *) ha->addr;
1048		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1049		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1050		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1051	}
1052	/* Fill the unused entries with the broadcast address. */
1053	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1054	setup_frm = &tp->setup_frame[15*6];
1055
1056	/* Fill the final entry with our physical address. */
1057	eaddrs = (u16 *)dev->dev_addr;
1058	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1059	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1060	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1061}
1062
1063
1064static void set_rx_mode(struct net_device *dev)
1065{
1066	struct tulip_private *tp = netdev_priv(dev);
1067	void __iomem *ioaddr = tp->base_addr;
1068	int csr6;
1069
1070	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1071
1072	tp->csr6 &= ~0x00D5;
1073	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1074		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1075		csr6 |= AcceptAllMulticast | AcceptAllPhys;
1076	} else if ((netdev_mc_count(dev) > 1000) ||
1077		   (dev->flags & IFF_ALLMULTI)) {
1078		/* Too many to filter well -- accept all multicasts. */
1079		tp->csr6 |= AcceptAllMulticast;
1080		csr6 |= AcceptAllMulticast;
1081	} else	if (tp->flags & MC_HASH_ONLY) {
1082		/* Some work-alikes have only a 64-entry hash filter table. */
1083		/* Should verify correctness on big-endian/__powerpc__ */
1084		struct netdev_hw_addr *ha;
1085		if (netdev_mc_count(dev) > 64) {
1086			/* Arbitrary non-effective limit. */
1087			tp->csr6 |= AcceptAllMulticast;
1088			csr6 |= AcceptAllMulticast;
1089		} else {
1090			u32 mc_filter[2] = {0, 0};		 /* Multicast hash filter */
1091			int filterbit;
1092			netdev_for_each_mc_addr(ha, dev) {
1093				if (tp->flags & COMET_MAC_ADDR)
1094					filterbit = ether_crc_le(ETH_ALEN,
1095								 ha->addr);
1096				else
1097					filterbit = ether_crc(ETH_ALEN,
1098							      ha->addr) >> 26;
1099				filterbit &= 0x3f;
1100				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1101				if (tulip_debug > 2)
1102					dev_info(&dev->dev,
1103						 "Added filter for %pM  %08x bit %d\n",
1104						 ha->addr,
1105						 ether_crc(ETH_ALEN, ha->addr),
1106						 filterbit);
1107			}
1108			if (mc_filter[0] == tp->mc_filter[0]  &&
1109				mc_filter[1] == tp->mc_filter[1])
1110				;				/* No change. */
1111			else if (tp->flags & IS_ASIX) {
1112				iowrite32(2, ioaddr + CSR13);
1113				iowrite32(mc_filter[0], ioaddr + CSR14);
1114				iowrite32(3, ioaddr + CSR13);
1115				iowrite32(mc_filter[1], ioaddr + CSR14);
1116			} else if (tp->flags & COMET_MAC_ADDR) {
1117				iowrite32(mc_filter[0], ioaddr + CSR27);
1118				iowrite32(mc_filter[1], ioaddr + CSR28);
1119			}
1120			tp->mc_filter[0] = mc_filter[0];
1121			tp->mc_filter[1] = mc_filter[1];
1122		}
1123	} else {
1124		unsigned long flags;
1125		u32 tx_flags = 0x08000000 | 192;
1126
1127		/* Note that only the low-address shortword of setup_frame is valid!
1128		   The values are doubled for big-endian architectures. */
1129		if (netdev_mc_count(dev) > 14) {
1130			/* Must use a multicast hash table. */
1131			build_setup_frame_hash(tp->setup_frame, dev);
1132			tx_flags = 0x08400000 | 192;
1133		} else {
1134			build_setup_frame_perfect(tp->setup_frame, dev);
1135		}
1136
1137		spin_lock_irqsave(&tp->lock, flags);
1138
1139		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1140			/* Same setup recently queued, we need not add it. */
1141		} else {
1142			unsigned int entry;
1143			int dummy = -1;
1144
1145			/* Now add this frame to the Tx list. */
1146
1147			entry = tp->cur_tx++ % TX_RING_SIZE;
1148
1149			if (entry != 0) {
1150				/* Avoid a chip errata by prefixing a dummy entry. */
1151				tp->tx_buffers[entry].skb = NULL;
1152				tp->tx_buffers[entry].mapping = 0;
1153				tp->tx_ring[entry].length =
1154					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1155				tp->tx_ring[entry].buffer1 = 0;
1156				/* Must set DescOwned later to avoid race with chip */
1157				dummy = entry;
1158				entry = tp->cur_tx++ % TX_RING_SIZE;
1159
1160			}
1161
1162			tp->tx_buffers[entry].skb = NULL;
1163			tp->tx_buffers[entry].mapping =
1164				pci_map_single(tp->pdev, tp->setup_frame,
 
1165					       sizeof(tp->setup_frame),
1166					       PCI_DMA_TODEVICE);
1167			/* Put the setup frame on the Tx list. */
1168			if (entry == TX_RING_SIZE-1)
1169				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
1170			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1171			tp->tx_ring[entry].buffer1 =
1172				cpu_to_le32(tp->tx_buffers[entry].mapping);
1173			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1174			if (dummy >= 0)
1175				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1176			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1177				netif_stop_queue(dev);
1178
1179			/* Trigger an immediate transmit demand. */
1180			iowrite32(0, ioaddr + CSR1);
1181		}
1182
1183		spin_unlock_irqrestore(&tp->lock, flags);
1184	}
1185
1186	iowrite32(csr6, ioaddr + CSR6);
1187}
1188
1189#ifdef CONFIG_TULIP_MWI
1190static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
1191{
1192	struct tulip_private *tp = netdev_priv(dev);
1193	u8 cache;
1194	u16 pci_command;
1195	u32 csr0;
1196
1197	if (tulip_debug > 3)
1198		netdev_dbg(dev, "tulip_mwi_config()\n");
1199
1200	tp->csr0 = csr0 = 0;
1201
1202	/* if we have any cache line size at all, we can do MRM and MWI */
1203	csr0 |= MRM | MWI;
1204
1205	/* Enable MWI in the standard PCI command bit.
1206	 * Check for the case where MWI is desired but not available
1207	 */
1208	pci_try_set_mwi(pdev);
1209
1210	/* read result from hardware (in case bit refused to enable) */
1211	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1212	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1213		csr0 &= ~MWI;
1214
1215	/* if cache line size hardwired to zero, no MWI */
1216	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1217	if ((csr0 & MWI) && (cache == 0)) {
1218		csr0 &= ~MWI;
1219		pci_clear_mwi(pdev);
1220	}
1221
1222	/* assign per-cacheline-size cache alignment and
1223	 * burst length values
1224	 */
1225	switch (cache) {
1226	case 8:
1227		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1228		break;
1229	case 16:
1230		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1231		break;
1232	case 32:
1233		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1234		break;
1235	default:
1236		cache = 0;
1237		break;
1238	}
1239
1240	/* if we have a good cache line size, we by now have a good
1241	 * csr0, so save it and exit
1242	 */
1243	if (cache)
1244		goto out;
1245
1246	/* we don't have a good csr0 or cache line size, disable MWI */
1247	if (csr0 & MWI) {
1248		pci_clear_mwi(pdev);
1249		csr0 &= ~MWI;
1250	}
1251
1252	/* sane defaults for burst length and cache alignment
1253	 * originally from de4x5 driver
1254	 */
1255	csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1256
1257out:
1258	tp->csr0 = csr0;
1259	if (tulip_debug > 2)
1260		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1261			   cache, csr0);
1262}
1263#endif
1264
1265/*
1266 *	Chips that have the MRM/reserved bit quirk and the burst quirk. That
1267 *	is the DM910X and the on chip ULi devices
1268 */
1269
1270static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1271{
1272	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1273		return 1;
1274	return 0;
1275}
1276
1277static const struct net_device_ops tulip_netdev_ops = {
1278	.ndo_open		= tulip_open,
1279	.ndo_start_xmit		= tulip_start_xmit,
1280	.ndo_tx_timeout		= tulip_tx_timeout,
1281	.ndo_stop		= tulip_close,
1282	.ndo_get_stats		= tulip_get_stats,
1283	.ndo_do_ioctl 		= private_ioctl,
1284	.ndo_set_rx_mode	= set_rx_mode,
1285	.ndo_change_mtu		= eth_change_mtu,
1286	.ndo_set_mac_address	= eth_mac_addr,
1287	.ndo_validate_addr	= eth_validate_addr,
1288#ifdef CONFIG_NET_POLL_CONTROLLER
1289	.ndo_poll_controller	 = poll_tulip,
1290#endif
1291};
1292
1293const struct pci_device_id early_486_chipsets[] = {
1294	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1295	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1296	{ },
1297};
1298
1299static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1300{
1301	struct tulip_private *tp;
1302	/* See note below on the multiport cards. */
1303	static unsigned char last_phys_addr[ETH_ALEN] = {
1304		0x00, 'L', 'i', 'n', 'u', 'x'
1305	};
 
1306	static int last_irq;
1307	static int multiport_cnt;	/* For four-port boards w/one EEPROM */
1308	int i, irq;
1309	unsigned short sum;
1310	unsigned char *ee_data;
1311	struct net_device *dev;
1312	void __iomem *ioaddr;
1313	static int board_idx = -1;
1314	int chip_idx = ent->driver_data;
1315	const char *chip_name = tulip_tbl[chip_idx].chip_name;
1316	unsigned int eeprom_missing = 0;
 
1317	unsigned int force_csr0 = 0;
1318
1319#ifndef MODULE
1320	if (tulip_debug > 0)
1321		printk_once(KERN_INFO "%s", version);
1322#endif
1323
1324	board_idx++;
1325
1326	/*
1327	 *	Lan media wire a tulip chip to a wan interface. Needs a very
1328	 *	different driver (lmc driver)
1329	 */
1330
1331        if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1332		pr_err("skipping LMC card\n");
1333		return -ENODEV;
1334	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1335		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1336		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1337		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1338		pr_err("skipping SBE T3E3 port\n");
1339		return -ENODEV;
1340	}
1341
1342	/*
1343	 *	DM910x chips should be handled by the dmfe driver, except
1344	 *	on-board chips on SPARC systems.  Also, early DM9100s need
1345	 *	software CRC which only the dmfe driver supports.
1346	 */
1347
1348#ifdef CONFIG_TULIP_DM910X
1349	if (chip_idx == DM910X) {
1350		struct device_node *dp;
1351
1352		if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1353		    pdev->revision < 0x30) {
1354			pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1355			return -ENODEV;
1356		}
1357
1358		dp = pci_device_to_OF_node(pdev);
1359		if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1360			pr_info("skipping DM910x expansion card (use dmfe)\n");
1361			return -ENODEV;
1362		}
1363	}
1364#endif
1365
1366	/*
1367	 *	Looks for early PCI chipsets where people report hangs
1368	 *	without the workarounds being on.
1369	 */
1370
1371	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1372	      aligned.  Aries might need this too. The Saturn errata are not
1373	      pretty reading but thankfully it's an old 486 chipset.
1374
1375	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
1376	      Saturn.
1377	*/
1378
1379	if (pci_dev_present(early_486_chipsets)) {
1380		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1381		force_csr0 = 1;
1382	}
1383
1384	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
1385	if (chip_idx == AX88140) {
1386		if ((csr0 & 0x3f00) == 0)
1387			csr0 |= 0x2000;
1388	}
1389
1390	/* PNIC doesn't have MWI/MRL/MRM... */
1391	if (chip_idx == LC82C168)
1392		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1393
1394	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1395	if (tulip_uli_dm_quirk(pdev)) {
1396		csr0 &= ~0x01f100ff;
1397#if defined(CONFIG_SPARC)
1398                csr0 = (csr0 & ~0xff00) | 0xe000;
1399#endif
1400	}
1401	/*
1402	 *	And back to business
1403	 */
1404
1405	i = pci_enable_device(pdev);
1406	if (i) {
1407		pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1408		return i;
1409	}
1410
1411	irq = pdev->irq;
1412
1413	/* alloc_etherdev ensures aligned and zeroed private structures */
1414	dev = alloc_etherdev (sizeof (*tp));
1415	if (!dev)
1416		return -ENOMEM;
1417
1418	SET_NETDEV_DEV(dev, &pdev->dev);
1419	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1420		pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1421		       pci_name(pdev),
1422		       (unsigned long long)pci_resource_len (pdev, 0),
1423		       (unsigned long long)pci_resource_start (pdev, 0));
1424		goto err_out_free_netdev;
1425	}
1426
1427	/* grab all resources from both PIO and MMIO regions, as we
1428	 * don't want anyone else messing around with our hardware */
1429	if (pci_request_regions (pdev, DRV_NAME))
1430		goto err_out_free_netdev;
1431
1432	ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1433
1434	if (!ioaddr)
1435		goto err_out_free_res;
1436
1437	/*
1438	 * initialize private data structure 'tp'
1439	 * it is zeroed and aligned in alloc_etherdev
1440	 */
1441	tp = netdev_priv(dev);
1442	tp->dev = dev;
1443
1444	tp->rx_ring = pci_alloc_consistent(pdev,
1445					   sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1446					   sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1447					   &tp->rx_ring_dma);
1448	if (!tp->rx_ring)
1449		goto err_out_mtable;
1450	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1451	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
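	/*
	 * Both descriptor rings live in one coherent DMA allocation: the
	 * RX_RING_SIZE Rx descriptors come first, immediately followed by the
	 * TX_RING_SIZE Tx descriptors, so tx_ring_dma is simply rx_ring_dma
	 * plus the size of the Rx ring.
	 */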
1452
1453	tp->chip_id = chip_idx;
1454	tp->flags = tulip_tbl[chip_idx].flags;
1455
1456	tp->wolinfo.supported = 0;
1457	tp->wolinfo.wolopts = 0;
1458	/* COMET: Enable power management only for AN983B */
1459	if (chip_idx == COMET ) {
1460		u32 sig;
1461		pci_read_config_dword (pdev, 0x80, &sig);
1462		if (sig == 0x09811317) {
1463			tp->flags |= COMET_PM;
1464			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1465			pr_info("%s: Enabled WOL support for AN983B\n",
1466				__func__);
1467		}
1468	}
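	/*
	 * The dword read from PCI config offset 0x80 above acts as a device
	 * signature: only Comet parts reporting 0x09811317 (the ADMtek AN983B)
	 * get COMET_PM set and advertise PHY/magic-packet wake-up.
	 */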
1469	tp->pdev = pdev;
1470	tp->base_addr = ioaddr;
1471	tp->revision = pdev->revision;
1472	tp->csr0 = csr0;
1473	spin_lock_init(&tp->lock);
1474	spin_lock_init(&tp->mii_lock);
1475	setup_timer(&tp->timer, tulip_tbl[tp->chip_id].media_timer,
1476		    (unsigned long)dev);
1477
1478	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1479
1480#ifdef CONFIG_TULIP_MWI
1481	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1482		tulip_mwi_config (pdev, dev);
1483#endif
1484
1485	/* Stop the chip's Tx and Rx processes. */
1486	tulip_stop_rxtx(tp);
1487
1488	pci_set_master(pdev);
1489
1490#ifdef CONFIG_GSC
1491	if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1492		switch (pdev->subsystem_device) {
1493		default:
1494			break;
1495		case 0x1061:
1496		case 0x1062:
1497		case 0x1063:
1498		case 0x1098:
1499		case 0x1099:
1500		case 0x10EE:
1501			tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1502			chip_name = "GSC DS21140 Tulip";
1503		}
1504	}
1505#endif
1506
1507	/* Clear the missed-packet counter. */
1508	ioread32(ioaddr + CSR8);
1509
1510	/* The station address ROM is read byte serially.  The register must
1511	   be polled, waiting for the value to be read bit serially from the
1512	   EEPROM.
1513	   */
1514	ee_data = tp->eeprom;
1515	memset(ee_data, 0, sizeof(tp->eeprom));
1516	sum = 0;
1517	if (chip_idx == LC82C168) {
1518		for (i = 0; i < 3; i++) {
1519			int value, boguscnt = 100000;
1520			iowrite32(0x600 | i, ioaddr + 0x98);
1521			do {
1522				value = ioread32(ioaddr + CSR9);
1523			} while (value < 0  && --boguscnt > 0);
1524			put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1525			sum += value & 0xffff;
1526		}
1527	} else if (chip_idx == COMET) {
1528		/* No need to read the EEPROM. */
1529		put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1530		put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1531		for (i = 0; i < 6; i ++)
1532			sum += dev->dev_addr[i];
1533	} else {
1534		/* A serial EEPROM interface; we read it now and sort it out later. */
1535		int sa_offset = 0;
1536		int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1537		int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1538
1539		if (ee_max_addr > sizeof(tp->eeprom))
1540			ee_max_addr = sizeof(tp->eeprom);
1541
1542		for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1543			u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1544			ee_data[i] = data & 0xff;
1545			ee_data[i + 1] = data >> 8;
1546		}
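		/*
		 * tp->eeprom now holds a byte image of the SROM, each 16-bit
		 * word stored low byte first; the station address is extracted
		 * from it below and tulip_parse_eeprom() later pulls the media
		 * table out of the same buffer.
		 */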
1547
1548		/* DEC now has a specification (see Notes) but early board makers
1549		   just put the address in the first EEPROM locations. */
1550		/* This does memcmp(ee_data, ee_data + 16, 8) */
1551		for (i = 0; i < 8; i ++)
1552			if (ee_data[i] != ee_data[16+i])
1553				sa_offset = 20;
1554		if (chip_idx == CONEXANT) {
1555			/* Check that the tuple type and length is correct. */
1556			if (ee_data[0x198] == 0x04  &&  ee_data[0x199] == 6)
1557				sa_offset = 0x19A;
1558		} else if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&
1559				   ee_data[2] == 0) {
1560			sa_offset = 2;		/* Grrr, damn Matrox boards. */
1561			multiport_cnt = 4;
1562		}
1563#ifdef CONFIG_MIPS_COBALT
1564		if ((pdev->bus->number == 0) &&
1565		    ((PCI_SLOT(pdev->devfn) == 7) ||
1566		     (PCI_SLOT(pdev->devfn) == 12))) {
1567			/* Cobalt MAC address in first EEPROM locations. */
1568			sa_offset = 0;
1569			/* Ensure our media table fixup gets applied */
1570			memcpy(ee_data + 16, ee_data, 8);
1571		}
1572#endif
1573#ifdef CONFIG_GSC
1574		/* Check to see if we have a broken srom */
1575		if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1576			/* pci_vendor_id and subsystem_id are swapped */
1577			ee_data[0] = ee_data[2];
1578			ee_data[1] = ee_data[3];
1579			ee_data[2] = 0x61;
1580			ee_data[3] = 0x10;
1581
1582			/* HSC-PCI boards need to be byte-swapped and shifted
1583			 * up 1 word.  The shift must start from the end of
1584			 * the MAC because the ranges overlap by 2 bytes.
1585			 */
1586			for (i = 4; i >= 0; i -= 2) {
1587				ee_data[17 + i + 3] = ee_data[17 + i];
1588				ee_data[16 + i + 5] = ee_data[16 + i];
1589			}
1590		}
1591#endif
1592
1593		for (i = 0; i < 6; i ++) {
1594			dev->dev_addr[i] = ee_data[i + sa_offset];
1595			sum += ee_data[i + sa_offset];
1596		}
1597	}
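	/*
	 * 'sum' accumulated the station address bytes (16-bit words for the
	 * PNIC); an all-zero or all-0xFF result is treated below as a missing
	 * or blank EEPROM and a fallback address is synthesized instead.
	 */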
1598	/* Lite-On boards have the address byte-swapped. */
1599	if ((dev->dev_addr[0] == 0xA0 ||
1600	     dev->dev_addr[0] == 0xC0 ||
1601	     dev->dev_addr[0] == 0x02) &&
1602	    dev->dev_addr[1] == 0x00)
1603		for (i = 0; i < 6; i+=2) {
1604			char tmp = dev->dev_addr[i];
1605			dev->dev_addr[i] = dev->dev_addr[i+1];
1606			dev->dev_addr[i+1] = tmp;
1607		}
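	/*
	 * That is, each 16-bit word of the address was stored byte-swapped,
	 * so adjacent byte pairs (0/1, 2/3, 4/5) are exchanged here.
	 */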
1608	/* On the Zynx 315 Etherarray and other multiport boards only the
1609	   first Tulip has an EEPROM; the addresses of the subsequent ports
1610	   are derived from the first.
1611	   On SPARC systems the MAC address is held in the OBP property
1612	   "local-mac-address".
1613	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1614	   that here as well. */
1615	if (sum == 0  || sum == 6*0xff) {
1616#if defined(CONFIG_SPARC)
1617		struct device_node *dp = pci_device_to_OF_node(pdev);
1618		const unsigned char *addr;
1619		int len;
1620#endif
1621		eeprom_missing = 1;
1622		for (i = 0; i < 5; i++)
1623			dev->dev_addr[i] = last_phys_addr[i];
1624		dev->dev_addr[i] = last_phys_addr[i] + 1;
1625#if defined(CONFIG_SPARC)
1626		addr = of_get_property(dp, "local-mac-address", &len);
1627		if (addr && len == ETH_ALEN)
1628			memcpy(dev->dev_addr, addr, ETH_ALEN);
1629#endif
1630#if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
1631		if (last_irq)
1632			irq = last_irq;
1633#endif
1634	}
1635
1636	for (i = 0; i < 6; i++)
1637		last_phys_addr[i] = dev->dev_addr[i];
1638	last_irq = irq;
1639
1640	/* The lower four bits are the media type. */
1641	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
1642		if (options[board_idx] & MEDIA_MASK)
1643			tp->default_port = options[board_idx] & MEDIA_MASK;
1644		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1645			tp->full_duplex = 1;
1646		if (mtu[board_idx] > 0)
1647			dev->mtu = mtu[board_idx];
1648	}
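	/*
	 * options[], full_duplex[] and mtu[] are per-board settings, presumably
	 * exposed as module parameters via module_param_array() elsewhere in
	 * this file; e.g. loading with "options=11,11" would force the first
	 * two boards to media type 11 (medianame[11], "MII").
	 */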
1649	if (dev->mem_start & MEDIA_MASK)
1650		tp->default_port = dev->mem_start & MEDIA_MASK;
1651	if (tp->default_port) {
1652		pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1653			board_idx, medianame[tp->default_port & MEDIA_MASK]);
1654		tp->medialock = 1;
1655		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1656			tp->full_duplex = 1;
1657	}
1658	if (tp->full_duplex)
1659		tp->full_duplex_lock = 1;
1660
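	/*
	 * For MII media types (default_port 9..15) the media2advert[] table
	 * below converts the selection into MII advertisement-register bits,
	 * from 0x20 (10baseT half duplex) up to 0x200 (100baseT4).
	 */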
1661	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1662		static const u16 media2advert[] = {
1663			0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1664		};
1665		tp->mii_advertise = media2advert[tp->default_port - 9];
1666		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1667	}
1668
1669	if (tp->flags & HAS_MEDIA_TABLE) {
1670		sprintf(dev->name, DRV_NAME "%d", board_idx);	/* hack */
1671		tulip_parse_eeprom(dev);
1672		strcpy(dev->name, "eth%d");			/* un-hack */
1673	}
1674
1675	if ((tp->flags & ALWAYS_CHECK_MII) ||
1676		(tp->mtable  &&  tp->mtable->has_mii) ||
1677		( ! tp->mtable  &&  (tp->flags & HAS_MII))) {
1678		if (tp->mtable  &&  tp->mtable->has_mii) {
1679			for (i = 0; i < tp->mtable->leafcount; i++)
1680				if (tp->mtable->mleaf[i].media == 11) {
1681					tp->cur_index = i;
1682					tp->saved_if_port = dev->if_port;
1683					tulip_select_media(dev, 2);
1684					dev->if_port = tp->saved_if_port;
1685					break;
1686				}
1687		}
1688
1689		/* Find the connected MII xcvrs.
1690		   Doing this in open() would allow detecting external xcvrs
1691		   later, but it would take too much time. */
1692		tulip_find_mii (dev, board_idx);
1693	}
1694
1695	/* The Tulip-specific entries in the device structure. */
1696	dev->netdev_ops = &tulip_netdev_ops;
1697	dev->watchdog_timeo = TX_TIMEOUT;
1698#ifdef CONFIG_TULIP_NAPI
1699	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1700#endif
1701	dev->ethtool_ops = &ops;
1702
1703	if (register_netdev(dev))
1704		goto err_out_free_ring;
1705
1706	pci_set_drvdata(pdev, dev);
1707
1708	dev_info(&dev->dev,
1709#ifdef CONFIG_TULIP_MMIO
1710		 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1711#else
1712		 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1713#endif
1714		 chip_name, pdev->revision,
1715		 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1716		 eeprom_missing ? " EEPROM not present," : "",
1717		 dev->dev_addr, irq);
1718
1719	if (tp->chip_id == PNIC2)
1720		tp->link_change = pnic2_lnk_change;
1721	else if (tp->flags & HAS_NWAY)
1722		tp->link_change = t21142_lnk_change;
1723	else if (tp->flags & HAS_PNICNWAY)
1724		tp->link_change = pnic_lnk_change;
1725
1726	/* Reset the xcvr interface and turn on heartbeat. */
1727	switch (chip_idx) {
1728	case DC21140:
1729	case DM910X:
1730	default:
1731		if (tp->mtable)
1732			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1733		break;
1734	case DC21142:
1735		if (tp->mii_cnt  ||  tulip_media_cap[dev->if_port] & MediaIsMII) {
1736			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1737			iowrite32(0x0000, ioaddr + CSR13);
1738			iowrite32(0x0000, ioaddr + CSR14);
1739			iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1740		} else
1741			t21142_start_nway(dev);
1742		break;
1743	case PNIC2:
1744		/* Just do a reset for sanity's sake. */
1745		iowrite32(0x0000, ioaddr + CSR13);
1746		iowrite32(0x0000, ioaddr + CSR14);
1747		break;
1748	case LC82C168:
1749		if ( ! tp->mii_cnt) {
1750			tp->nway = 1;
1751			tp->nwayset = 0;
1752			iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1753			iowrite32(0x30, ioaddr + CSR12);
1754			iowrite32(0x0001F078, ioaddr + CSR6);
1755			iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1756		}
1757		break;
1758	case MX98713:
1759	case COMPEX9881:
1760		iowrite32(0x00000000, ioaddr + CSR6);
1761		iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1762		iowrite32(0x00000001, ioaddr + CSR13);
1763		break;
1764	case MX98715:
1765	case MX98725:
1766		iowrite32(0x01a80000, ioaddr + CSR6);
1767		iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1768		iowrite32(0x00001000, ioaddr + CSR12);
1769		break;
1770	case COMET:
1771		/* No initialization necessary. */
1772		break;
1773	}
1774
1775	/* put the chip in snooze mode until opened */
1776	tulip_set_power_state (tp, 0, 1);
1777
1778	return 0;
1779
1780err_out_free_ring:
1781	pci_free_consistent (pdev,
1782			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1783			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1784			     tp->rx_ring, tp->rx_ring_dma);
1785
1786err_out_mtable:
1787	kfree (tp->mtable);
1788	pci_iounmap(pdev, ioaddr);
1789
1790err_out_free_res:
1791	pci_release_regions (pdev);
1792
1793err_out_free_netdev:
1794	free_netdev (dev);
1795	return -ENODEV;
1796}
1797
1798
1799/* set the registers according to the given wolopts */
1800static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1801{
1802	struct net_device *dev = pci_get_drvdata(pdev);
1803	struct tulip_private *tp = netdev_priv(dev);
1804	void __iomem *ioaddr = tp->base_addr;
1805
1806	if (tp->flags & COMET_PM) {
1807
1808		unsigned int tmp;
1809
1810		tmp = ioread32(ioaddr + CSR18);
1811		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1812		tmp |= comet_csr18_pm_mode;
1813		iowrite32(tmp, ioaddr + CSR18);
1814
1815		/* Set the Wake-up Control/Status Register to the given WOL options. */
1816		tmp = ioread32(ioaddr + CSR13);
1817		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1818		if (wolopts & WAKE_MAGIC)
1819			tmp |= comet_csr13_mpre;
1820		if (wolopts & WAKE_PHY)
1821			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1822		/* Clear the event flags */
1823		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1824		iowrite32(tmp, ioaddr + CSR13);
1825	}
1826}
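/*
 * tulip_set_wolopts() is called from tulip_suspend() with the user-selected
 * wolopts to arm the Comet's wake-up logic, and from tulip_resume() with 0 to
 * disarm it again; on chips without COMET_PM it is a no-op.
 */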
1827
1828#ifdef CONFIG_PM
1829
1830
1831static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1832{
1833	pci_power_t pstate;
1834	struct net_device *dev = pci_get_drvdata(pdev);
1835	struct tulip_private *tp = netdev_priv(dev);
1836
1837	if (!dev)
1838		return -EINVAL;
1839
1840	if (!netif_running(dev))
1841		goto save_state;
1842
1843	tulip_down(dev);
1844
1845	netif_device_detach(dev);
1846	/* FIXME: it needlessly adds an error path. */
1847	free_irq(tp->pdev->irq, dev);
1848
1849save_state:
1850	pci_save_state(pdev);
1851	pci_disable_device(pdev);
1852	pstate = pci_choose_state(pdev, state);
1853	if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1854		int rc;
1855
1856		tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1857		rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1858		if (rc)
1859			pr_err("pci_enable_wake failed (%d)\n", rc);
1860	}
1861	pci_set_power_state(pdev, pstate);
1862
1863	return 0;
1864}
1865
1866
1867static int tulip_resume(struct pci_dev *pdev)
1868{
1869	struct net_device *dev = pci_get_drvdata(pdev);
1870	struct tulip_private *tp = netdev_priv(dev);
1871	void __iomem *ioaddr = tp->base_addr;
1872	int retval;
1873	unsigned int tmp;
1874
1875	if (!dev)
1876		return -EINVAL;
1877
1878	pci_set_power_state(pdev, PCI_D0);
1879	pci_restore_state(pdev);
1880
1881	if (!netif_running(dev))
1882		return 0;
1883
1884	if ((retval = pci_enable_device(pdev))) {
1885		pr_err("pci_enable_device failed in resume\n");
1886		return retval;
1887	}
1888
1889	retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1890			     dev->name, dev);
1891	if (retval) {
1892		pr_err("request_irq failed in resume\n");
1893		return retval;
1894	}
1895
1896	if (tp->flags & COMET_PM) {
1897		pci_enable_wake(pdev, PCI_D3hot, 0);
1898		pci_enable_wake(pdev, PCI_D3cold, 0);
1899
1900		/* Clear the PMES flag */
1901		tmp = ioread32(ioaddr + CSR20);
1902		tmp |= comet_csr20_pmes;
1903		iowrite32(tmp, ioaddr + CSR20);
1904
1905		/* Disable all wake-up events */
1906		tulip_set_wolopts(pdev, 0);
1907	}
1908	netif_device_attach(dev);
1909
1910	if (netif_running(dev))
1911		tulip_up(dev);
1912
1913	return 0;
1914}
1915
1916#endif /* CONFIG_PM */
1917
1918
1919static void tulip_remove_one(struct pci_dev *pdev)
1920{
1921	struct net_device *dev = pci_get_drvdata (pdev);
1922	struct tulip_private *tp;
1923
1924	if (!dev)
1925		return;
1926
1927	tp = netdev_priv(dev);
1928	unregister_netdev(dev);
1929	pci_free_consistent (pdev,
1930			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1931			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1932			     tp->rx_ring, tp->rx_ring_dma);
1933	kfree (tp->mtable);
1934	pci_iounmap(pdev, tp->base_addr);
1935	free_netdev (dev);
1936	pci_release_regions (pdev);
1937	pci_disable_device(pdev);
1938
1939	/* pci_power_off (pdev, -1); */
1940}
1941
1942#ifdef CONFIG_NET_POLL_CONTROLLER
1943/*
1944 * Polling 'interrupt' - used by things like netconsole to send skbs
1945 * without having to re-enable interrupts. It's not called while
1946 * the interrupt routine is executing.
1947 */
1948
1949static void poll_tulip (struct net_device *dev)
1950{
1951	struct tulip_private *tp = netdev_priv(dev);
1952	const int irq = tp->pdev->irq;
1953
1954	/* disable_irq here is not very nice, but with the lockless
1955	   interrupt handler we have no other choice. */
1956	disable_irq(irq);
1957	tulip_interrupt (irq, dev);
1958	enable_irq(irq);
1959}
1960#endif
1961
1962static struct pci_driver tulip_driver = {
1963	.name		= DRV_NAME,
1964	.id_table	= tulip_pci_tbl,
1965	.probe		= tulip_init_one,
1966	.remove		= tulip_remove_one,
1967#ifdef CONFIG_PM
1968	.suspend	= tulip_suspend,
1969	.resume		= tulip_resume,
1970#endif /* CONFIG_PM */
1971};
1972
1973
1974static int __init tulip_init (void)
1975{
1976#ifdef MODULE
1977	pr_info("%s", version);
1978#endif
1979
1980	if (!csr0) {
1981		pr_warn("unknown CPU architecture, using default csr0\n");
1982		/* default to 8 longword cache line alignment */
1983		csr0 = 0x00A00000 | 0x4800;
1984	}
1985
1986	/* copy module parms into globals */
1987	tulip_rx_copybreak = rx_copybreak;
1988	tulip_max_interrupt_work = max_interrupt_work;
1989
1990	/* probe for and init boards */
1991	return pci_register_driver(&tulip_driver);
1992}
1993
1994
1995static void __exit tulip_cleanup (void)
1996{
1997	pci_unregister_driver (&tulip_driver);
1998}
1999
2000
2001module_init(tulip_init);
2002module_exit(tulip_cleanup);