1/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2
3 Copyright 2000,2001 The Linux Kernel Team
4 Written/copyright 1994-2001 by Donald Becker.
5
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8
9 Please submit bugs to http://bugzilla.kernel.org/ .
10*/
11
12#define pr_fmt(fmt) "tulip: " fmt
13
14#define DRV_NAME "tulip"
15#ifdef CONFIG_TULIP_NAPI
16#define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */
17#else
18#define DRV_VERSION "1.1.15"
19#endif
20#define DRV_RELDATE "Feb 27, 2007"
21
22
23#include <linux/module.h>
24#include <linux/pci.h>
25#include <linux/slab.h>
26#include "tulip.h"
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/etherdevice.h>
30#include <linux/delay.h>
31#include <linux/mii.h>
32#include <linux/crc32.h>
33#include <asm/unaligned.h>
34#include <linux/uaccess.h>
35
36#ifdef CONFIG_SPARC
37#include <asm/prom.h>
38#endif
39
40static char version[] =
41 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
42
43/* A few user-configurable values. */
44
45/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
46static unsigned int max_interrupt_work = 25;
47
48#define MAX_UNITS 8
49/* Used to pass the full-duplex flag, etc. */
50static int full_duplex[MAX_UNITS];
51static int options[MAX_UNITS];
52static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
53
54/* The possible media types that can be set in options[] are: */
55const char * const medianame[32] = {
56 "10baseT", "10base2", "AUI", "100baseTx",
57 "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
58 "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
59 "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
60 "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
61 "","","","", "","","","", "","","","Transceiver reset",
62};
63
64/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
66 defined(CONFIG_SPARC) || defined(__ia64__) || \
67 defined(__sh__) || defined(__mips__)
68static int rx_copybreak = 1518;
69#else
70static int rx_copybreak = 100;
71#endif
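/* Note: this is the usual copybreak scheme -- a received frame no longer
 * than rx_copybreak bytes is copied into a freshly allocated skb so the
 * full-sized ring buffer can be handed straight back to the chip; on the
 * architectures listed above every frame (up to 1518 bytes) is copied.
 */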
72
73/*
74 Set the bus performance register.
75 Typical: Set 16 longword cache alignment, no burst limit.
76 Cache alignment bits 15:14 Burst length 13:8
77 0000 No alignment 0x00000000 unlimited 0800 8 longwords
78 4000 8 longwords 0100 1 longword 1000 16 longwords
79 8000 16 longwords 0200 2 longwords 2000 32 longwords
80 C000 32 longwords 0400 4 longwords
81 Warning: many older 486 systems are broken and require setting 0x00A04800
82 8 longword cache alignment, 8 longword burst.
83 ToDo: Non-Intel setting could be better.
84*/
85
86#if defined(__alpha__) || defined(__ia64__)
87static int csr0 = 0x01A00000 | 0xE000;
88#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
89static int csr0 = 0x01A00000 | 0x8000;
90#elif defined(CONFIG_SPARC) || defined(__hppa__)
91/* The UltraSparc PCI controllers will disconnect at every 64-byte
92 * crossing anyways so it makes no sense to tell Tulip to burst
93 * any more than that.
94 */
95static int csr0 = 0x01A00000 | 0x9000;
96#elif defined(__arm__) || defined(__sh__)
97static int csr0 = 0x01A00000 | 0x4800;
98#elif defined(__mips__)
99static int csr0 = 0x00200000 | 0x4000;
100#else
101static int csr0;
102#endif
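/* Note on the values above: 0x01A00000 sets the MWI, MRL and MRM PCI
 * transaction bits (the same bits tulip_mwi_config() below manipulates),
 * while the low 16 bits select the cache-alignment (15:14) and burst-length
 * (13:8) fields tabulated in the comment above.  For example, the x86
 * default 0x01A00000 | 0x8000 asks for 16-longword cache alignment with an
 * unlimited burst.
 */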
103
104/* Operational parameters that usually are not changed. */
105/* Time in jiffies before concluding the transmitter is hung. */
106#define TX_TIMEOUT (4*HZ)
107
108
109MODULE_AUTHOR("The Linux Kernel Team");
110MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
111MODULE_LICENSE("GPL");
112MODULE_VERSION(DRV_VERSION);
113module_param(tulip_debug, int, 0);
114module_param(max_interrupt_work, int, 0);
115module_param(rx_copybreak, int, 0);
116module_param(csr0, int, 0);
117module_param_array(options, int, NULL, 0);
118module_param_array(full_duplex, int, NULL, 0);
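/* Illustrative usage (example values, not from the original sources):
 *   modprobe tulip options=0,11 full_duplex=0,1
 * leaves card 0 autosensing and forces card 1 onto its MII transceiver in
 * full duplex; options[] values index medianame[] above.
 */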
119
120#ifdef TULIP_DEBUG
121int tulip_debug = TULIP_DEBUG;
122#else
123int tulip_debug = 1;
124#endif
125
126static void tulip_timer(struct timer_list *t)
127{
128 struct tulip_private *tp = from_timer(tp, t, timer);
129 struct net_device *dev = tp->dev;
130
131 if (netif_running(dev))
132 schedule_work(&tp->media_work);
133}
134
135/*
136 * This table is used during operation for capabilities and the media timer.
137 *
138 * It is indexed via the values in 'enum chips'
139 */
140
141const struct tulip_chip_table tulip_tbl[] = {
142 { }, /* placeholder for array, slot unused currently */
143 { }, /* placeholder for array, slot unused currently */
144
145 /* DC21140 */
146 { "Digital DS21140 Tulip", 128, 0x0001ebef,
147 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
148 tulip_media_task },
149
150 /* DC21142, DC21143 */
151 { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
152 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
153 | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
154
155 /* LC82C168 */
156 { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
157 HAS_MII | HAS_PNICNWAY, pnic_timer, },
158
159 /* MX98713 */
160 { "Macronix 98713 PMAC", 128, 0x0001ebef,
161 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
162
163 /* MX98715 */
164 { "Macronix 98715 PMAC", 256, 0x0001ebef,
165 HAS_MEDIA_TABLE, mxic_timer, },
166
167 /* MX98725 */
168 { "Macronix 98725 PMAC", 256, 0x0001ebef,
169 HAS_MEDIA_TABLE, mxic_timer, },
170
171 /* AX88140 */
172 { "ASIX AX88140", 128, 0x0001fbff,
173 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
174 | IS_ASIX, tulip_timer, tulip_media_task },
175
176 /* PNIC2 */
177 { "Lite-On PNIC-II", 256, 0x0801fbff,
178 HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
179
180 /* COMET */
181 { "ADMtek Comet", 256, 0x0001abef,
182 HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
183
184 /* COMPEX9881 */
185 { "Compex 9881 PMAC", 128, 0x0001ebef,
186 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
187
188 /* I21145 */
189 { "Intel DS21145 Tulip", 128, 0x0801fbff,
190 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
191 | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
192
193 /* DM910X */
194#ifdef CONFIG_TULIP_DM910X
195 { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
196 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
197 tulip_timer, tulip_media_task },
198#else
199 { NULL },
200#endif
201
202 /* RS7112 */
203 { "Conexant LANfinity", 256, 0x0001ebef,
204 HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
205
206};
207
208
209static const struct pci_device_id tulip_pci_tbl[] = {
210 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
211 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
212 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
213 { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
214 { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
215/* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
216 { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
217 { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
218 { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
219 { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
220 { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
221 { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
222 { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
223 { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
225 { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
229 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
230#ifdef CONFIG_TULIP_DM910X
231 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
232 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
233#endif
234 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
235 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
236 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
237 { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
238 { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
239 { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
241 { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
242 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
243 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
244 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
245 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
246 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
247 { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
248 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
249 { } /* terminate list */
250};
251MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
252
253
254/* A full-duplex map for media types. */
255const char tulip_media_cap[32] =
256{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
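/* Each entry is a small bit mask built from the MediaIs* flags in tulip.h
 * (MediaIsFD, MediaAlwaysFD, MediaIsMII, MediaIsFx, MediaIs100), indexed by
 * the same media codes as medianame[]: e.g. entry 3 (100baseTx) is
 * MediaIs100 and entry 9 (MII 10baseT) is MediaIsMII.
 */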
257
258static void tulip_tx_timeout(struct net_device *dev);
259static void tulip_init_ring(struct net_device *dev);
260static void tulip_free_ring(struct net_device *dev);
261static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
262 struct net_device *dev);
263static int tulip_open(struct net_device *dev);
264static int tulip_close(struct net_device *dev);
265static void tulip_up(struct net_device *dev);
266static void tulip_down(struct net_device *dev);
267static struct net_device_stats *tulip_get_stats(struct net_device *dev);
268static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
269static void set_rx_mode(struct net_device *dev);
270static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
271#ifdef CONFIG_NET_POLL_CONTROLLER
272static void poll_tulip(struct net_device *dev);
273#endif
274
275static void tulip_set_power_state (struct tulip_private *tp,
276 int sleep, int snooze)
277{
278 if (tp->flags & HAS_ACPI) {
279 u32 tmp, newtmp;
280 pci_read_config_dword (tp->pdev, CFDD, &tmp);
281 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
282 if (sleep)
283 newtmp |= CFDD_Sleep;
284 else if (snooze)
285 newtmp |= CFDD_Snooze;
286 if (tmp != newtmp)
287 pci_write_config_dword (tp->pdev, CFDD, newtmp);
288 }
289
290}
291
292
293static void tulip_up(struct net_device *dev)
294{
295 struct tulip_private *tp = netdev_priv(dev);
296 void __iomem *ioaddr = tp->base_addr;
297 int next_tick = 3*HZ;
298 u32 reg;
299 int i;
300
301#ifdef CONFIG_TULIP_NAPI
302 napi_enable(&tp->napi);
303#endif
304
305 /* Wake the chip from sleep/snooze mode. */
306 tulip_set_power_state (tp, 0, 0);
307
308 /* Disable all WOL events */
309 pci_enable_wake(tp->pdev, PCI_D3hot, 0);
310 pci_enable_wake(tp->pdev, PCI_D3cold, 0);
311 tulip_set_wolopts(tp->pdev, 0);
312
313 /* On some chip revs we must set the MII/SYM port before the reset!? */
314 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
315 iowrite32(0x00040000, ioaddr + CSR6);
316
317 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
318 iowrite32(0x00000001, ioaddr + CSR0);
319	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
320 udelay(100);
321
322 /* Deassert reset.
323 Wait the specified 50 PCI cycles after a reset by initializing
324 Tx and Rx queues and the address filter list. */
325 iowrite32(tp->csr0, ioaddr + CSR0);
326	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
327 udelay(100);
328
329 if (tulip_debug > 1)
330 netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
331
332 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
333 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
334 tp->cur_rx = tp->cur_tx = 0;
335 tp->dirty_rx = tp->dirty_tx = 0;
336
337 if (tp->flags & MC_HASH_ONLY) {
338 u32 addr_low = get_unaligned_le32(dev->dev_addr);
339 u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
340 if (tp->chip_id == AX88140) {
341 iowrite32(0, ioaddr + CSR13);
342 iowrite32(addr_low, ioaddr + CSR14);
343 iowrite32(1, ioaddr + CSR13);
344 iowrite32(addr_high, ioaddr + CSR14);
345 } else if (tp->flags & COMET_MAC_ADDR) {
346 iowrite32(addr_low, ioaddr + 0xA4);
347 iowrite32(addr_high, ioaddr + 0xA8);
348 iowrite32(0, ioaddr + CSR27);
349 iowrite32(0, ioaddr + CSR28);
350 }
351 } else {
352 /* This is set_rx_mode(), but without starting the transmitter. */
353 u16 *eaddrs = (u16 *)dev->dev_addr;
354 u16 *setup_frm = &tp->setup_frame[15*6];
355 dma_addr_t mapping;
356
357 /* 21140 bug: you must add the broadcast address. */
358 memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
359 /* Fill the final entry of the table with our physical address. */
360 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
361 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
362 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
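		/* The setup frame is 192 bytes: 16 perfect-filter entries of
		   12 bytes each.  The chip uses only one 16-bit half of each
		   32-bit word, which is why every halfword of the station
		   address is written twice above (see the matching note in
		   set_rx_mode()). */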
363
364 mapping = pci_map_single(tp->pdev, tp->setup_frame,
365 sizeof(tp->setup_frame),
366 PCI_DMA_TODEVICE);
367 tp->tx_buffers[tp->cur_tx].skb = NULL;
368 tp->tx_buffers[tp->cur_tx].mapping = mapping;
369
370 /* Put the setup frame on the Tx list. */
371 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
372 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
373 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
374
375 tp->cur_tx++;
376 }
377
378 tp->saved_if_port = dev->if_port;
379 if (dev->if_port == 0)
380 dev->if_port = tp->default_port;
381
382 /* Allow selecting a default media. */
383 i = 0;
384 if (tp->mtable == NULL)
385 goto media_picked;
386 if (dev->if_port) {
387 int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
388 (dev->if_port == 12 ? 0 : dev->if_port);
389 for (i = 0; i < tp->mtable->leafcount; i++)
390 if (tp->mtable->mleaf[i].media == looking_for) {
391 dev_info(&dev->dev,
392 "Using user-specified media %s\n",
393 medianame[dev->if_port]);
394 goto media_picked;
395 }
396 }
397 if ((tp->mtable->defaultmedia & 0x0800) == 0) {
398 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
399 for (i = 0; i < tp->mtable->leafcount; i++)
400 if (tp->mtable->mleaf[i].media == looking_for) {
401 dev_info(&dev->dev,
402 "Using EEPROM-set media %s\n",
403 medianame[looking_for]);
404 goto media_picked;
405 }
406 }
407 /* Start sensing first non-full-duplex media. */
408 for (i = tp->mtable->leafcount - 1;
409 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
410 ;
411media_picked:
412
413 tp->csr6 = 0;
414 tp->cur_index = i;
415 tp->nwayset = 0;
416
417 if (dev->if_port) {
418 if (tp->chip_id == DC21143 &&
419 (tulip_media_cap[dev->if_port] & MediaIsMII)) {
420 /* We must reset the media CSRs when we force-select MII mode. */
421 iowrite32(0x0000, ioaddr + CSR13);
422 iowrite32(0x0000, ioaddr + CSR14);
423 iowrite32(0x0008, ioaddr + CSR15);
424 }
425 tulip_select_media(dev, 1);
426 } else if (tp->chip_id == DC21142) {
427 if (tp->mii_cnt) {
428 tulip_select_media(dev, 1);
429 if (tulip_debug > 1)
430 dev_info(&dev->dev,
431 "Using MII transceiver %d, status %04x\n",
432 tp->phys[0],
433 tulip_mdio_read(dev, tp->phys[0], 1));
434 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
435 tp->csr6 = csr6_mask_hdcap;
436 dev->if_port = 11;
437 iowrite32(0x0000, ioaddr + CSR13);
438 iowrite32(0x0000, ioaddr + CSR14);
439 } else
440 t21142_start_nway(dev);
441 } else if (tp->chip_id == PNIC2) {
442 /* for initial startup advertise 10/100 Full and Half */
443 tp->sym_advertise = 0x01E0;
444 /* enable autonegotiate end interrupt */
445 iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
446 iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
447 pnic2_start_nway(dev);
448 } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
449 if (tp->mii_cnt) {
450 dev->if_port = 11;
451 tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
452 iowrite32(0x0001, ioaddr + CSR15);
453 } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
454 pnic_do_nway(dev);
455 else {
456 /* Start with 10mbps to do autonegotiation. */
457 iowrite32(0x32, ioaddr + CSR12);
458 tp->csr6 = 0x00420000;
459 iowrite32(0x0001B078, ioaddr + 0xB8);
460 iowrite32(0x0201B078, ioaddr + 0xB8);
461 next_tick = 1*HZ;
462 }
463 } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
464 ! tp->medialock) {
465 dev->if_port = 0;
466 tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
467 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
468 } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
469 /* Provided by BOLO, Macronix - 12/10/1998. */
470 dev->if_port = 0;
471 tp->csr6 = 0x01a80200;
472 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
473 iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
474 } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
475 /* Enable automatic Tx underrun recovery. */
476 iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
477 dev->if_port = tp->mii_cnt ? 11 : 0;
478 tp->csr6 = 0x00040000;
479 } else if (tp->chip_id == AX88140) {
480 tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
481 } else
482 tulip_select_media(dev, 1);
483
484 /* Start the chip's Tx to process setup frame. */
485 tulip_stop_rxtx(tp);
486 barrier();
487 udelay(5);
488 iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
489
490 /* Enable interrupts by setting the interrupt mask. */
491 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
492 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
493 tulip_start_rxtx(tp);
494 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
495
496 if (tulip_debug > 2) {
497 netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
498 ioread32(ioaddr + CSR0),
499 ioread32(ioaddr + CSR5),
500 ioread32(ioaddr + CSR6));
501 }
502
503	/* Set the timer to check for link beat and perhaps switch
504	   to an alternate media type. */
505 tp->timer.expires = RUN_AT(next_tick);
506 add_timer(&tp->timer);
507#ifdef CONFIG_TULIP_NAPI
508 timer_setup(&tp->oom_timer, oom_timer, 0);
509#endif
510}
511
512static int
513tulip_open(struct net_device *dev)
514{
515 struct tulip_private *tp = netdev_priv(dev);
516 int retval;
517
518 tulip_init_ring (dev);
519
520 retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
521 dev->name, dev);
522 if (retval)
523 goto free_ring;
524
525 tulip_up (dev);
526
527 netif_start_queue (dev);
528
529 return 0;
530
531free_ring:
532 tulip_free_ring (dev);
533 return retval;
534}
535
536
537static void tulip_tx_timeout(struct net_device *dev)
538{
539 struct tulip_private *tp = netdev_priv(dev);
540 void __iomem *ioaddr = tp->base_addr;
541 unsigned long flags;
542
543 spin_lock_irqsave (&tp->lock, flags);
544
545 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
546 /* Do nothing -- the media monitor should handle this. */
547 if (tulip_debug > 1)
548 dev_warn(&dev->dev,
549 "Transmit timeout using MII device\n");
550 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
551 tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
552 tp->chip_id == DM910X) {
553 dev_warn(&dev->dev,
554 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
555 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
556 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
557 ioread32(ioaddr + CSR15));
558 tp->timeout_recovery = 1;
559 schedule_work(&tp->media_work);
560 goto out_unlock;
561 } else if (tp->chip_id == PNIC2) {
562 dev_warn(&dev->dev,
563 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
564 (int)ioread32(ioaddr + CSR5),
565 (int)ioread32(ioaddr + CSR6),
566 (int)ioread32(ioaddr + CSR7),
567 (int)ioread32(ioaddr + CSR12));
568 } else {
569 dev_warn(&dev->dev,
570 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
571 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
572 dev->if_port = 0;
573 }
574
575#if defined(way_too_many_messages)
576 if (tulip_debug > 3) {
577 int i;
578 for (i = 0; i < RX_RING_SIZE; i++) {
579 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
580 int j;
581 printk(KERN_DEBUG
582 "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
583 i,
584 (unsigned int)tp->rx_ring[i].status,
585 (unsigned int)tp->rx_ring[i].length,
586 (unsigned int)tp->rx_ring[i].buffer1,
587 (unsigned int)tp->rx_ring[i].buffer2,
588 buf[0], buf[1], buf[2]);
589 for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
590 if (j < 100)
591 pr_cont(" %02x", buf[j]);
592 pr_cont(" j=%d\n", j);
593 }
594 printk(KERN_DEBUG " Rx ring %p: ", tp->rx_ring);
595 for (i = 0; i < RX_RING_SIZE; i++)
596 pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
597 printk(KERN_DEBUG " Tx ring %p: ", tp->tx_ring);
598 for (i = 0; i < TX_RING_SIZE; i++)
599 pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
600 pr_cont("\n");
601 }
602#endif
603
604 tulip_tx_timeout_complete(tp, ioaddr);
605
606out_unlock:
607 spin_unlock_irqrestore (&tp->lock, flags);
608 netif_trans_update(dev); /* prevent tx timeout */
609 netif_wake_queue (dev);
610}
611
612
613/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
614static void tulip_init_ring(struct net_device *dev)
615{
616 struct tulip_private *tp = netdev_priv(dev);
617 int i;
618
619 tp->susp_rx = 0;
620 tp->ttimer = 0;
621 tp->nir = 0;
622
623 for (i = 0; i < RX_RING_SIZE; i++) {
624 tp->rx_ring[i].status = 0x00000000;
625 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
626 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
627 tp->rx_buffers[i].skb = NULL;
628 tp->rx_buffers[i].mapping = 0;
629 }
630 /* Mark the last entry as wrapping the ring. */
631 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
632 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
633
634 for (i = 0; i < RX_RING_SIZE; i++) {
635 dma_addr_t mapping;
636
637 /* Note the receive buffer must be longword aligned.
638 netdev_alloc_skb() provides 16 byte alignment. But do *not*
639 use skb_reserve() to align the IP header! */
640 struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
641 tp->rx_buffers[i].skb = skb;
642 if (skb == NULL)
643 break;
644 mapping = pci_map_single(tp->pdev, skb->data,
645 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
646 tp->rx_buffers[i].mapping = mapping;
647 tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
648 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
649 }
650 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
651
652 /* The Tx buffer descriptor is filled in as needed, but we
653 do need to clear the ownership bit. */
654 for (i = 0; i < TX_RING_SIZE; i++) {
655 tp->tx_buffers[i].skb = NULL;
656 tp->tx_buffers[i].mapping = 0;
657 tp->tx_ring[i].status = 0x00000000;
658 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
659 }
660 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
661}
662
663static netdev_tx_t
664tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
665{
666 struct tulip_private *tp = netdev_priv(dev);
667 int entry;
668 u32 flag;
669 dma_addr_t mapping;
670 unsigned long flags;
671
672 spin_lock_irqsave(&tp->lock, flags);
673
674 /* Calculate the next Tx descriptor entry. */
675 entry = tp->cur_tx % TX_RING_SIZE;
676
677 tp->tx_buffers[entry].skb = skb;
678 mapping = pci_map_single(tp->pdev, skb->data,
679 skb->len, PCI_DMA_TODEVICE);
680 tp->tx_buffers[entry].mapping = mapping;
681 tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
682
683 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
684 flag = 0x60000000; /* No interrupt */
685 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
686 flag = 0xe0000000; /* Tx-done intr. */
687 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
688 flag = 0x60000000; /* No Tx-done intr. */
689 } else { /* Leave room for set_rx_mode() to fill entries. */
690 flag = 0xe0000000; /* Tx-done intr. */
691 netif_stop_queue(dev);
692 }
693 if (entry == TX_RING_SIZE-1)
694 flag = 0xe0000000 | DESC_RING_WRAP;
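	/* Summary of the length-word flags chosen above: 0x60000000 marks a
	   single complete packet with no completion interrupt, 0xe0000000
	   additionally requests a Tx-done interrupt (per the 21x4x TDES1
	   first/last-segment and interrupt-on-completion bits), and
	   DESC_RING_WRAP tags the final descriptor in the ring. */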
695
696 tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
697 /* if we were using Transmit Automatic Polling, we would need a
698 * wmb() here. */
699 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
700 wmb();
701
702 tp->cur_tx++;
703
704 /* Trigger an immediate transmit demand. */
705 iowrite32(0, tp->base_addr + CSR1);
706
707 spin_unlock_irqrestore(&tp->lock, flags);
708
709 return NETDEV_TX_OK;
710}
711
712static void tulip_clean_tx_ring(struct tulip_private *tp)
713{
714 unsigned int dirty_tx;
715
716 for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
717 dirty_tx++) {
718 int entry = dirty_tx % TX_RING_SIZE;
719 int status = le32_to_cpu(tp->tx_ring[entry].status);
720
721 if (status < 0) {
722 tp->dev->stats.tx_errors++; /* It wasn't Txed */
723 tp->tx_ring[entry].status = 0;
724 }
725
726 /* Check for Tx filter setup frames. */
727 if (tp->tx_buffers[entry].skb == NULL) {
728 /* test because dummy frames not mapped */
729 if (tp->tx_buffers[entry].mapping)
730 pci_unmap_single(tp->pdev,
731 tp->tx_buffers[entry].mapping,
732 sizeof(tp->setup_frame),
733 PCI_DMA_TODEVICE);
734 continue;
735 }
736
737 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
738 tp->tx_buffers[entry].skb->len,
739 PCI_DMA_TODEVICE);
740
741 /* Free the original skb. */
742 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
743 tp->tx_buffers[entry].skb = NULL;
744 tp->tx_buffers[entry].mapping = 0;
745 }
746}
747
748static void tulip_down (struct net_device *dev)
749{
750 struct tulip_private *tp = netdev_priv(dev);
751 void __iomem *ioaddr = tp->base_addr;
752 unsigned long flags;
753
754 cancel_work_sync(&tp->media_work);
755
756#ifdef CONFIG_TULIP_NAPI
757 napi_disable(&tp->napi);
758#endif
759
760 del_timer_sync (&tp->timer);
761#ifdef CONFIG_TULIP_NAPI
762 del_timer_sync (&tp->oom_timer);
763#endif
764 spin_lock_irqsave (&tp->lock, flags);
765
766 /* Disable interrupts by clearing the interrupt mask. */
767 iowrite32 (0x00000000, ioaddr + CSR7);
768
769 /* Stop the Tx and Rx processes. */
770 tulip_stop_rxtx(tp);
771
772 /* prepare receive buffers */
773 tulip_refill_rx(dev);
774
775 /* release any unconsumed transmit buffers */
776 tulip_clean_tx_ring(tp);
777
778 if (ioread32(ioaddr + CSR6) != 0xffffffff)
779 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
780
781 spin_unlock_irqrestore (&tp->lock, flags);
782
783 timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
784
785 dev->if_port = tp->saved_if_port;
786
787 /* Leave the driver in snooze, not sleep, mode. */
788 tulip_set_power_state (tp, 0, 1);
789}
790
791static void tulip_free_ring (struct net_device *dev)
792{
793 struct tulip_private *tp = netdev_priv(dev);
794 int i;
795
796 /* Free all the skbuffs in the Rx queue. */
797 for (i = 0; i < RX_RING_SIZE; i++) {
798 struct sk_buff *skb = tp->rx_buffers[i].skb;
799 dma_addr_t mapping = tp->rx_buffers[i].mapping;
800
801 tp->rx_buffers[i].skb = NULL;
802 tp->rx_buffers[i].mapping = 0;
803
804 tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
805 tp->rx_ring[i].length = 0;
806 /* An invalid address. */
807 tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
808 if (skb) {
809 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
810 PCI_DMA_FROMDEVICE);
811 dev_kfree_skb (skb);
812 }
813 }
814
815 for (i = 0; i < TX_RING_SIZE; i++) {
816 struct sk_buff *skb = tp->tx_buffers[i].skb;
817
818 if (skb != NULL) {
819 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
820 skb->len, PCI_DMA_TODEVICE);
821 dev_kfree_skb (skb);
822 }
823 tp->tx_buffers[i].skb = NULL;
824 tp->tx_buffers[i].mapping = 0;
825 }
826}
827
828static int tulip_close (struct net_device *dev)
829{
830 struct tulip_private *tp = netdev_priv(dev);
831 void __iomem *ioaddr = tp->base_addr;
832
833 netif_stop_queue (dev);
834
835 tulip_down (dev);
836
837 if (tulip_debug > 1)
838 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
839 ioread32 (ioaddr + CSR5));
840
841 free_irq (tp->pdev->irq, dev);
842
843 tulip_free_ring (dev);
844
845 return 0;
846}
847
848static struct net_device_stats *tulip_get_stats(struct net_device *dev)
849{
850 struct tulip_private *tp = netdev_priv(dev);
851 void __iomem *ioaddr = tp->base_addr;
852
853 if (netif_running(dev)) {
854 unsigned long flags;
855
856 spin_lock_irqsave (&tp->lock, flags);
857
858 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
859
860 spin_unlock_irqrestore(&tp->lock, flags);
861 }
862
863 return &dev->stats;
864}
865
866
867static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
868{
869 struct tulip_private *np = netdev_priv(dev);
870 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
871 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
872 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
873}
874
875
876static int tulip_ethtool_set_wol(struct net_device *dev,
877 struct ethtool_wolinfo *wolinfo)
878{
879 struct tulip_private *tp = netdev_priv(dev);
880
881 if (wolinfo->wolopts & (~tp->wolinfo.supported))
882 return -EOPNOTSUPP;
883
884 tp->wolinfo.wolopts = wolinfo->wolopts;
885 device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
886 return 0;
887}
888
889static void tulip_ethtool_get_wol(struct net_device *dev,
890 struct ethtool_wolinfo *wolinfo)
891{
892 struct tulip_private *tp = netdev_priv(dev);
893
894 wolinfo->supported = tp->wolinfo.supported;
895 wolinfo->wolopts = tp->wolinfo.wolopts;
896 return;
897}
898
899
900static const struct ethtool_ops ops = {
901 .get_drvinfo = tulip_get_drvinfo,
902 .set_wol = tulip_ethtool_set_wol,
903 .get_wol = tulip_ethtool_get_wol,
904};
905
906/* Provide ioctl() calls to examine the MII xcvr state. */
907static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
908{
909 struct tulip_private *tp = netdev_priv(dev);
910 void __iomem *ioaddr = tp->base_addr;
911 struct mii_ioctl_data *data = if_mii(rq);
912 const unsigned int phy_idx = 0;
913 int phy = tp->phys[phy_idx] & 0x1f;
914 unsigned int regnum = data->reg_num;
915
916 switch (cmd) {
917 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
918 if (tp->mii_cnt)
919 data->phy_id = phy;
920 else if (tp->flags & HAS_NWAY)
921 data->phy_id = 32;
922 else if (tp->chip_id == COMET)
923 data->phy_id = 1;
924 else
925 return -ENODEV;
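		/* fall through */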
926
927 case SIOCGMIIREG: /* Read MII PHY register. */
928 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
929 int csr12 = ioread32 (ioaddr + CSR12);
930 int csr14 = ioread32 (ioaddr + CSR14);
931 switch (regnum) {
932 case 0:
933 if (((csr14<<5) & 0x1000) ||
934 (dev->if_port == 5 && tp->nwayset))
935 data->val_out = 0x1000;
936 else
937 data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
938 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
939 break;
940 case 1:
941 data->val_out =
942 0x1848 +
943 ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
944 ((csr12&0x06) == 6 ? 0 : 4);
945 data->val_out |= 0x6048;
946 break;
947 case 4:
948 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
949 data->val_out =
950 ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
951 ((csr14 >> 1) & 0x20) + 1;
952 data->val_out |= ((csr14 >> 9) & 0x03C0);
953 break;
954 case 5: data->val_out = tp->lpar; break;
955 default: data->val_out = 0; break;
956 }
957 } else {
958 data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
959 }
960 return 0;
961
962 case SIOCSMIIREG: /* Write MII PHY register. */
963 if (regnum & ~0x1f)
964 return -EINVAL;
965 if (data->phy_id == phy) {
966 u16 value = data->val_in;
967 switch (regnum) {
968 case 0: /* Check for autonegotiation on or reset. */
969 tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
970 if (tp->full_duplex_lock)
971 tp->full_duplex = (value & 0x0100) ? 1 : 0;
972 break;
973 case 4:
974 tp->advertising[phy_idx] =
975 tp->mii_advertise = data->val_in;
976 break;
977 }
978 }
979 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
980 u16 value = data->val_in;
981 if (regnum == 0) {
982 if ((value & 0x1200) == 0x1200) {
983 if (tp->chip_id == PNIC2) {
984 pnic2_start_nway (dev);
985 } else {
986 t21142_start_nway (dev);
987 }
988 }
989 } else if (regnum == 4)
990 tp->sym_advertise = value;
991 } else {
992 tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
993 }
994 return 0;
995 default:
996 return -EOPNOTSUPP;
997 }
998
999 return -EOPNOTSUPP;
1000}
1001
1002
1003/* Set or clear the multicast filter for this adaptor.
1004 Note that we only use exclusion around actually queueing the
1005 new frame, not around filling tp->setup_frame. This is non-deterministic
1006 when re-entered but still correct. */
1007
1008static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1009{
1010 struct tulip_private *tp = netdev_priv(dev);
1011 u16 hash_table[32];
1012 struct netdev_hw_addr *ha;
1013 int i;
1014 u16 *eaddrs;
1015
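	/* The hash-mode setup frame carries a 512-bit multicast hash table in
	   its first 32 16-bit words (each word stored in both halves of a
	   longword by the loop below); bit 255 is the broadcast entry and
	   each multicast address sets the bit given by the low 9 bits of its
	   little-endian CRC.  The station address still occupies entry 13. */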
1016 memset(hash_table, 0, sizeof(hash_table));
1017 __set_bit_le(255, hash_table); /* Broadcast entry */
1018 /* This should work on big-endian machines as well. */
1019 netdev_for_each_mc_addr(ha, dev) {
1020 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1021
1022 __set_bit_le(index, hash_table);
1023 }
1024 for (i = 0; i < 32; i++) {
1025 *setup_frm++ = hash_table[i];
1026 *setup_frm++ = hash_table[i];
1027 }
1028 setup_frm = &tp->setup_frame[13*6];
1029
1030 /* Fill the final entry with our physical address. */
1031 eaddrs = (u16 *)dev->dev_addr;
1032 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1033 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1034 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1035}
1036
1037static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1038{
1039 struct tulip_private *tp = netdev_priv(dev);
1040 struct netdev_hw_addr *ha;
1041 u16 *eaddrs;
1042
1043 /* We have <= 14 addresses so we can use the wonderful
1044 16 address perfect filtering of the Tulip. */
1045 netdev_for_each_mc_addr(ha, dev) {
1046 eaddrs = (u16 *) ha->addr;
1047 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1048 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1049 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1050 }
1051 /* Fill the unused entries with the broadcast address. */
1052 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1053 setup_frm = &tp->setup_frame[15*6];
1054
1055 /* Fill the final entry with our physical address. */
1056 eaddrs = (u16 *)dev->dev_addr;
1057 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1058 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1059 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1060}
1061
1062
1063static void set_rx_mode(struct net_device *dev)
1064{
1065 struct tulip_private *tp = netdev_priv(dev);
1066 void __iomem *ioaddr = tp->base_addr;
1067 int csr6;
1068
1069 csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1070
1071 tp->csr6 &= ~0x00D5;
1072 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1073 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1074 csr6 |= AcceptAllMulticast | AcceptAllPhys;
1075 } else if ((netdev_mc_count(dev) > 1000) ||
1076 (dev->flags & IFF_ALLMULTI)) {
1077 /* Too many to filter well -- accept all multicasts. */
1078 tp->csr6 |= AcceptAllMulticast;
1079 csr6 |= AcceptAllMulticast;
1080 } else if (tp->flags & MC_HASH_ONLY) {
1081 /* Some work-alikes have only a 64-entry hash filter table. */
1082 /* Should verify correctness on big-endian/__powerpc__ */
1083 struct netdev_hw_addr *ha;
1084 if (netdev_mc_count(dev) > 64) {
1085 /* Arbitrary non-effective limit. */
1086 tp->csr6 |= AcceptAllMulticast;
1087 csr6 |= AcceptAllMulticast;
1088 } else {
1089 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
1090 int filterbit;
1091 netdev_for_each_mc_addr(ha, dev) {
1092 if (tp->flags & COMET_MAC_ADDR)
1093 filterbit = ether_crc_le(ETH_ALEN,
1094 ha->addr);
1095 else
1096 filterbit = ether_crc(ETH_ALEN,
1097 ha->addr) >> 26;
1098 filterbit &= 0x3f;
1099 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1100 if (tulip_debug > 2)
1101 dev_info(&dev->dev,
1102 "Added filter for %pM %08x bit %d\n",
1103 ha->addr,
1104 ether_crc(ETH_ALEN, ha->addr),
1105 filterbit);
1106 }
1107 if (mc_filter[0] == tp->mc_filter[0] &&
1108 mc_filter[1] == tp->mc_filter[1])
1109 ; /* No change. */
1110 else if (tp->flags & IS_ASIX) {
1111 iowrite32(2, ioaddr + CSR13);
1112 iowrite32(mc_filter[0], ioaddr + CSR14);
1113 iowrite32(3, ioaddr + CSR13);
1114 iowrite32(mc_filter[1], ioaddr + CSR14);
1115 } else if (tp->flags & COMET_MAC_ADDR) {
1116 iowrite32(mc_filter[0], ioaddr + CSR27);
1117 iowrite32(mc_filter[1], ioaddr + CSR28);
1118 }
1119 tp->mc_filter[0] = mc_filter[0];
1120 tp->mc_filter[1] = mc_filter[1];
1121 }
1122 } else {
1123 unsigned long flags;
1124 u32 tx_flags = 0x08000000 | 192;
1125
1126 /* Note that only the low-address shortword of setup_frame is valid!
1127 The values are doubled for big-endian architectures. */
1128 if (netdev_mc_count(dev) > 14) {
1129 /* Must use a multicast hash table. */
1130 build_setup_frame_hash(tp->setup_frame, dev);
1131 tx_flags = 0x08400000 | 192;
1132 } else {
1133 build_setup_frame_perfect(tp->setup_frame, dev);
1134 }
1135
1136 spin_lock_irqsave(&tp->lock, flags);
1137
1138 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1139 /* Same setup recently queued, we need not add it. */
1140 } else {
1141 unsigned int entry;
1142 int dummy = -1;
1143
1144 /* Now add this frame to the Tx list. */
1145
1146 entry = tp->cur_tx++ % TX_RING_SIZE;
1147
1148 if (entry != 0) {
1149 /* Avoid a chip errata by prefixing a dummy entry. */
1150 tp->tx_buffers[entry].skb = NULL;
1151 tp->tx_buffers[entry].mapping = 0;
1152 tp->tx_ring[entry].length =
1153 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1154 tp->tx_ring[entry].buffer1 = 0;
1155 /* Must set DescOwned later to avoid race with chip */
1156 dummy = entry;
1157 entry = tp->cur_tx++ % TX_RING_SIZE;
1158
1159 }
1160
1161 tp->tx_buffers[entry].skb = NULL;
1162 tp->tx_buffers[entry].mapping =
1163 pci_map_single(tp->pdev, tp->setup_frame,
1164 sizeof(tp->setup_frame),
1165 PCI_DMA_TODEVICE);
1166 /* Put the setup frame on the Tx list. */
1167 if (entry == TX_RING_SIZE-1)
1168 tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
1169 tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1170 tp->tx_ring[entry].buffer1 =
1171 cpu_to_le32(tp->tx_buffers[entry].mapping);
1172 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1173 if (dummy >= 0)
1174 tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1175 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1176 netif_stop_queue(dev);
1177
1178 /* Trigger an immediate transmit demand. */
1179 iowrite32(0, ioaddr + CSR1);
1180 }
1181
1182 spin_unlock_irqrestore(&tp->lock, flags);
1183 }
1184
1185 iowrite32(csr6, ioaddr + CSR6);
1186}
1187
1188#ifdef CONFIG_TULIP_MWI
1189static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
1190{
1191 struct tulip_private *tp = netdev_priv(dev);
1192 u8 cache;
1193 u16 pci_command;
1194 u32 csr0;
1195
1196 if (tulip_debug > 3)
1197 netdev_dbg(dev, "tulip_mwi_config()\n");
1198
1199 tp->csr0 = csr0 = 0;
1200
1201 /* if we have any cache line size at all, we can do MRM and MWI */
1202 csr0 |= MRM | MWI;
1203
1204 /* Enable MWI in the standard PCI command bit.
1205 * Check for the case where MWI is desired but not available
1206 */
1207 pci_try_set_mwi(pdev);
1208
1209 /* read result from hardware (in case bit refused to enable) */
1210 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1211 if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1212 csr0 &= ~MWI;
1213
1214 /* if cache line size hardwired to zero, no MWI */
1215 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1216 if ((csr0 & MWI) && (cache == 0)) {
1217 csr0 &= ~MWI;
1218 pci_clear_mwi(pdev);
1219 }
1220
1221 /* assign per-cacheline-size cache alignment and
1222 * burst length values
1223 */
1224 switch (cache) {
1225 case 8:
1226 csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1227 break;
1228 case 16:
1229 csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1230 break;
1231 case 32:
1232 csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1233 break;
1234 default:
1235 cache = 0;
1236 break;
1237 }
1238
1239 /* if we have a good cache line size, we by now have a good
1240 * csr0, so save it and exit
1241 */
1242 if (cache)
1243 goto out;
1244
1245 /* we don't have a good csr0 or cache line size, disable MWI */
1246 if (csr0 & MWI) {
1247 pci_clear_mwi(pdev);
1248 csr0 &= ~MWI;
1249 }
1250
1251 /* sane defaults for burst length and cache alignment
1252 * originally from de4x5 driver
1253 */
1254 csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1255
1256out:
1257 tp->csr0 = csr0;
1258 if (tulip_debug > 2)
1259 netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1260 cache, csr0);
1261}
1262#endif
1263
1264/*
1265 * Chips that have the MRM/reserved bit quirk and the burst quirk. That
1266 * is the DM910X and the on chip ULi devices
1267 */
1268
1269static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1270{
1271 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1272 return 1;
1273 return 0;
1274}
1275
1276static const struct net_device_ops tulip_netdev_ops = {
1277 .ndo_open = tulip_open,
1278 .ndo_start_xmit = tulip_start_xmit,
1279 .ndo_tx_timeout = tulip_tx_timeout,
1280 .ndo_stop = tulip_close,
1281 .ndo_get_stats = tulip_get_stats,
1282 .ndo_do_ioctl = private_ioctl,
1283 .ndo_set_rx_mode = set_rx_mode,
1284 .ndo_set_mac_address = eth_mac_addr,
1285 .ndo_validate_addr = eth_validate_addr,
1286#ifdef CONFIG_NET_POLL_CONTROLLER
1287 .ndo_poll_controller = poll_tulip,
1288#endif
1289};
1290
1291const struct pci_device_id early_486_chipsets[] = {
1292 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1293 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1294 { },
1295};
1296
1297static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1298{
1299 struct tulip_private *tp;
1300 /* See note below on the multiport cards. */
1301 static unsigned char last_phys_addr[ETH_ALEN] = {
1302 0x00, 'L', 'i', 'n', 'u', 'x'
1303 };
1304 static int last_irq;
1305 int i, irq;
1306 unsigned short sum;
1307 unsigned char *ee_data;
1308 struct net_device *dev;
1309 void __iomem *ioaddr;
1310 static int board_idx = -1;
1311 int chip_idx = ent->driver_data;
1312 const char *chip_name = tulip_tbl[chip_idx].chip_name;
1313 unsigned int eeprom_missing = 0;
1314 unsigned int force_csr0 = 0;
1315
1316#ifndef MODULE
1317 if (tulip_debug > 0)
1318 printk_once(KERN_INFO "%s", version);
1319#endif
1320
1321 board_idx++;
1322
1323 /*
1324	 * LanMedia boards wire a Tulip chip to a WAN interface and need a
1325	 * very different driver (the lmc driver).
1326 */
1327
1328 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1329 pr_err("skipping LMC card\n");
1330 return -ENODEV;
1331 } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1332 (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1333 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1334 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1335 pr_err("skipping SBE T3E3 port\n");
1336 return -ENODEV;
1337 }
1338
1339 /*
1340 * DM910x chips should be handled by the dmfe driver, except
1341 * on-board chips on SPARC systems. Also, early DM9100s need
1342 * software CRC which only the dmfe driver supports.
1343 */
1344
1345#ifdef CONFIG_TULIP_DM910X
1346 if (chip_idx == DM910X) {
1347 struct device_node *dp;
1348
1349 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1350 pdev->revision < 0x30) {
1351 pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1352 return -ENODEV;
1353 }
1354
1355 dp = pci_device_to_OF_node(pdev);
1356 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1357 pr_info("skipping DM910x expansion card (use dmfe)\n");
1358 return -ENODEV;
1359 }
1360 }
1361#endif
1362
1363 /*
1364 * Looks for early PCI chipsets where people report hangs
1365 * without the workarounds being on.
1366 */
1367
1368 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1369 aligned. Aries might need this too. The Saturn errata are not
1370 pretty reading but thankfully it's an old 486 chipset.
1371
1372 2. The dreaded SiS496 486 chipset. Same workaround as Intel
1373 Saturn.
1374 */
1375
1376 if (pci_dev_present(early_486_chipsets)) {
1377 csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1378 force_csr0 = 1;
1379 }
1380
1381 /* bugfix: the ASIX must have a burst limit or horrible things happen. */
1382 if (chip_idx == AX88140) {
1383 if ((csr0 & 0x3f00) == 0)
1384 csr0 |= 0x2000;
1385 }
1386
1387 /* PNIC doesn't have MWI/MRL/MRM... */
1388 if (chip_idx == LC82C168)
1389 csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1390
1391 /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1392 if (tulip_uli_dm_quirk(pdev)) {
1393 csr0 &= ~0x01f100ff;
1394#if defined(CONFIG_SPARC)
1395 csr0 = (csr0 & ~0xff00) | 0xe000;
1396#endif
1397 }
1398 /*
1399 * And back to business
1400 */
1401
1402 i = pci_enable_device(pdev);
1403 if (i) {
1404 pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1405 return i;
1406 }
1407
1408 irq = pdev->irq;
1409
1410 /* alloc_etherdev ensures aligned and zeroed private structures */
1411 dev = alloc_etherdev (sizeof (*tp));
1412 if (!dev)
1413 return -ENOMEM;
1414
1415 SET_NETDEV_DEV(dev, &pdev->dev);
1416 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1417 pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1418 pci_name(pdev),
1419 (unsigned long long)pci_resource_len (pdev, 0),
1420 (unsigned long long)pci_resource_start (pdev, 0));
1421 goto err_out_free_netdev;
1422 }
1423
1424 /* grab all resources from both PIO and MMIO regions, as we
1425 * don't want anyone else messing around with our hardware */
1426 if (pci_request_regions (pdev, DRV_NAME))
1427 goto err_out_free_netdev;
1428
1429 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1430
1431 if (!ioaddr)
1432 goto err_out_free_res;
1433
1434 /*
1435 * initialize private data structure 'tp'
1436 * it is zeroed and aligned in alloc_etherdev
1437 */
1438 tp = netdev_priv(dev);
1439 tp->dev = dev;
1440
1441 tp->rx_ring = pci_alloc_consistent(pdev,
1442 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1443 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1444 &tp->rx_ring_dma);
1445 if (!tp->rx_ring)
1446 goto err_out_mtable;
1447 tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1448 tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1449
1450 tp->chip_id = chip_idx;
1451 tp->flags = tulip_tbl[chip_idx].flags;
1452
1453 tp->wolinfo.supported = 0;
1454 tp->wolinfo.wolopts = 0;
1455 /* COMET: Enable power management only for AN983B */
1456 if (chip_idx == COMET ) {
1457 u32 sig;
1458 pci_read_config_dword (pdev, 0x80, &sig);
1459 if (sig == 0x09811317) {
1460 tp->flags |= COMET_PM;
1461 tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1462 pr_info("%s: Enabled WOL support for AN983B\n",
1463 __func__);
1464 }
1465 }
1466 tp->pdev = pdev;
1467 tp->base_addr = ioaddr;
1468 tp->revision = pdev->revision;
1469 tp->csr0 = csr0;
1470 spin_lock_init(&tp->lock);
1471 spin_lock_init(&tp->mii_lock);
1472 timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
1473
1474 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1475
1476#ifdef CONFIG_TULIP_MWI
1477 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1478 tulip_mwi_config (pdev, dev);
1479#endif
1480
1481 /* Stop the chip's Tx and Rx processes. */
1482 tulip_stop_rxtx(tp);
1483
1484 pci_set_master(pdev);
1485
1486#ifdef CONFIG_GSC
1487 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1488 switch (pdev->subsystem_device) {
1489 default:
1490 break;
1491 case 0x1061:
1492 case 0x1062:
1493 case 0x1063:
1494 case 0x1098:
1495 case 0x1099:
1496 case 0x10EE:
1497 tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1498 chip_name = "GSC DS21140 Tulip";
1499 }
1500 }
1501#endif
1502
1503 /* Clear the missed-packet counter. */
1504 ioread32(ioaddr + CSR8);
1505
1506 /* The station address ROM is read byte serially. The register must
1507 be polled, waiting for the value to be read bit serially from the
1508 EEPROM.
1509 */
1510 ee_data = tp->eeprom;
1511 memset(ee_data, 0, sizeof(tp->eeprom));
1512 sum = 0;
1513 if (chip_idx == LC82C168) {
1514 for (i = 0; i < 3; i++) {
1515 int value, boguscnt = 100000;
1516 iowrite32(0x600 | i, ioaddr + 0x98);
1517 do {
1518 value = ioread32(ioaddr + CSR9);
1519 } while (value < 0 && --boguscnt > 0);
1520 put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1521 sum += value & 0xffff;
1522 }
1523 } else if (chip_idx == COMET) {
1524 /* No need to read the EEPROM. */
1525 put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1526 put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1527 for (i = 0; i < 6; i ++)
1528 sum += dev->dev_addr[i];
1529 } else {
1530 /* A serial EEPROM interface, we read now and sort it out later. */
1531 int sa_offset = 0;
1532 int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1533 int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1534
1535 if (ee_max_addr > sizeof(tp->eeprom))
1536 ee_max_addr = sizeof(tp->eeprom);
1537
1538 for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1539 u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1540 ee_data[i] = data & 0xff;
1541 ee_data[i + 1] = data >> 8;
1542 }
1543
1544 /* DEC now has a specification (see Notes) but early board makers
1545 just put the address in the first EEPROM locations. */
1546 /* This does memcmp(ee_data, ee_data+16, 8) */
1547 for (i = 0; i < 8; i ++)
1548 if (ee_data[i] != ee_data[16+i])
1549 sa_offset = 20;
1550 if (chip_idx == CONEXANT) {
1551 /* Check that the tuple type and length is correct. */
1552 if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
1553 sa_offset = 0x19A;
1554 } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
1555 ee_data[2] == 0) {
1556 sa_offset = 2; /* Grrr, damn Matrox boards. */
1557 }
1558#ifdef CONFIG_MIPS_COBALT
1559 if ((pdev->bus->number == 0) &&
1560 ((PCI_SLOT(pdev->devfn) == 7) ||
1561 (PCI_SLOT(pdev->devfn) == 12))) {
1562 /* Cobalt MAC address in first EEPROM locations. */
1563 sa_offset = 0;
1564			/* Ensure our media table fixup gets applied */
1565 memcpy(ee_data + 16, ee_data, 8);
1566 }
1567#endif
1568#ifdef CONFIG_GSC
1569 /* Check to see if we have a broken srom */
1570 if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1571 /* pci_vendor_id and subsystem_id are swapped */
1572 ee_data[0] = ee_data[2];
1573 ee_data[1] = ee_data[3];
1574 ee_data[2] = 0x61;
1575 ee_data[3] = 0x10;
1576
1577		/* HSC-PCI boards need to be byte-swapped and shifted
1578 * up 1 word. This shift needs to happen at the end
1579 * of the MAC first because of the 2 byte overlap.
1580 */
1581 for (i = 4; i >= 0; i -= 2) {
1582 ee_data[17 + i + 3] = ee_data[17 + i];
1583 ee_data[16 + i + 5] = ee_data[16 + i];
1584 }
1585 }
1586#endif
1587
1588 for (i = 0; i < 6; i ++) {
1589 dev->dev_addr[i] = ee_data[i + sa_offset];
1590 sum += ee_data[i + sa_offset];
1591 }
1592 }
1593 /* Lite-On boards have the address byte-swapped. */
1594 if ((dev->dev_addr[0] == 0xA0 ||
1595 dev->dev_addr[0] == 0xC0 ||
1596 dev->dev_addr[0] == 0x02) &&
1597 dev->dev_addr[1] == 0x00)
1598 for (i = 0; i < 6; i+=2) {
1599 char tmp = dev->dev_addr[i];
1600 dev->dev_addr[i] = dev->dev_addr[i+1];
1601 dev->dev_addr[i+1] = tmp;
1602 }
1603 /* On the Zynx 315 Etherarray and other multiport boards only the
1604 first Tulip has an EEPROM.
1605 On Sparc systems the mac address is held in the OBP property
1606 "local-mac-address".
1607 The addresses of the subsequent ports are derived from the first.
1608 Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1609 that here as well. */
1610 if (sum == 0 || sum == 6*0xff) {
1611#if defined(CONFIG_SPARC)
1612 struct device_node *dp = pci_device_to_OF_node(pdev);
1613 const unsigned char *addr;
1614 int len;
1615#endif
1616 eeprom_missing = 1;
1617 for (i = 0; i < 5; i++)
1618 dev->dev_addr[i] = last_phys_addr[i];
1619 dev->dev_addr[i] = last_phys_addr[i] + 1;
1620#if defined(CONFIG_SPARC)
1621 addr = of_get_property(dp, "local-mac-address", &len);
1622 if (addr && len == ETH_ALEN)
1623 memcpy(dev->dev_addr, addr, ETH_ALEN);
1624#endif
1625#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
1626 if (last_irq)
1627 irq = last_irq;
1628#endif
1629 }
1630
1631 for (i = 0; i < 6; i++)
1632 last_phys_addr[i] = dev->dev_addr[i];
1633 last_irq = irq;
1634
1635 /* The lower four bits are the media type. */
1636 if (board_idx >= 0 && board_idx < MAX_UNITS) {
1637 if (options[board_idx] & MEDIA_MASK)
1638 tp->default_port = options[board_idx] & MEDIA_MASK;
1639 if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1640 tp->full_duplex = 1;
1641 if (mtu[board_idx] > 0)
1642 dev->mtu = mtu[board_idx];
1643 }
1644 if (dev->mem_start & MEDIA_MASK)
1645 tp->default_port = dev->mem_start & MEDIA_MASK;
1646 if (tp->default_port) {
1647 pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1648 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1649 tp->medialock = 1;
1650 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1651 tp->full_duplex = 1;
1652 }
1653 if (tp->full_duplex)
1654 tp->full_duplex_lock = 1;
1655
1656 if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1657 static const u16 media2advert[] = {
1658 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1659 };
1660 tp->mii_advertise = media2advert[tp->default_port - 9];
1661 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1662 }
1663
1664 if (tp->flags & HAS_MEDIA_TABLE) {
1665 sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
1666 tulip_parse_eeprom(dev);
1667 strcpy(dev->name, "eth%d"); /* un-hack */
1668 }
1669
        if ((tp->flags & ALWAYS_CHECK_MII) ||
            (tp->mtable && tp->mtable->has_mii) ||
            (!tp->mtable && (tp->flags & HAS_MII))) {
                if (tp->mtable && tp->mtable->has_mii) {
                        for (i = 0; i < tp->mtable->leafcount; i++)
                                if (tp->mtable->mleaf[i].media == 11) {
                                        tp->cur_index = i;
                                        tp->saved_if_port = dev->if_port;
                                        tulip_select_media(dev, 2);
                                        dev->if_port = tp->saved_if_port;
                                        break;
                                }
                }

                /* Find the connected MII xcvrs.
                   Doing this in open() would allow detecting external xcvrs
                   later, but takes much time. */
                tulip_find_mii (dev, board_idx);
        }

        /* The Tulip-specific entries in the device structure. */
        dev->netdev_ops = &tulip_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_TULIP_NAPI
        netif_napi_add(dev, &tp->napi, tulip_poll, 16);
#endif
        dev->ethtool_ops = &ops;

        if (register_netdev(dev))
                goto err_out_free_ring;

        pci_set_drvdata(pdev, dev);

        dev_info(&dev->dev,
#ifdef CONFIG_TULIP_MMIO
                 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
#else
                 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
#endif
                 chip_name, pdev->revision,
                 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
                 eeprom_missing ? " EEPROM not present," : "",
                 dev->dev_addr, irq);

        if (tp->chip_id == PNIC2)
                tp->link_change = pnic2_lnk_change;
        else if (tp->flags & HAS_NWAY)
                tp->link_change = t21142_lnk_change;
        else if (tp->flags & HAS_PNICNWAY)
                tp->link_change = pnic_lnk_change;

        /* Reset the xcvr interface and turn on heartbeat. */
        switch (chip_idx) {
        case DC21140:
        case DM910X:
        default:
                if (tp->mtable)
                        iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
                break;
        case DC21142:
                if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
                        iowrite32(csr6_mask_defstate, ioaddr + CSR6);
                        iowrite32(0x0000, ioaddr + CSR13);
                        iowrite32(0x0000, ioaddr + CSR14);
                        iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
                } else
                        t21142_start_nway(dev);
                break;
        case PNIC2:
                /* just do a reset for sanity sake */
                iowrite32(0x0000, ioaddr + CSR13);
                iowrite32(0x0000, ioaddr + CSR14);
                break;
        case LC82C168:
                if (!tp->mii_cnt) {
                        tp->nway = 1;
                        tp->nwayset = 0;
                        iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
                        iowrite32(0x30, ioaddr + CSR12);
                        iowrite32(0x0001F078, ioaddr + CSR6);
                        iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
                }
                break;
        case MX98713:
        case COMPEX9881:
                iowrite32(0x00000000, ioaddr + CSR6);
                iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
                iowrite32(0x00000001, ioaddr + CSR13);
                break;
        case MX98715:
        case MX98725:
                iowrite32(0x01a80000, ioaddr + CSR6);
                iowrite32(0xFFFFFFFF, ioaddr + CSR14);
                iowrite32(0x00001000, ioaddr + CSR12);
                break;
        case COMET:
                /* No initialization necessary. */
                break;
        }

        /* put the chip in snooze mode until opened */
        tulip_set_power_state (tp, 0, 1);

        return 0;

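/* Error unwind for tulip_init_one(): the labels below fall through, so each
 * one releases only what was acquired after the previous label and a failure
 * at any point backs out in reverse order of acquisition. */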
err_out_free_ring:
        pci_free_consistent (pdev,
                             sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
                             sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
                             tp->rx_ring, tp->rx_ring_dma);

err_out_mtable:
        kfree (tp->mtable);
        pci_iounmap(pdev, ioaddr);

err_out_free_res:
        pci_release_regions (pdev);

err_out_free_netdev:
        free_netdev (dev);
        return -ENODEV;
}


/* set the registers according to the given wolopts */
static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tulip_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->base_addr;

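        /* Wake-on-LAN is only wired up for the ADMtek Comet variants that set
         * COMET_PM in tulip_init_one() (the AN983B).  On those chips CSR18
         * selects the power-management mode and CSR13 doubles as the wake-up
         * control/status register, which is what gets programmed here. */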
        if (tp->flags & COMET_PM) {

                unsigned int tmp;

                tmp = ioread32(ioaddr + CSR18);
                tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
                tmp |= comet_csr18_pm_mode;
                iowrite32(tmp, ioaddr + CSR18);

                /* Set the Wake-up Control/Status Register to the given WOL options */
                tmp = ioread32(ioaddr + CSR13);
                tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
                if (wolopts & WAKE_MAGIC)
                        tmp |= comet_csr13_mpre;
                if (wolopts & WAKE_PHY)
                        tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
                /* Clear the event flags */
                tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
                iowrite32(tmp, ioaddr + CSR13);
        }
}

#ifdef CONFIG_PM


static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
{
        pci_power_t pstate;
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tulip_private *tp = netdev_priv(dev);

        if (!dev)
                return -EINVAL;

        if (!netif_running(dev))
                goto save_state;

        tulip_down(dev);

        netif_device_detach(dev);
        /* FIXME: it needlessly adds an error path. */
        free_irq(tp->pdev->irq, dev);

save_state:
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pstate = pci_choose_state(pdev, state);
        if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
                int rc;

                tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
                rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
                if (rc)
                        pr_err("pci_enable_wake failed (%d)\n", rc);
        }
        pci_set_power_state(pdev, pstate);

        return 0;
}


static int tulip_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tulip_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->base_addr;
        int retval;
        unsigned int tmp;

        if (!dev)
                return -EINVAL;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        if (!netif_running(dev))
                return 0;

        if ((retval = pci_enable_device(pdev))) {
                pr_err("pci_enable_device failed in resume\n");
                return retval;
        }

        retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
                             dev->name, dev);
        if (retval) {
                pr_err("request_irq failed in resume\n");
                return retval;
        }

        if (tp->flags & COMET_PM) {
                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);

                /* Clear the PMES flag */
                tmp = ioread32(ioaddr + CSR20);
                tmp |= comet_csr20_pmes;
                iowrite32(tmp, ioaddr + CSR20);

                /* Disable all wake-up events */
                tulip_set_wolopts(pdev, 0);
        }
        netif_device_attach(dev);

        if (netif_running(dev))
                tulip_up(dev);

        return 0;
}

#endif /* CONFIG_PM */


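/* Undo tulip_init_one() in reverse: unregister the net device, free the
 * descriptor rings and the parsed media table, unmap the register BAR, and
 * hand the PCI resources back to the core. */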
static void tulip_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata (pdev);
        struct tulip_private *tp;

        if (!dev)
                return;

        tp = netdev_priv(dev);
        unregister_netdev(dev);
        pci_free_consistent (pdev,
                             sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
                             sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
                             tp->rx_ring, tp->rx_ring_dma);
        kfree (tp->mtable);
        pci_iounmap(pdev, tp->base_addr);
        free_netdev (dev);
        pci_release_regions (pdev);
        pci_disable_device(pdev);

        /* pci_power_off (pdev, -1); */
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void poll_tulip (struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        const int irq = tp->pdev->irq;

        /* disable_irq here is not very nice, but with the lockless
           interrupt handler we have no other choice. */
        disable_irq(irq);
        tulip_interrupt (irq, dev);
        enable_irq(irq);
}
#endif

static struct pci_driver tulip_driver = {
        .name           = DRV_NAME,
        .id_table       = tulip_pci_tbl,
        .probe          = tulip_init_one,
        .remove         = tulip_remove_one,
#ifdef CONFIG_PM
        .suspend        = tulip_suspend,
        .resume         = tulip_resume,
#endif /* CONFIG_PM */
};


static int __init tulip_init (void)
{
#ifdef MODULE
        pr_info("%s", version);
#endif

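        /* csr0 is only zero when none of the architecture-specific defaults
         * above were compiled in.  0x4800 requests 8-longword cache
         * alignment and an 8-longword burst limit, a conservative setting
         * for an unknown bus implementation. */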
        if (!csr0) {
                pr_warn("unknown CPU architecture, using default csr0\n");
                /* default to 8 longword cache line alignment */
                csr0 = 0x00A00000 | 0x4800;
        }

        /* copy module parms into globals */
        tulip_rx_copybreak = rx_copybreak;
        tulip_max_interrupt_work = max_interrupt_work;
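        /* Illustrative usage of the parameters copied above (example values
         * only):
         *      modprobe tulip rx_copybreak=200 max_interrupt_work=40
         */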

        /* probe for and init boards */
        return pci_register_driver(&tulip_driver);
}


static void __exit tulip_cleanup (void)
{
        pci_unregister_driver (&tulip_driver);
}


module_init(tulip_init);
module_exit(tulip_cleanup);
1/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2
3 Copyright 2000,2001 The Linux Kernel Team
4 Written/copyright 1994-2001 by Donald Becker.
5
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8
9 Please submit bugs to http://bugzilla.kernel.org/ .
10*/
11
12#define pr_fmt(fmt) "tulip: " fmt
13
14#define DRV_NAME "tulip"
15#ifdef CONFIG_TULIP_NAPI
16#define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */
17#else
18#define DRV_VERSION "1.1.15"
19#endif
20#define DRV_RELDATE "Feb 27, 2007"
21
22
23#include <linux/module.h>
24#include <linux/pci.h>
25#include <linux/slab.h>
26#include "tulip.h"
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/etherdevice.h>
30#include <linux/delay.h>
31#include <linux/mii.h>
32#include <linux/crc32.h>
33#include <asm/unaligned.h>
34#include <asm/uaccess.h>
35
36#ifdef CONFIG_SPARC
37#include <asm/prom.h>
38#endif
39
40static char version[] =
41 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
42
43/* A few user-configurable values. */
44
45/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
46static unsigned int max_interrupt_work = 25;
47
48#define MAX_UNITS 8
49/* Used to pass the full-duplex flag, etc. */
50static int full_duplex[MAX_UNITS];
51static int options[MAX_UNITS];
52static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
53
54/* The possible media types that can be set in options[] are: */
55const char * const medianame[32] = {
56 "10baseT", "10base2", "AUI", "100baseTx",
57 "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
58 "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
59 "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
60 "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
61 "","","","", "","","","", "","","","Transceiver reset",
62};
63
64/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
66 defined(CONFIG_SPARC) || defined(__ia64__) || \
67 defined(__sh__) || defined(__mips__)
68static int rx_copybreak = 1518;
69#else
70static int rx_copybreak = 100;
71#endif
72
73/*
74 Set the bus performance register.
75 Typical: Set 16 longword cache alignment, no burst limit.
76 Cache alignment bits 15:14 Burst length 13:8
77 0000 No alignment 0x00000000 unlimited 0800 8 longwords
78 4000 8 longwords 0100 1 longword 1000 16 longwords
79 8000 16 longwords 0200 2 longwords 2000 32 longwords
80 C000 32 longwords 0400 4 longwords
81 Warning: many older 486 systems are broken and require setting 0x00A04800
82 8 longword cache alignment, 8 longword burst.
83 ToDo: Non-Intel setting could be better.
84*/
85
86#if defined(__alpha__) || defined(__ia64__)
87static int csr0 = 0x01A00000 | 0xE000;
88#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
89static int csr0 = 0x01A00000 | 0x8000;
90#elif defined(CONFIG_SPARC) || defined(__hppa__)
91/* The UltraSparc PCI controllers will disconnect at every 64-byte
92 * crossing anyways so it makes no sense to tell Tulip to burst
93 * any more than that.
94 */
95static int csr0 = 0x01A00000 | 0x9000;
96#elif defined(__arm__) || defined(__sh__)
97static int csr0 = 0x01A00000 | 0x4800;
98#elif defined(__mips__)
99static int csr0 = 0x00200000 | 0x4000;
100#else
101#warning Processor architecture undefined!
102static int csr0 = 0x00A00000 | 0x4800;
103#endif
104
105/* Operational parameters that usually are not changed. */
106/* Time in jiffies before concluding the transmitter is hung. */
107#define TX_TIMEOUT (4*HZ)
108
109
110MODULE_AUTHOR("The Linux Kernel Team");
111MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
112MODULE_LICENSE("GPL");
113MODULE_VERSION(DRV_VERSION);
114module_param(tulip_debug, int, 0);
115module_param(max_interrupt_work, int, 0);
116module_param(rx_copybreak, int, 0);
117module_param(csr0, int, 0);
118module_param_array(options, int, NULL, 0);
119module_param_array(full_duplex, int, NULL, 0);
120
121#ifdef TULIP_DEBUG
122int tulip_debug = TULIP_DEBUG;
123#else
124int tulip_debug = 1;
125#endif
126
127static void tulip_timer(unsigned long data)
128{
129 struct net_device *dev = (struct net_device *)data;
130 struct tulip_private *tp = netdev_priv(dev);
131
132 if (netif_running(dev))
133 schedule_work(&tp->media_work);
134}
135
136/*
137 * This table use during operation for capabilities and media timer.
138 *
139 * It is indexed via the values in 'enum chips'
140 */
141
142struct tulip_chip_table tulip_tbl[] = {
143 { }, /* placeholder for array, slot unused currently */
144 { }, /* placeholder for array, slot unused currently */
145
146 /* DC21140 */
147 { "Digital DS21140 Tulip", 128, 0x0001ebef,
148 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
149 tulip_media_task },
150
151 /* DC21142, DC21143 */
152 { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
153 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
154 | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
155
156 /* LC82C168 */
157 { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
158 HAS_MII | HAS_PNICNWAY, pnic_timer, },
159
160 /* MX98713 */
161 { "Macronix 98713 PMAC", 128, 0x0001ebef,
162 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
163
164 /* MX98715 */
165 { "Macronix 98715 PMAC", 256, 0x0001ebef,
166 HAS_MEDIA_TABLE, mxic_timer, },
167
168 /* MX98725 */
169 { "Macronix 98725 PMAC", 256, 0x0001ebef,
170 HAS_MEDIA_TABLE, mxic_timer, },
171
172 /* AX88140 */
173 { "ASIX AX88140", 128, 0x0001fbff,
174 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
175 | IS_ASIX, tulip_timer, tulip_media_task },
176
177 /* PNIC2 */
178 { "Lite-On PNIC-II", 256, 0x0801fbff,
179 HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
180
181 /* COMET */
182 { "ADMtek Comet", 256, 0x0001abef,
183 HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
184
185 /* COMPEX9881 */
186 { "Compex 9881 PMAC", 128, 0x0001ebef,
187 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
188
189 /* I21145 */
190 { "Intel DS21145 Tulip", 128, 0x0801fbff,
191 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
192 | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
193
194 /* DM910X */
195#ifdef CONFIG_TULIP_DM910X
196 { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
197 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
198 tulip_timer, tulip_media_task },
199#else
200 { NULL },
201#endif
202
203 /* RS7112 */
204 { "Conexant LANfinity", 256, 0x0001ebef,
205 HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
206
207};
208
209
210static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
211 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
212 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
213 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
214 { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
215 { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
216/* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
217 { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
218 { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
219 { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
220 { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
221 { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
222 { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
223 { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
225 { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
229 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
230 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
231#ifdef CONFIG_TULIP_DM910X
232 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
233 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
234#endif
235 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
236 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
237 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
238 { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
239 { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
241 { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
242 { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
243 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
244 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
245 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
246 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
247 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
248 { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
249 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
250 { } /* terminate list */
251};
252MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
253
254
255/* A full-duplex map for media types. */
256const char tulip_media_cap[32] =
257{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
258
259static void tulip_tx_timeout(struct net_device *dev);
260static void tulip_init_ring(struct net_device *dev);
261static void tulip_free_ring(struct net_device *dev);
262static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
263 struct net_device *dev);
264static int tulip_open(struct net_device *dev);
265static int tulip_close(struct net_device *dev);
266static void tulip_up(struct net_device *dev);
267static void tulip_down(struct net_device *dev);
268static struct net_device_stats *tulip_get_stats(struct net_device *dev);
269static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
270static void set_rx_mode(struct net_device *dev);
271static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
272#ifdef CONFIG_NET_POLL_CONTROLLER
273static void poll_tulip(struct net_device *dev);
274#endif
275
276static void tulip_set_power_state (struct tulip_private *tp,
277 int sleep, int snooze)
278{
279 if (tp->flags & HAS_ACPI) {
280 u32 tmp, newtmp;
281 pci_read_config_dword (tp->pdev, CFDD, &tmp);
282 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
283 if (sleep)
284 newtmp |= CFDD_Sleep;
285 else if (snooze)
286 newtmp |= CFDD_Snooze;
287 if (tmp != newtmp)
288 pci_write_config_dword (tp->pdev, CFDD, newtmp);
289 }
290
291}
292
293
294static void tulip_up(struct net_device *dev)
295{
296 struct tulip_private *tp = netdev_priv(dev);
297 void __iomem *ioaddr = tp->base_addr;
298 int next_tick = 3*HZ;
299 u32 reg;
300 int i;
301
302#ifdef CONFIG_TULIP_NAPI
303 napi_enable(&tp->napi);
304#endif
305
306 /* Wake the chip from sleep/snooze mode. */
307 tulip_set_power_state (tp, 0, 0);
308
309 /* Disable all WOL events */
310 pci_enable_wake(tp->pdev, PCI_D3hot, 0);
311 pci_enable_wake(tp->pdev, PCI_D3cold, 0);
312 tulip_set_wolopts(tp->pdev, 0);
313
314 /* On some chip revs we must set the MII/SYM port before the reset!? */
315 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
316 iowrite32(0x00040000, ioaddr + CSR6);
317
318 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
319 iowrite32(0x00000001, ioaddr + CSR0);
320 pci_read_config_dword(tp->pdev, PCI_COMMAND, ®); /* flush write */
321 udelay(100);
322
323 /* Deassert reset.
324 Wait the specified 50 PCI cycles after a reset by initializing
325 Tx and Rx queues and the address filter list. */
326 iowrite32(tp->csr0, ioaddr + CSR0);
327 pci_read_config_dword(tp->pdev, PCI_COMMAND, ®); /* flush write */
328 udelay(100);
329
330 if (tulip_debug > 1)
331 netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
332
333 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
334 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
335 tp->cur_rx = tp->cur_tx = 0;
336 tp->dirty_rx = tp->dirty_tx = 0;
337
338 if (tp->flags & MC_HASH_ONLY) {
339 u32 addr_low = get_unaligned_le32(dev->dev_addr);
340 u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
341 if (tp->chip_id == AX88140) {
342 iowrite32(0, ioaddr + CSR13);
343 iowrite32(addr_low, ioaddr + CSR14);
344 iowrite32(1, ioaddr + CSR13);
345 iowrite32(addr_high, ioaddr + CSR14);
346 } else if (tp->flags & COMET_MAC_ADDR) {
347 iowrite32(addr_low, ioaddr + 0xA4);
348 iowrite32(addr_high, ioaddr + 0xA8);
349 iowrite32(0, ioaddr + CSR27);
350 iowrite32(0, ioaddr + CSR28);
351 }
352 } else {
353 /* This is set_rx_mode(), but without starting the transmitter. */
354 u16 *eaddrs = (u16 *)dev->dev_addr;
355 u16 *setup_frm = &tp->setup_frame[15*6];
356 dma_addr_t mapping;
357
358 /* 21140 bug: you must add the broadcast address. */
359 memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
360 /* Fill the final entry of the table with our physical address. */
361 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
362 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
363 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
364
365 mapping = pci_map_single(tp->pdev, tp->setup_frame,
366 sizeof(tp->setup_frame),
367 PCI_DMA_TODEVICE);
368 tp->tx_buffers[tp->cur_tx].skb = NULL;
369 tp->tx_buffers[tp->cur_tx].mapping = mapping;
370
371 /* Put the setup frame on the Tx list. */
372 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
373 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
374 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
375
376 tp->cur_tx++;
377 }
378
379 tp->saved_if_port = dev->if_port;
380 if (dev->if_port == 0)
381 dev->if_port = tp->default_port;
382
383 /* Allow selecting a default media. */
384 i = 0;
385 if (tp->mtable == NULL)
386 goto media_picked;
387 if (dev->if_port) {
388 int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
389 (dev->if_port == 12 ? 0 : dev->if_port);
390 for (i = 0; i < tp->mtable->leafcount; i++)
391 if (tp->mtable->mleaf[i].media == looking_for) {
392 dev_info(&dev->dev,
393 "Using user-specified media %s\n",
394 medianame[dev->if_port]);
395 goto media_picked;
396 }
397 }
398 if ((tp->mtable->defaultmedia & 0x0800) == 0) {
399 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
400 for (i = 0; i < tp->mtable->leafcount; i++)
401 if (tp->mtable->mleaf[i].media == looking_for) {
402 dev_info(&dev->dev,
403 "Using EEPROM-set media %s\n",
404 medianame[looking_for]);
405 goto media_picked;
406 }
407 }
408 /* Start sensing first non-full-duplex media. */
409 for (i = tp->mtable->leafcount - 1;
410 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
411 ;
412media_picked:
413
414 tp->csr6 = 0;
415 tp->cur_index = i;
416 tp->nwayset = 0;
417
418 if (dev->if_port) {
419 if (tp->chip_id == DC21143 &&
420 (tulip_media_cap[dev->if_port] & MediaIsMII)) {
421 /* We must reset the media CSRs when we force-select MII mode. */
422 iowrite32(0x0000, ioaddr + CSR13);
423 iowrite32(0x0000, ioaddr + CSR14);
424 iowrite32(0x0008, ioaddr + CSR15);
425 }
426 tulip_select_media(dev, 1);
427 } else if (tp->chip_id == DC21142) {
428 if (tp->mii_cnt) {
429 tulip_select_media(dev, 1);
430 if (tulip_debug > 1)
431 dev_info(&dev->dev,
432 "Using MII transceiver %d, status %04x\n",
433 tp->phys[0],
434 tulip_mdio_read(dev, tp->phys[0], 1));
435 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
436 tp->csr6 = csr6_mask_hdcap;
437 dev->if_port = 11;
438 iowrite32(0x0000, ioaddr + CSR13);
439 iowrite32(0x0000, ioaddr + CSR14);
440 } else
441 t21142_start_nway(dev);
442 } else if (tp->chip_id == PNIC2) {
443 /* for initial startup advertise 10/100 Full and Half */
444 tp->sym_advertise = 0x01E0;
445 /* enable autonegotiate end interrupt */
446 iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
447 iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
448 pnic2_start_nway(dev);
449 } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
450 if (tp->mii_cnt) {
451 dev->if_port = 11;
452 tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
453 iowrite32(0x0001, ioaddr + CSR15);
454 } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
455 pnic_do_nway(dev);
456 else {
457 /* Start with 10mbps to do autonegotiation. */
458 iowrite32(0x32, ioaddr + CSR12);
459 tp->csr6 = 0x00420000;
460 iowrite32(0x0001B078, ioaddr + 0xB8);
461 iowrite32(0x0201B078, ioaddr + 0xB8);
462 next_tick = 1*HZ;
463 }
464 } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
465 ! tp->medialock) {
466 dev->if_port = 0;
467 tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
468 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
469 } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
470 /* Provided by BOLO, Macronix - 12/10/1998. */
471 dev->if_port = 0;
472 tp->csr6 = 0x01a80200;
473 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
474 iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
475 } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
476 /* Enable automatic Tx underrun recovery. */
477 iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
478 dev->if_port = tp->mii_cnt ? 11 : 0;
479 tp->csr6 = 0x00040000;
480 } else if (tp->chip_id == AX88140) {
481 tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
482 } else
483 tulip_select_media(dev, 1);
484
485 /* Start the chip's Tx to process setup frame. */
486 tulip_stop_rxtx(tp);
487 barrier();
488 udelay(5);
489 iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
490
491 /* Enable interrupts by setting the interrupt mask. */
492 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
493 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
494 tulip_start_rxtx(tp);
495 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
496
497 if (tulip_debug > 2) {
498 netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
499 ioread32(ioaddr + CSR0),
500 ioread32(ioaddr + CSR5),
501 ioread32(ioaddr + CSR6));
502 }
503
504 /* Set the timer to switch to check for link beat and perhaps switch
505 to an alternate media type. */
506 tp->timer.expires = RUN_AT(next_tick);
507 add_timer(&tp->timer);
508#ifdef CONFIG_TULIP_NAPI
509 init_timer(&tp->oom_timer);
510 tp->oom_timer.data = (unsigned long)dev;
511 tp->oom_timer.function = oom_timer;
512#endif
513}
514
515static int
516tulip_open(struct net_device *dev)
517{
518 struct tulip_private *tp = netdev_priv(dev);
519 int retval;
520
521 tulip_init_ring (dev);
522
523 retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
524 dev->name, dev);
525 if (retval)
526 goto free_ring;
527
528 tulip_up (dev);
529
530 netif_start_queue (dev);
531
532 return 0;
533
534free_ring:
535 tulip_free_ring (dev);
536 return retval;
537}
538
539
540static void tulip_tx_timeout(struct net_device *dev)
541{
542 struct tulip_private *tp = netdev_priv(dev);
543 void __iomem *ioaddr = tp->base_addr;
544 unsigned long flags;
545
546 spin_lock_irqsave (&tp->lock, flags);
547
548 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
549 /* Do nothing -- the media monitor should handle this. */
550 if (tulip_debug > 1)
551 dev_warn(&dev->dev,
552 "Transmit timeout using MII device\n");
553 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
554 tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
555 tp->chip_id == DM910X) {
556 dev_warn(&dev->dev,
557 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
558 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
559 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
560 ioread32(ioaddr + CSR15));
561 tp->timeout_recovery = 1;
562 schedule_work(&tp->media_work);
563 goto out_unlock;
564 } else if (tp->chip_id == PNIC2) {
565 dev_warn(&dev->dev,
566 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
567 (int)ioread32(ioaddr + CSR5),
568 (int)ioread32(ioaddr + CSR6),
569 (int)ioread32(ioaddr + CSR7),
570 (int)ioread32(ioaddr + CSR12));
571 } else {
572 dev_warn(&dev->dev,
573 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
574 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
575 dev->if_port = 0;
576 }
577
578#if defined(way_too_many_messages)
579 if (tulip_debug > 3) {
580 int i;
581 for (i = 0; i < RX_RING_SIZE; i++) {
582 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
583 int j;
584 printk(KERN_DEBUG
585 "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
586 i,
587 (unsigned int)tp->rx_ring[i].status,
588 (unsigned int)tp->rx_ring[i].length,
589 (unsigned int)tp->rx_ring[i].buffer1,
590 (unsigned int)tp->rx_ring[i].buffer2,
591 buf[0], buf[1], buf[2]);
592 for (j = 0; buf[j] != 0xee && j < 1600; j++)
593 if (j < 100)
594 pr_cont(" %02x", buf[j]);
595 pr_cont(" j=%d\n", j);
596 }
597 printk(KERN_DEBUG " Rx ring %p: ", tp->rx_ring);
598 for (i = 0; i < RX_RING_SIZE; i++)
599 pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
600 printk(KERN_DEBUG " Tx ring %p: ", tp->tx_ring);
601 for (i = 0; i < TX_RING_SIZE; i++)
602 pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
603 pr_cont("\n");
604 }
605#endif
606
607 tulip_tx_timeout_complete(tp, ioaddr);
608
609out_unlock:
610 spin_unlock_irqrestore (&tp->lock, flags);
611 dev->trans_start = jiffies; /* prevent tx timeout */
612 netif_wake_queue (dev);
613}
614
615
616/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
617static void tulip_init_ring(struct net_device *dev)
618{
619 struct tulip_private *tp = netdev_priv(dev);
620 int i;
621
622 tp->susp_rx = 0;
623 tp->ttimer = 0;
624 tp->nir = 0;
625
626 for (i = 0; i < RX_RING_SIZE; i++) {
627 tp->rx_ring[i].status = 0x00000000;
628 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
629 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
630 tp->rx_buffers[i].skb = NULL;
631 tp->rx_buffers[i].mapping = 0;
632 }
633 /* Mark the last entry as wrapping the ring. */
634 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
635 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
636
637 for (i = 0; i < RX_RING_SIZE; i++) {
638 dma_addr_t mapping;
639
640 /* Note the receive buffer must be longword aligned.
641 netdev_alloc_skb() provides 16 byte alignment. But do *not*
642 use skb_reserve() to align the IP header! */
643 struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
644 tp->rx_buffers[i].skb = skb;
645 if (skb == NULL)
646 break;
647 mapping = pci_map_single(tp->pdev, skb->data,
648 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
649 tp->rx_buffers[i].mapping = mapping;
650 tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
651 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
652 }
653 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
654
655 /* The Tx buffer descriptor is filled in as needed, but we
656 do need to clear the ownership bit. */
657 for (i = 0; i < TX_RING_SIZE; i++) {
658 tp->tx_buffers[i].skb = NULL;
659 tp->tx_buffers[i].mapping = 0;
660 tp->tx_ring[i].status = 0x00000000;
661 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
662 }
663 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
664}
665
666static netdev_tx_t
667tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
668{
669 struct tulip_private *tp = netdev_priv(dev);
670 int entry;
671 u32 flag;
672 dma_addr_t mapping;
673 unsigned long flags;
674
675 spin_lock_irqsave(&tp->lock, flags);
676
677 /* Calculate the next Tx descriptor entry. */
678 entry = tp->cur_tx % TX_RING_SIZE;
679
680 tp->tx_buffers[entry].skb = skb;
681 mapping = pci_map_single(tp->pdev, skb->data,
682 skb->len, PCI_DMA_TODEVICE);
683 tp->tx_buffers[entry].mapping = mapping;
684 tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
685
686 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
687 flag = 0x60000000; /* No interrupt */
688 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
689 flag = 0xe0000000; /* Tx-done intr. */
690 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
691 flag = 0x60000000; /* No Tx-done intr. */
692 } else { /* Leave room for set_rx_mode() to fill entries. */
693 flag = 0xe0000000; /* Tx-done intr. */
694 netif_stop_queue(dev);
695 }
696 if (entry == TX_RING_SIZE-1)
697 flag = 0xe0000000 | DESC_RING_WRAP;
698
699 tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
700 /* if we were using Transmit Automatic Polling, we would need a
701 * wmb() here. */
702 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
703 wmb();
704
705 tp->cur_tx++;
706
707 /* Trigger an immediate transmit demand. */
708 iowrite32(0, tp->base_addr + CSR1);
709
710 spin_unlock_irqrestore(&tp->lock, flags);
711
712 return NETDEV_TX_OK;
713}
714
715static void tulip_clean_tx_ring(struct tulip_private *tp)
716{
717 unsigned int dirty_tx;
718
719 for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
720 dirty_tx++) {
721 int entry = dirty_tx % TX_RING_SIZE;
722 int status = le32_to_cpu(tp->tx_ring[entry].status);
723
724 if (status < 0) {
725 tp->dev->stats.tx_errors++; /* It wasn't Txed */
726 tp->tx_ring[entry].status = 0;
727 }
728
729 /* Check for Tx filter setup frames. */
730 if (tp->tx_buffers[entry].skb == NULL) {
731 /* test because dummy frames not mapped */
732 if (tp->tx_buffers[entry].mapping)
733 pci_unmap_single(tp->pdev,
734 tp->tx_buffers[entry].mapping,
735 sizeof(tp->setup_frame),
736 PCI_DMA_TODEVICE);
737 continue;
738 }
739
740 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
741 tp->tx_buffers[entry].skb->len,
742 PCI_DMA_TODEVICE);
743
744 /* Free the original skb. */
745 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
746 tp->tx_buffers[entry].skb = NULL;
747 tp->tx_buffers[entry].mapping = 0;
748 }
749}
750
751static void tulip_down (struct net_device *dev)
752{
753 struct tulip_private *tp = netdev_priv(dev);
754 void __iomem *ioaddr = tp->base_addr;
755 unsigned long flags;
756
757 cancel_work_sync(&tp->media_work);
758
759#ifdef CONFIG_TULIP_NAPI
760 napi_disable(&tp->napi);
761#endif
762
763 del_timer_sync (&tp->timer);
764#ifdef CONFIG_TULIP_NAPI
765 del_timer_sync (&tp->oom_timer);
766#endif
767 spin_lock_irqsave (&tp->lock, flags);
768
769 /* Disable interrupts by clearing the interrupt mask. */
770 iowrite32 (0x00000000, ioaddr + CSR7);
771
772 /* Stop the Tx and Rx processes. */
773 tulip_stop_rxtx(tp);
774
775 /* prepare receive buffers */
776 tulip_refill_rx(dev);
777
778 /* release any unconsumed transmit buffers */
779 tulip_clean_tx_ring(tp);
780
781 if (ioread32(ioaddr + CSR6) != 0xffffffff)
782 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
783
784 spin_unlock_irqrestore (&tp->lock, flags);
785
786 init_timer(&tp->timer);
787 tp->timer.data = (unsigned long)dev;
788 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
789
790 dev->if_port = tp->saved_if_port;
791
792 /* Leave the driver in snooze, not sleep, mode. */
793 tulip_set_power_state (tp, 0, 1);
794}
795
796static void tulip_free_ring (struct net_device *dev)
797{
798 struct tulip_private *tp = netdev_priv(dev);
799 int i;
800
801 /* Free all the skbuffs in the Rx queue. */
802 for (i = 0; i < RX_RING_SIZE; i++) {
803 struct sk_buff *skb = tp->rx_buffers[i].skb;
804 dma_addr_t mapping = tp->rx_buffers[i].mapping;
805
806 tp->rx_buffers[i].skb = NULL;
807 tp->rx_buffers[i].mapping = 0;
808
809 tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
810 tp->rx_ring[i].length = 0;
811 /* An invalid address. */
812 tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
813 if (skb) {
814 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
815 PCI_DMA_FROMDEVICE);
816 dev_kfree_skb (skb);
817 }
818 }
819
820 for (i = 0; i < TX_RING_SIZE; i++) {
821 struct sk_buff *skb = tp->tx_buffers[i].skb;
822
823 if (skb != NULL) {
824 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
825 skb->len, PCI_DMA_TODEVICE);
826 dev_kfree_skb (skb);
827 }
828 tp->tx_buffers[i].skb = NULL;
829 tp->tx_buffers[i].mapping = 0;
830 }
831}
832
833static int tulip_close (struct net_device *dev)
834{
835 struct tulip_private *tp = netdev_priv(dev);
836 void __iomem *ioaddr = tp->base_addr;
837
838 netif_stop_queue (dev);
839
840 tulip_down (dev);
841
842 if (tulip_debug > 1)
843 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
844 ioread32 (ioaddr + CSR5));
845
846 free_irq (tp->pdev->irq, dev);
847
848 tulip_free_ring (dev);
849
850 return 0;
851}
852
853static struct net_device_stats *tulip_get_stats(struct net_device *dev)
854{
855 struct tulip_private *tp = netdev_priv(dev);
856 void __iomem *ioaddr = tp->base_addr;
857
858 if (netif_running(dev)) {
859 unsigned long flags;
860
861 spin_lock_irqsave (&tp->lock, flags);
862
863 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
864
865 spin_unlock_irqrestore(&tp->lock, flags);
866 }
867
868 return &dev->stats;
869}
870
871
872static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
873{
874 struct tulip_private *np = netdev_priv(dev);
875 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
876 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
877 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
878}
879
880
881static int tulip_ethtool_set_wol(struct net_device *dev,
882 struct ethtool_wolinfo *wolinfo)
883{
884 struct tulip_private *tp = netdev_priv(dev);
885
886 if (wolinfo->wolopts & (~tp->wolinfo.supported))
887 return -EOPNOTSUPP;
888
889 tp->wolinfo.wolopts = wolinfo->wolopts;
890 device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
891 return 0;
892}
893
894static void tulip_ethtool_get_wol(struct net_device *dev,
895 struct ethtool_wolinfo *wolinfo)
896{
897 struct tulip_private *tp = netdev_priv(dev);
898
899 wolinfo->supported = tp->wolinfo.supported;
900 wolinfo->wolopts = tp->wolinfo.wolopts;
901 return;
902}
903
904
905static const struct ethtool_ops ops = {
906 .get_drvinfo = tulip_get_drvinfo,
907 .set_wol = tulip_ethtool_set_wol,
908 .get_wol = tulip_ethtool_get_wol,
909};
910
911/* Provide ioctl() calls to examine the MII xcvr state. */
912static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
913{
914 struct tulip_private *tp = netdev_priv(dev);
915 void __iomem *ioaddr = tp->base_addr;
916 struct mii_ioctl_data *data = if_mii(rq);
917 const unsigned int phy_idx = 0;
918 int phy = tp->phys[phy_idx] & 0x1f;
919 unsigned int regnum = data->reg_num;
920
921 switch (cmd) {
922 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
923 if (tp->mii_cnt)
924 data->phy_id = phy;
925 else if (tp->flags & HAS_NWAY)
926 data->phy_id = 32;
927 else if (tp->chip_id == COMET)
928 data->phy_id = 1;
929 else
930 return -ENODEV;
931
932 case SIOCGMIIREG: /* Read MII PHY register. */
933 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
934 int csr12 = ioread32 (ioaddr + CSR12);
935 int csr14 = ioread32 (ioaddr + CSR14);
936 switch (regnum) {
937 case 0:
938 if (((csr14<<5) & 0x1000) ||
939 (dev->if_port == 5 && tp->nwayset))
940 data->val_out = 0x1000;
941 else
942 data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
943 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
944 break;
945 case 1:
946 data->val_out =
947 0x1848 +
948 ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
949 ((csr12&0x06) == 6 ? 0 : 4);
950 data->val_out |= 0x6048;
951 break;
952 case 4:
953 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
954 data->val_out =
955 ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
956 ((csr14 >> 1) & 0x20) + 1;
957 data->val_out |= ((csr14 >> 9) & 0x03C0);
958 break;
959 case 5: data->val_out = tp->lpar; break;
960 default: data->val_out = 0; break;
961 }
962 } else {
963 data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
964 }
965 return 0;
966
967 case SIOCSMIIREG: /* Write MII PHY register. */
968 if (regnum & ~0x1f)
969 return -EINVAL;
970 if (data->phy_id == phy) {
971 u16 value = data->val_in;
972 switch (regnum) {
973 case 0: /* Check for autonegotiation on or reset. */
974 tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
975 if (tp->full_duplex_lock)
976 tp->full_duplex = (value & 0x0100) ? 1 : 0;
977 break;
978 case 4:
979 tp->advertising[phy_idx] =
980 tp->mii_advertise = data->val_in;
981 break;
982 }
983 }
984 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
985 u16 value = data->val_in;
986 if (regnum == 0) {
987 if ((value & 0x1200) == 0x1200) {
988 if (tp->chip_id == PNIC2) {
989 pnic2_start_nway (dev);
990 } else {
991 t21142_start_nway (dev);
992 }
993 }
994 } else if (regnum == 4)
995 tp->sym_advertise = value;
996 } else {
997 tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
998 }
999 return 0;
1000 default:
1001 return -EOPNOTSUPP;
1002 }
1003
1004 return -EOPNOTSUPP;
1005}
1006
1007
1008/* Set or clear the multicast filter for this adaptor.
1009 Note that we only use exclusion around actually queueing the
1010 new frame, not around filling tp->setup_frame. This is non-deterministic
1011 when re-entered but still correct. */
1012
1013static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1014{
1015 struct tulip_private *tp = netdev_priv(dev);
1016 u16 hash_table[32];
1017 struct netdev_hw_addr *ha;
1018 int i;
1019 u16 *eaddrs;
1020
1021 memset(hash_table, 0, sizeof(hash_table));
1022 __set_bit_le(255, hash_table); /* Broadcast entry */
1023 /* This should work on big-endian machines as well. */
1024 netdev_for_each_mc_addr(ha, dev) {
1025 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1026
1027 __set_bit_le(index, hash_table);
1028 }
1029 for (i = 0; i < 32; i++) {
1030 *setup_frm++ = hash_table[i];
1031 *setup_frm++ = hash_table[i];
1032 }
1033 setup_frm = &tp->setup_frame[13*6];
1034
1035 /* Fill the final entry with our physical address. */
1036 eaddrs = (u16 *)dev->dev_addr;
1037 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1038 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1039 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1040}
1041
1042static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1043{
1044 struct tulip_private *tp = netdev_priv(dev);
1045 struct netdev_hw_addr *ha;
1046 u16 *eaddrs;
1047
1048 /* We have <= 14 addresses so we can use the wonderful
1049 16 address perfect filtering of the Tulip. */
1050 netdev_for_each_mc_addr(ha, dev) {
1051 eaddrs = (u16 *) ha->addr;
1052 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1053 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1054 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1055 }
1056 /* Fill the unused entries with the broadcast address. */
1057 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1058 setup_frm = &tp->setup_frame[15*6];
1059
1060 /* Fill the final entry with our physical address. */
1061 eaddrs = (u16 *)dev->dev_addr;
1062 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1063 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1064 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1065}
1066
1067
1068static void set_rx_mode(struct net_device *dev)
1069{
1070 struct tulip_private *tp = netdev_priv(dev);
1071 void __iomem *ioaddr = tp->base_addr;
1072 int csr6;
1073
1074 csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1075
1076 tp->csr6 &= ~0x00D5;
1077 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1078 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1079 csr6 |= AcceptAllMulticast | AcceptAllPhys;
1080 } else if ((netdev_mc_count(dev) > 1000) ||
1081 (dev->flags & IFF_ALLMULTI)) {
1082 /* Too many to filter well -- accept all multicasts. */
1083 tp->csr6 |= AcceptAllMulticast;
1084 csr6 |= AcceptAllMulticast;
1085 } else if (tp->flags & MC_HASH_ONLY) {
1086 /* Some work-alikes have only a 64-entry hash filter table. */
1087 /* Should verify correctness on big-endian/__powerpc__ */
1088 struct netdev_hw_addr *ha;
1089 if (netdev_mc_count(dev) > 64) {
1090 /* Arbitrary non-effective limit. */
1091 tp->csr6 |= AcceptAllMulticast;
1092 csr6 |= AcceptAllMulticast;
1093 } else {
1094 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
1095 int filterbit;
1096 netdev_for_each_mc_addr(ha, dev) {
1097 if (tp->flags & COMET_MAC_ADDR)
1098 filterbit = ether_crc_le(ETH_ALEN,
1099 ha->addr);
1100 else
1101 filterbit = ether_crc(ETH_ALEN,
1102 ha->addr) >> 26;
1103 filterbit &= 0x3f;
1104 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1105 if (tulip_debug > 2)
1106 dev_info(&dev->dev,
1107 "Added filter for %pM %08x bit %d\n",
1108 ha->addr,
1109 ether_crc(ETH_ALEN, ha->addr),
1110 filterbit);
1111 }
1112 if (mc_filter[0] == tp->mc_filter[0] &&
1113 mc_filter[1] == tp->mc_filter[1])
1114 ; /* No change. */
1115 else if (tp->flags & IS_ASIX) {
1116 iowrite32(2, ioaddr + CSR13);
1117 iowrite32(mc_filter[0], ioaddr + CSR14);
1118 iowrite32(3, ioaddr + CSR13);
1119 iowrite32(mc_filter[1], ioaddr + CSR14);
1120 } else if (tp->flags & COMET_MAC_ADDR) {
1121 iowrite32(mc_filter[0], ioaddr + CSR27);
1122 iowrite32(mc_filter[1], ioaddr + CSR28);
1123 }
1124 tp->mc_filter[0] = mc_filter[0];
1125 tp->mc_filter[1] = mc_filter[1];
1126 }
1127 } else {
1128 unsigned long flags;
1129 u32 tx_flags = 0x08000000 | 192;
1130
1131 /* Note that only the low-address shortword of setup_frame is valid!
1132 The values are doubled for big-endian architectures. */
1133 if (netdev_mc_count(dev) > 14) {
1134 /* Must use a multicast hash table. */
1135 build_setup_frame_hash(tp->setup_frame, dev);
1136 tx_flags = 0x08400000 | 192;
1137 } else {
1138 build_setup_frame_perfect(tp->setup_frame, dev);
1139 }
1140
1141 spin_lock_irqsave(&tp->lock, flags);
1142
1143 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1144 /* Same setup recently queued, we need not add it. */
1145 } else {
1146 unsigned int entry;
1147 int dummy = -1;
1148
1149 /* Now add this frame to the Tx list. */
1150
1151 entry = tp->cur_tx++ % TX_RING_SIZE;
1152
1153 if (entry != 0) {
1154 /* Avoid a chip errata by prefixing a dummy entry. */
1155 tp->tx_buffers[entry].skb = NULL;
1156 tp->tx_buffers[entry].mapping = 0;
1157 tp->tx_ring[entry].length =
1158 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1159 tp->tx_ring[entry].buffer1 = 0;
1160 /* Must set DescOwned later to avoid race with chip */
1161 dummy = entry;
1162 entry = tp->cur_tx++ % TX_RING_SIZE;
1163
1164 }
1165
1166 tp->tx_buffers[entry].skb = NULL;
1167 tp->tx_buffers[entry].mapping =
1168 pci_map_single(tp->pdev, tp->setup_frame,
1169 sizeof(tp->setup_frame),
1170 PCI_DMA_TODEVICE);
1171 /* Put the setup frame on the Tx list. */
1172 if (entry == TX_RING_SIZE-1)
1173 tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
1174 tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1175 tp->tx_ring[entry].buffer1 =
1176 cpu_to_le32(tp->tx_buffers[entry].mapping);
1177 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1178 if (dummy >= 0)
1179 tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1180 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1181 netif_stop_queue(dev);
1182
1183 /* Trigger an immediate transmit demand. */
1184 iowrite32(0, ioaddr + CSR1);
1185 }
1186
1187 spin_unlock_irqrestore(&tp->lock, flags);
1188 }
1189
1190 iowrite32(csr6, ioaddr + CSR6);
1191}
1192
1193#ifdef CONFIG_TULIP_MWI
1194static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
1195{
1196 struct tulip_private *tp = netdev_priv(dev);
1197 u8 cache;
1198 u16 pci_command;
1199 u32 csr0;
1200
1201 if (tulip_debug > 3)
1202 netdev_dbg(dev, "tulip_mwi_config()\n");
1203
1204 tp->csr0 = csr0 = 0;
1205
1206 /* if we have any cache line size at all, we can do MRM and MWI */
1207 csr0 |= MRM | MWI;
1208
1209 /* Enable MWI in the standard PCI command bit.
1210 * Check for the case where MWI is desired but not available
1211 */
1212 pci_try_set_mwi(pdev);
1213
1214 /* read result from hardware (in case bit refused to enable) */
1215 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1216 if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1217 csr0 &= ~MWI;
1218
1219 /* if cache line size hardwired to zero, no MWI */
1220 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1221 if ((csr0 & MWI) && (cache == 0)) {
1222 csr0 &= ~MWI;
1223 pci_clear_mwi(pdev);
1224 }
1225
1226 /* assign per-cacheline-size cache alignment and
1227 * burst length values
1228 */
1229 switch (cache) {
1230 case 8:
1231 csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1232 break;
1233 case 16:
1234 csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1235 break;
1236 case 32:
1237 csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1238 break;
1239 default:
1240 cache = 0;
1241 break;
1242 }
1243
1244 /* if we have a good cache line size, we by now have a good
1245 * csr0, so save it and exit
1246 */
1247 if (cache)
1248 goto out;
1249
1250 /* we don't have a good csr0 or cache line size, disable MWI */
1251 if (csr0 & MWI) {
1252 pci_clear_mwi(pdev);
1253 csr0 &= ~MWI;
1254 }
1255
1256 /* sane defaults for burst length and cache alignment
1257 * originally from de4x5 driver
1258 */
1259 csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1260
1261out:
1262 tp->csr0 = csr0;
1263 if (tulip_debug > 2)
1264 netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1265 cache, csr0);
1266}
1267#endif
1268
1269/*
1270 * Chips that have the MRM/reserved bit quirk and the burst quirk. That
1271 * is the DM910X and the on chip ULi devices
1272 */
1273
1274static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1275{
1276 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1277 return 1;
1278 return 0;
1279}
1280
1281static const struct net_device_ops tulip_netdev_ops = {
1282 .ndo_open = tulip_open,
1283 .ndo_start_xmit = tulip_start_xmit,
1284 .ndo_tx_timeout = tulip_tx_timeout,
1285 .ndo_stop = tulip_close,
1286 .ndo_get_stats = tulip_get_stats,
1287 .ndo_do_ioctl = private_ioctl,
1288 .ndo_set_rx_mode = set_rx_mode,
1289 .ndo_change_mtu = eth_change_mtu,
1290 .ndo_set_mac_address = eth_mac_addr,
1291 .ndo_validate_addr = eth_validate_addr,
1292#ifdef CONFIG_NET_POLL_CONTROLLER
1293 .ndo_poll_controller = poll_tulip,
1294#endif
1295};
1296
1297DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = {
1298 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1299 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1300 { },
1301};
1302
1303static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1304{
1305 struct tulip_private *tp;
1306 /* See note below on the multiport cards. */
1307 static unsigned char last_phys_addr[ETH_ALEN] = {
1308 0x00, 'L', 'i', 'n', 'u', 'x'
1309 };
1310 static int last_irq;
1311 static int multiport_cnt; /* For four-port boards w/one EEPROM */
1312 int i, irq;
1313 unsigned short sum;
1314 unsigned char *ee_data;
1315 struct net_device *dev;
1316 void __iomem *ioaddr;
1317 static int board_idx = -1;
1318 int chip_idx = ent->driver_data;
1319 const char *chip_name = tulip_tbl[chip_idx].chip_name;
1320 unsigned int eeprom_missing = 0;
1321 unsigned int force_csr0 = 0;
1322
1323#ifndef MODULE
1324 if (tulip_debug > 0)
1325 printk_once(KERN_INFO "%s", version);
1326#endif
1327
1328 board_idx++;
1329
1330 /*
1331 * Lan media wire a tulip chip to a wan interface. Needs a very
1332 * different driver (lmc driver)
1333 */
1334
1335 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1336 pr_err("skipping LMC card\n");
1337 return -ENODEV;
1338 } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1339 (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1340 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1341 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1342 pr_err("skipping SBE T3E3 port\n");
1343 return -ENODEV;
1344 }
1345
1346 /*
1347 * DM910x chips should be handled by the dmfe driver, except
1348 * on-board chips on SPARC systems. Also, early DM9100s need
1349 * software CRC which only the dmfe driver supports.
1350 */
1351
1352#ifdef CONFIG_TULIP_DM910X
1353 if (chip_idx == DM910X) {
1354 struct device_node *dp;
1355
1356 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1357 pdev->revision < 0x30) {
1358 pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1359 return -ENODEV;
1360 }
1361
1362 dp = pci_device_to_OF_node(pdev);
1363 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1364 pr_info("skipping DM910x expansion card (use dmfe)\n");
1365 return -ENODEV;
1366 }
1367 }
1368#endif
1369
1370 /*
1371 * Looks for early PCI chipsets where people report hangs
1372 * without the workarounds being on.
1373 */
1374
1375 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1376 aligned. Aries might need this too. The Saturn errata are not
1377 pretty reading but thankfully it's an old 486 chipset.
1378
1379 2. The dreaded SiS496 486 chipset. Same workaround as Intel
1380 Saturn.
1381 */
1382
1383 if (pci_dev_present(early_486_chipsets)) {
1384 csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1385 force_csr0 = 1;
1386 }
1387
1388 /* bugfix: the ASIX must have a burst limit or horrible things happen. */
1389 if (chip_idx == AX88140) {
1390 if ((csr0 & 0x3f00) == 0)
1391 csr0 |= 0x2000;
1392 }
1393
1394 /* PNIC doesn't have MWI/MRL/MRM... */
1395 if (chip_idx == LC82C168)
1396 csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1397
1398 /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1399 if (tulip_uli_dm_quirk(pdev)) {
1400 csr0 &= ~0x01f100ff;
1401#if defined(CONFIG_SPARC)
1402 csr0 = (csr0 & ~0xff00) | 0xe000;
1403#endif
1404 }
1405 /*
1406 * And back to business
1407 */
1408
1409 i = pci_enable_device(pdev);
1410 if (i) {
1411 pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1412 return i;
1413 }
1414
1415 irq = pdev->irq;
1416
1417 /* alloc_etherdev ensures aligned and zeroed private structures */
1418 dev = alloc_etherdev (sizeof (*tp));
1419 if (!dev)
1420 return -ENOMEM;
1421
1422 SET_NETDEV_DEV(dev, &pdev->dev);
1423 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1424 pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1425 pci_name(pdev),
1426 (unsigned long long)pci_resource_len (pdev, 0),
1427 (unsigned long long)pci_resource_start (pdev, 0));
1428 goto err_out_free_netdev;
1429 }
1430
1431 /* grab all resources from both PIO and MMIO regions, as we
1432 * don't want anyone else messing around with our hardware */
1433 if (pci_request_regions (pdev, DRV_NAME))
1434 goto err_out_free_netdev;
1435
1436 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1437
1438 if (!ioaddr)
1439 goto err_out_free_res;
1440
1441 /*
1442 * initialize private data structure 'tp'
1443 * it is zeroed and aligned in alloc_etherdev
1444 */
1445 tp = netdev_priv(dev);
1446 tp->dev = dev;
1447
1448 tp->rx_ring = pci_alloc_consistent(pdev,
1449 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1450 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1451 &tp->rx_ring_dma);
1452 if (!tp->rx_ring)
1453 goto err_out_mtable;
1454 tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1455 tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1456
1457 tp->chip_id = chip_idx;
1458 tp->flags = tulip_tbl[chip_idx].flags;
1459
1460 tp->wolinfo.supported = 0;
1461 tp->wolinfo.wolopts = 0;
1462 /* COMET: Enable power management only for AN983B */
1463 if (chip_idx == COMET ) {
1464 u32 sig;
1465 pci_read_config_dword (pdev, 0x80, &sig);
1466 if (sig == 0x09811317) {
1467 tp->flags |= COMET_PM;
1468 tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1469 pr_info("%s: Enabled WOL support for AN983B\n",
1470 __func__);
1471 }
1472 }
1473 tp->pdev = pdev;
1474 tp->base_addr = ioaddr;
1475 tp->revision = pdev->revision;
1476 tp->csr0 = csr0;
1477 spin_lock_init(&tp->lock);
1478 spin_lock_init(&tp->mii_lock);
1479 init_timer(&tp->timer);
1480 tp->timer.data = (unsigned long)dev;
1481 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1482
1483 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1484
1485#ifdef CONFIG_TULIP_MWI
1486 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1487 tulip_mwi_config (pdev, dev);
1488#endif
1489
1490 /* Stop the chip's Tx and Rx processes. */
1491 tulip_stop_rxtx(tp);
1492
1493 pci_set_master(pdev);
1494
1495#ifdef CONFIG_GSC
1496 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1497 switch (pdev->subsystem_device) {
1498 default:
1499 break;
1500 case 0x1061:
1501 case 0x1062:
1502 case 0x1063:
1503 case 0x1098:
1504 case 0x1099:
1505 case 0x10EE:
1506 tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1507 chip_name = "GSC DS21140 Tulip";
1508 }
1509 }
1510#endif
1511
1512 /* Clear the missed-packet counter. */
1513 ioread32(ioaddr + CSR8);
1514
1515 /* The station address ROM is read byte serially. The register must
1516 be polled, waiting for the value to be read bit serially from the
1517 EEPROM.
1518 */
1519 ee_data = tp->eeprom;
1520 memset(ee_data, 0, sizeof(tp->eeprom));
1521 sum = 0;
1522 if (chip_idx == LC82C168) {
1523 for (i = 0; i < 3; i++) {
1524 int value, boguscnt = 100000;
1525 iowrite32(0x600 | i, ioaddr + 0x98);
1526 do {
1527 value = ioread32(ioaddr + CSR9);
1528 } while (value < 0 && --boguscnt > 0);
1529 put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1530 sum += value & 0xffff;
1531 }
1532 } else if (chip_idx == COMET) {
1533 /* No need to read the EEPROM. */
1534 put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1535 put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1536 for (i = 0; i < 6; i ++)
1537 sum += dev->dev_addr[i];
1538 } else {
1539 /* A serial EEPROM interface, we read now and sort it out later. */
1540 int sa_offset = 0;
1541 int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1542 int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1543
1544 if (ee_max_addr > sizeof(tp->eeprom))
1545 ee_max_addr = sizeof(tp->eeprom);
1546
1547 for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1548 u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1549 ee_data[i] = data & 0xff;
1550 ee_data[i + 1] = data >> 8;
1551 }
1552
1553 /* DEC now has a specification (see Notes) but early board makers
1554 just put the address in the first EEPROM locations. */
1555 /* This does memcmp(ee_data, ee_data+16, 8) */
1556 for (i = 0; i < 8; i ++)
1557 if (ee_data[i] != ee_data[16+i])
1558 sa_offset = 20;
1559 if (chip_idx == CONEXANT) {
1560			/* Check that the tuple type and length are correct. */
1561 if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
1562 sa_offset = 0x19A;
1563 } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
1564 ee_data[2] == 0) {
1565 sa_offset = 2; /* Grrr, damn Matrox boards. */
1566 multiport_cnt = 4;
1567 }
1568#ifdef CONFIG_MIPS_COBALT
1569 if ((pdev->bus->number == 0) &&
1570 ((PCI_SLOT(pdev->devfn) == 7) ||
1571 (PCI_SLOT(pdev->devfn) == 12))) {
1572 /* Cobalt MAC address in first EEPROM locations. */
1573 sa_offset = 0;
1574			/* Ensure our media table fixup gets applied */
1575 memcpy(ee_data + 16, ee_data, 8);
1576 }
1577#endif
1578#ifdef CONFIG_GSC
1579		/* Check to see if we have a broken SROM */
1580 if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1581 /* pci_vendor_id and subsystem_id are swapped */
1582 ee_data[0] = ee_data[2];
1583 ee_data[1] = ee_data[3];
1584 ee_data[2] = 0x61;
1585 ee_data[3] = 0x10;
1586
1587			/* HSC-PCI boards need the MAC address byte-swapped and shifted
1588			 * up one word. The shift must start from the end of the MAC
1589			 * because the source and destination ranges overlap by two bytes.
1590			 */
1591 for (i = 4; i >= 0; i -= 2) {
1592 ee_data[17 + i + 3] = ee_data[17 + i];
1593 ee_data[16 + i + 5] = ee_data[16 + i];
1594 }
1595 }
1596#endif
1597
1598 for (i = 0; i < 6; i ++) {
1599 dev->dev_addr[i] = ee_data[i + sa_offset];
1600 sum += ee_data[i + sa_offset];
1601 }
1602 }
1603 /* Lite-On boards have the address byte-swapped. */
1604 if ((dev->dev_addr[0] == 0xA0 ||
1605 dev->dev_addr[0] == 0xC0 ||
1606 dev->dev_addr[0] == 0x02) &&
1607 dev->dev_addr[1] == 0x00)
1608 for (i = 0; i < 6; i+=2) {
1609 char tmp = dev->dev_addr[i];
1610 dev->dev_addr[i] = dev->dev_addr[i+1];
1611 dev->dev_addr[i+1] = tmp;
1612 }
1613	/* On the Zynx 315 Etherarray and other multiport boards only the
1614	   first Tulip has an EEPROM; the addresses of the subsequent ports
1615	   are derived from the first.
1616	   On Sparc systems the MAC address is held in the OBP property
1617	   "local-mac-address".
1618	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1619	   that here as well. */
1620 if (sum == 0 || sum == 6*0xff) {
1621#if defined(CONFIG_SPARC)
1622 struct device_node *dp = pci_device_to_OF_node(pdev);
1623 const unsigned char *addr;
1624 int len;
1625#endif
1626 eeprom_missing = 1;
1627 for (i = 0; i < 5; i++)
1628 dev->dev_addr[i] = last_phys_addr[i];
1629 dev->dev_addr[i] = last_phys_addr[i] + 1;
1630#if defined(CONFIG_SPARC)
1631 addr = of_get_property(dp, "local-mac-address", &len);
1632 if (addr && len == ETH_ALEN)
1633 memcpy(dev->dev_addr, addr, ETH_ALEN);
1634#endif
1635#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
1636 if (last_irq)
1637 irq = last_irq;
1638#endif
1639 }
1640
1641 for (i = 0; i < 6; i++)
1642 last_phys_addr[i] = dev->dev_addr[i];
1643 last_irq = irq;
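	/* Remember this board's address and IRQ so that a later EEPROM-less
	 * port on a multiport card can derive its own values from them.
	 */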
1644
1645	/* The low bits (MEDIA_MASK) select the media type. */
1646 if (board_idx >= 0 && board_idx < MAX_UNITS) {
1647 if (options[board_idx] & MEDIA_MASK)
1648 tp->default_port = options[board_idx] & MEDIA_MASK;
1649 if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1650 tp->full_duplex = 1;
1651 if (mtu[board_idx] > 0)
1652 dev->mtu = mtu[board_idx];
1653 }
1654 if (dev->mem_start & MEDIA_MASK)
1655 tp->default_port = dev->mem_start & MEDIA_MASK;
1656 if (tp->default_port) {
1657 pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1658 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1659 tp->medialock = 1;
1660 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1661 tp->full_duplex = 1;
1662 }
1663 if (tp->full_duplex)
1664 tp->full_duplex_lock = 1;
1665
1666 if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1667 static const u16 media2advert[] = {
1668 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1669 };
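		/* MII advertisement (ANAR) bits: 0x20 10baseT, 0x40 10baseT-FD,
		 * 0x80 100baseTx, 0x100 100baseTx-FD, 0x200 100baseT4. The index
		 * is the media type minus 9, the first MII media index.
		 */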
1670 tp->mii_advertise = media2advert[tp->default_port - 9];
1671 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1672 }
1673
1674 if (tp->flags & HAS_MEDIA_TABLE) {
1675 sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
1676 tulip_parse_eeprom(dev);
1677 strcpy(dev->name, "eth%d"); /* un-hack */
1678 }
1679
1680 if ((tp->flags & ALWAYS_CHECK_MII) ||
1681 (tp->mtable && tp->mtable->has_mii) ||
1682 ( ! tp->mtable && (tp->flags & HAS_MII))) {
1683 if (tp->mtable && tp->mtable->has_mii) {
1684 for (i = 0; i < tp->mtable->leafcount; i++)
1685 if (tp->mtable->mleaf[i].media == 11) {
1686 tp->cur_index = i;
1687 tp->saved_if_port = dev->if_port;
1688 tulip_select_media(dev, 2);
1689 dev->if_port = tp->saved_if_port;
1690 break;
1691 }
1692 }
1693
1694		/* Find the connected MII xcvrs.
1695		   Doing this in open() would allow detecting external xcvrs
1696		   later, but would take too much time. */
1697 tulip_find_mii (dev, board_idx);
1698 }
1699
1700 /* The Tulip-specific entries in the device structure. */
1701 dev->netdev_ops = &tulip_netdev_ops;
1702 dev->watchdog_timeo = TX_TIMEOUT;
1703#ifdef CONFIG_TULIP_NAPI
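	/* NAPI weight of 16: the poll routine handles at most 16 packets
	 * per invocation before yielding.
	 */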
1704 netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1705#endif
1706 SET_ETHTOOL_OPS(dev, &ops);
1707
1708 if (register_netdev(dev))
1709 goto err_out_free_ring;
1710
1711 pci_set_drvdata(pdev, dev);
1712
1713 dev_info(&dev->dev,
1714#ifdef CONFIG_TULIP_MMIO
1715 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1716#else
1717 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1718#endif
1719 chip_name, pdev->revision,
1720 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1721 eeprom_missing ? " EEPROM not present," : "",
1722 dev->dev_addr, irq);
1723
1724 if (tp->chip_id == PNIC2)
1725 tp->link_change = pnic2_lnk_change;
1726 else if (tp->flags & HAS_NWAY)
1727 tp->link_change = t21142_lnk_change;
1728 else if (tp->flags & HAS_PNICNWAY)
1729 tp->link_change = pnic_lnk_change;
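	/* Each autonegotiation scheme gets its own callback; the selected
	 * handler runs when the chip reports a link change.
	 */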
1730
1731 /* Reset the xcvr interface and turn on heartbeat. */
1732 switch (chip_idx) {
1733 case DC21140:
1734 case DM910X:
1735 default:
1736 if (tp->mtable)
1737 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1738 break;
1739 case DC21142:
1740 if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
1741 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1742 iowrite32(0x0000, ioaddr + CSR13);
1743 iowrite32(0x0000, ioaddr + CSR14);
1744 iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1745 } else
1746 t21142_start_nway(dev);
1747 break;
1748 case PNIC2:
1749		/* just do a reset for sanity's sake */
1750 iowrite32(0x0000, ioaddr + CSR13);
1751 iowrite32(0x0000, ioaddr + CSR14);
1752 break;
1753 case LC82C168:
1754 if ( ! tp->mii_cnt) {
1755 tp->nway = 1;
1756 tp->nwayset = 0;
1757 iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1758 iowrite32(0x30, ioaddr + CSR12);
1759 iowrite32(0x0001F078, ioaddr + CSR6);
1760 iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1761 }
1762 break;
1763 case MX98713:
1764 case COMPEX9881:
1765 iowrite32(0x00000000, ioaddr + CSR6);
1766 iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1767 iowrite32(0x00000001, ioaddr + CSR13);
1768 break;
1769 case MX98715:
1770 case MX98725:
1771 iowrite32(0x01a80000, ioaddr + CSR6);
1772 iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1773 iowrite32(0x00001000, ioaddr + CSR12);
1774 break;
1775 case COMET:
1776 /* No initialization necessary. */
1777 break;
1778 }
1779
1780 /* put the chip in snooze mode until opened */
1781 tulip_set_power_state (tp, 0, 1);
1782
1783 return 0;
1784
1785err_out_free_ring:
1786 pci_free_consistent (pdev,
1787 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1788 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1789 tp->rx_ring, tp->rx_ring_dma);
1790
1791err_out_mtable:
1792 kfree (tp->mtable);
1793 pci_iounmap(pdev, ioaddr);
1794
1795err_out_free_res:
1796 pci_release_regions (pdev);
1797
1798err_out_free_netdev:
1799 free_netdev (dev);
1800 return -ENODEV;
1801}
1802
1803
1804/* set the registers according to the given wolopts */
1805static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1806{
1807 struct net_device *dev = pci_get_drvdata(pdev);
1808 struct tulip_private *tp = netdev_priv(dev);
1809 void __iomem *ioaddr = tp->base_addr;
1810
1811 if (tp->flags & COMET_PM) {
1812
1813 unsigned int tmp;
1814
1815 tmp = ioread32(ioaddr + CSR18);
1816 tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1817 tmp |= comet_csr18_pm_mode;
1818 iowrite32(tmp, ioaddr + CSR18);
1819
1820		/* Set the Wake-up Control/Status Register to the given WOL options */
1821 tmp = ioread32(ioaddr + CSR13);
1822 tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1823 if (wolopts & WAKE_MAGIC)
1824 tmp |= comet_csr13_mpre;
1825 if (wolopts & WAKE_PHY)
1826 tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1827 /* Clear the event flags */
1828 tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
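		/* The status bits are write-one-to-clear, so including them in
		 * the same write acknowledges any wake event already pending.
		 */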
1829 iowrite32(tmp, ioaddr + CSR13);
1830 }
1831}
1832
1833#ifdef CONFIG_PM
1834
1835
1836static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1837{
1838 pci_power_t pstate;
1839 struct net_device *dev = pci_get_drvdata(pdev);
1840 struct tulip_private *tp = netdev_priv(dev);
1841
1842 if (!dev)
1843 return -EINVAL;
1844
1845 if (!netif_running(dev))
1846 goto save_state;
1847
1848 tulip_down(dev);
1849
1850 netif_device_detach(dev);
1851	/* FIXME: freeing the IRQ here needlessly adds an error path (resume must re-request it, which can fail). */
1852 free_irq(tp->pdev->irq, dev);
1853
1854save_state:
1855 pci_save_state(pdev);
1856 pci_disable_device(pdev);
1857 pstate = pci_choose_state(pdev, state);
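	/* Arm wake-on-LAN only for a real suspend into a low-power state;
	 * pci_choose_state() maps the PM event to the target D-state.
	 */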
1858 if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1859 int rc;
1860
1861 tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1862 rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1863 if (rc)
1864 pr_err("pci_enable_wake failed (%d)\n", rc);
1865 }
1866 pci_set_power_state(pdev, pstate);
1867
1868 return 0;
1869}
1870
1871
1872static int tulip_resume(struct pci_dev *pdev)
1873{
1874 struct net_device *dev = pci_get_drvdata(pdev);
1875 struct tulip_private *tp = netdev_priv(dev);
1876 void __iomem *ioaddr = tp->base_addr;
1877 int retval;
1878 unsigned int tmp;
1879
1880 if (!dev)
1881 return -EINVAL;
1882
1883 pci_set_power_state(pdev, PCI_D0);
1884 pci_restore_state(pdev);
1885
1886 if (!netif_running(dev))
1887 return 0;
1888
1889 if ((retval = pci_enable_device(pdev))) {
1890 pr_err("pci_enable_device failed in resume\n");
1891 return retval;
1892 }
1893
1894 retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1895 dev->name, dev);
1896 if (retval) {
1897 pr_err("request_irq failed in resume\n");
1898 return retval;
1899 }
1900
1901 if (tp->flags & COMET_PM) {
1902 pci_enable_wake(pdev, PCI_D3hot, 0);
1903 pci_enable_wake(pdev, PCI_D3cold, 0);
1904
1905 /* Clear the PMES flag */
1906 tmp = ioread32(ioaddr + CSR20);
1907 tmp |= comet_csr20_pmes;
1908 iowrite32(tmp, ioaddr + CSR20);
1909
1910 /* Disable all wake-up events */
1911 tulip_set_wolopts(pdev, 0);
1912 }
1913 netif_device_attach(dev);
1914
1915 if (netif_running(dev))
1916 tulip_up(dev);
1917
1918 return 0;
1919}
1920
1921#endif /* CONFIG_PM */
1922
1923
1924static void tulip_remove_one(struct pci_dev *pdev)
1925{
1926 struct net_device *dev = pci_get_drvdata (pdev);
1927 struct tulip_private *tp;
1928
1929 if (!dev)
1930 return;
1931
1932 tp = netdev_priv(dev);
1933 unregister_netdev(dev);
1934 pci_free_consistent (pdev,
1935 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1936 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1937 tp->rx_ring, tp->rx_ring_dma);
1938 kfree (tp->mtable);
1939 pci_iounmap(pdev, tp->base_addr);
1940 free_netdev (dev);
1941 pci_release_regions (pdev);
1942 pci_disable_device(pdev);
1943
1944 /* pci_power_off (pdev, -1); */
1945}
1946
1947#ifdef CONFIG_NET_POLL_CONTROLLER
1948/*
1949 * Polling 'interrupt' - used by things like netconsole to send skbs
1950 * without having to re-enable interrupts. It's not called while
1951 * the interrupt routine is executing.
1952 */
1953
1954static void poll_tulip (struct net_device *dev)
1955{
1956 struct tulip_private *tp = netdev_priv(dev);
1957 const int irq = tp->pdev->irq;
1958
1959 /* disable_irq here is not very nice, but with the lockless
1960 interrupt handler we have no other choice. */
1961 disable_irq(irq);
1962 tulip_interrupt (irq, dev);
1963 enable_irq(irq);
1964}
1965#endif
1966
1967static struct pci_driver tulip_driver = {
1968 .name = DRV_NAME,
1969 .id_table = tulip_pci_tbl,
1970 .probe = tulip_init_one,
1971 .remove = tulip_remove_one,
1972#ifdef CONFIG_PM
1973 .suspend = tulip_suspend,
1974 .resume = tulip_resume,
1975#endif /* CONFIG_PM */
1976};
1977
1978
1979static int __init tulip_init (void)
1980{
1981#ifdef MODULE
1982 pr_info("%s", version);
1983#endif
1984
1985 /* copy module parms into globals */
1986 tulip_rx_copybreak = rx_copybreak;
1987 tulip_max_interrupt_work = max_interrupt_work;
1988
1989 /* probe for and init boards */
1990 return pci_register_driver(&tulip_driver);
1991}
1992
1993
1994static void __exit tulip_cleanup (void)
1995{
1996 pci_unregister_driver (&tulip_driver);
1997}
1998
1999
2000module_init(tulip_init);
2001module_exit(tulip_cleanup);