// SPDX-License-Identifier: GPL-1.0+

/* 8390.c: A general NS8390 ethernet driver core for linux. */
/*
	Written 1992-94 by Donald Becker.

	Copyright 1993 United States Government as represented by the
	Director, National Security Agency.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This is the chip-specific code for many 8390-based ethernet adaptors.
	This is not a complete driver, it must be combined with board-specific
	code such as ne.c, wd.c, 3c503.c, etc.

	Seeing how at least eight drivers use this code, (not counting the
	PCMCIA ones either) it is easy to break some card by what seems like
	a simple innocent change. Please contact me or Donald if you think
	you have found something that needs changing. -- PG


	Changelog:

	Paul Gortmaker	: remove set_bit lock, other cleanups.
	Paul Gortmaker	: add ei_get_8390_hdr() so we can pass skb's to
			  ei_block_input() for eth_io_copy_and_sum().
	Paul Gortmaker	: exchange static int ei_pingpong for a #define,
			  also add better Tx error handling.
	Paul Gortmaker	: rewrite Rx overrun handling as per NS specs.
	Alexey Kuznetsov	: use the 8390's six bit hash multicast filter.
	Paul Gortmaker	: tweak ANK's above multicast changes a bit.
	Paul Gortmaker	: update packet statistics for v2.1.x
	Alan Cox	: support arbitrary stupid port mappings on the
			  68K Macintosh. Support >16bit I/O spaces
	Paul Gortmaker	: add kmod support for auto-loading of the 8390
			  module by all drivers that require it.
	Alan Cox	: Spinlocking work, added 'BUG_83C690'
	Paul Gortmaker	: Separate out Tx timeout code from Tx path.
	Paul Gortmaker	: Remove old unused single Tx buffer code.
	Hayato Fujiwara	: Add m32r support.
	Paul Gortmaker	: use skb_padto() instead of stack scratch area

	Sources:
	The National Semiconductor LAN Databook, and the 3Com 3c503 databook.

  */

#include <linux/build_bug.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/crc32.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#define NS8390_CORE
#include "8390.h"

#define BUG_83C690

/* These are the operational function interfaces to board-specific
   routines.
	void reset_8390(struct net_device *dev)
		Resets the board associated with DEV, including a hardware reset of
		the 8390. This is only called when there is a transmit timeout, and
		it is always followed by 8390_init().
	void block_output(struct net_device *dev, int count, const unsigned char *buf,
			  int start_page)
		Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
		"page" value uses the 8390's 256-byte pages.
	void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
		Read the 4 byte, page aligned 8390 header. *If* there is a
		subsequent read, it will be of the rest of the packet.
	void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
		Read COUNT bytes from the packet buffer into the skb data area. Start
		reading from RING_OFFSET, the address as the 8390 sees it. This will always
		follow the read of the 8390 header.
*/
#define ei_reset_8390 (ei_local->reset_8390)
#define ei_block_output (ei_local->block_output)
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)
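
/*
 * Illustrative note (editorial, not from the original sources): a board
 * driver typically fills in these hooks on its private ei_device after
 * allocating the net_device, along the lines of
 *
 *	struct ei_device *ei_local = netdev_priv(dev);
 *
 *	ei_local->reset_8390   = board_reset_8390;
 *	ei_local->block_input  = board_block_input;
 *	ei_local->block_output = board_block_output;
 *	ei_local->get_8390_hdr = board_get_8390_hdr;
 *
 * where the board_* names are hypothetical placeholders; see ne.c or wd.c
 * for real implementations.
 */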

/* Index to functions. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
static void ei_receive(struct net_device *dev);
static void ei_rx_overrun(struct net_device *dev);

/* Routines generic to NS8390-based boards. */
static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
				int start_page);
static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);

static unsigned version_printed;
static int msg_enable;
static const int default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_RX_ERR |
				      NETIF_MSG_TX_ERR);
module_param(msg_enable, int, 0444);
MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");

/*
 * SMP and the 8390 setup.
 *
 * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
 * a page register that controls bank and packet buffer access. We guard
 * this with ei_local->page_lock. Nobody should assume or set the page other
 * than zero when the lock is not held. Lock holders must restore page 0
 * before unlocking. Even pure readers must take the lock to protect in
 * page 0.
 *
 * To make life difficult the chip can also be very slow. We therefore can't
 * just use spinlocks. For the longer lockups we disable the irq the device
 * sits on and hold the lock. We must hold the lock because there is a dual
 * processor case other than interrupts (get stats/set multicast list in
 * parallel with each other and transmit).
 *
 * Note: in theory we can just disable the irq on the card _but_ there is
 * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
 * enter lock, take the queued irq. So we waddle instead of flying.
 *
 * Finally by special arrangement for the purpose of being generally
 * annoying the transmit function is called bh atomic. That places
 * restrictions on the user context callers as disable_irq won't save
 * them.
 *
 * Additional explanation of problems with locking by Alan Cox:
 *
 * "The author (me) didn't use spin_lock_irqsave because the slowness of the
 * card means that approach caused horrible problems like losing serial data
 * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
 * chips with FPGA front ends.
 *
 * Ok the logic behind the 8390 is very simple:
 *
 * Things to know
 *	- IRQ delivery is asynchronous to the PCI bus
 *	- Blocking the local CPU IRQ via spin locks was too slow
 *	- The chip has register windows needing locking work
 *
 * So the path was once (I say once as people appear to have changed it
 * in the mean time and it now looks rather bogus if the changes to use
 * disable_irq_nosync_irqsave are disabling the local IRQ)
 *
 *
 *	Take the page lock
 *	Mask the IRQ on chip
 *	Disable the IRQ (but not mask locally - someone seems to have
 *		broken this with the lock validator stuff)
 *		[This must be _nosync as the page lock may otherwise
 *		deadlock us]
 *	Drop the page lock and turn IRQs back on
 *
 *	At this point an existing IRQ may still be running but we can't
 *	get a new one
 *
 *	Take the lock (so we know the IRQ has terminated) but don't mask
 *	the IRQs on the processor
 *	Set irqlock [for debug]
 *
 *	Transmit (slow as ****)
 *
 *	re-enable the IRQ
 *
 *
 * We have to use disable_irq because otherwise you will get delayed
 * interrupts on the APIC bus deadlocking the transmit path.
 *
 * Quite hairy but the chip simply wasn't designed for SMP and you can't
 * even ACK an interrupt without risking corrupting other parallel
 * activities on the chip." [lkml, 25 Jul 2007]
 */
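
/*
 * Editorial summary of the above (the authoritative sequence is the code
 * in __ei_start_xmit() below):
 *
 *	spin_lock_irqsave(&ei_local->page_lock, flags);
 *	ei_outb_p(0x00, e8390_base + EN0_IMR);		(mask the chip's IRQ)
 *	spin_unlock_irqrestore(&ei_local->page_lock, flags);
 *
 *	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
 *	spin_lock(&ei_local->page_lock);	(any in-flight handler has finished)
 *	... slow chip programming ...
 *	spin_unlock(&ei_local->page_lock);
 *	enable_irq_lockdep_irqrestore(dev->irq, &flags);
 */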



/**
 * ei_open - Open/initialize the board.
 * @dev: network device to initialize
 *
 * This routine goes all-out, setting everything
 * up anew at each open, even though many of these registers should only
 * need to be set once at boot.
 */
static int __ei_open(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 * Grab the page lock so we own the register set, then call
	 * the init function.
	 */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 1);
	/* Set the flag before we drop the lock. That way the IRQ arrives
	   after it's set and we get no silly warnings */
	netif_start_queue(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	ei_local->irqlock = 0;
	return 0;
}

/**
 * ei_close - shut down network device
 * @dev: network device to close
 *
 * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
 */
static int __ei_close(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/*
	 * Hold the page lock during close
	 */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 0);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	netif_stop_queue(dev);
	return 0;
}

/**
 * ei_tx_timeout - handle transmit time out condition
 * @dev: network device which has apparently fallen asleep
 *
 * Called by kernel when device never acknowledges a transmit has
 * completed (or failed) - i.e. never posted a Tx related interrupt.
 */

static void __ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
	unsigned long flags;

	dev->stats.tx_errors++;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	txsr = ei_inb(e8390_base+EN0_TSR);
	isr = ei_inb(e8390_base+EN0_ISR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
		   (txsr & ENTSR_ABT) ? "excess collisions." :
		   (isr) ? "lost interrupt?" : "cable problem?",
		   txsr, isr, tickssofar);

	if (!isr && !dev->stats.tx_packets) {
		/* The 8390 probably hasn't gotten on the cable yet. */
		ei_local->interface_num ^= 1;	/* Try a different xcvr. */
	}

	/* Ugly but a reset can be slow, yet must be protected */

	disable_irq_nosync_lockdep(dev->irq);
	spin_lock(&ei_local->page_lock);

	/* Try to restart the card. Perhaps the user has fixed something. */
	ei_reset_8390(dev);
	__NS8390_init(dev, 1);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep(dev->irq);
	netif_wake_queue(dev);
}

/**
 * ei_start_xmit - begin packet transmission
 * @skb: packet to be sent
 * @dev: network device to which packet is sent
 *
 * Sends a packet to an 8390 network device.
 */

static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int send_length = skb->len, output_page;
	unsigned long flags;
	char buf[ETH_ZLEN];
	char *data = skb->data;

	if (skb->len < ETH_ZLEN) {
		memset(buf, 0, ETH_ZLEN);	/* more efficient than doing just the needed bits */
		memcpy(buf, data, skb->len);
		send_length = ETH_ZLEN;
		data = buf;
	}

	/* Mask interrupts from the ethercard.
	   SMP: We have to grab the lock here otherwise the IRQ handler
	   on another CPU can flip window and race the IRQ mask set. We end
	   up trashing the mcast filter not disabling irqs if we don't lock */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, e8390_base + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);


	/*
	 * Slow phase with lock held.
	 */

	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);

	spin_lock(&ei_local->page_lock);

	ei_local->irqlock = 1;

	/*
	 * We have two Tx slots available for use. Find the first free
	 * slot, and then perform some sanity checks. With two Tx bufs,
	 * you get very close to transmitting back-to-back packets. With
	 * only one Tx buf, the transmitter sits idle while you reload the
	 * card, leaving a substantial gap between each transmitted packet.
	 */
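	/*
	 * Editorial example: with the usual TX_PAGES of 12 (see 8390.h) each
	 * slot gets TX_PAGES/2 = 6 of the 8390's 256-byte pages, enough for
	 * a maximum-length frame.  If, say, tx_start_page were 0x40 (board
	 * dependent), slot one would start at page 0x40 and slot two at 0x46.
	 */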

	if (ei_local->tx1 == 0) {
		output_page = ei_local->tx_start_page;
		ei_local->tx1 = send_length;
		if ((netif_msg_tx_queued(ei_local)) &&
		    ei_local->tx2 > 0)
			netdev_dbg(dev,
				   "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx2, ei_local->lasttx, ei_local->txing);
	} else if (ei_local->tx2 == 0) {
		output_page = ei_local->tx_start_page + TX_PAGES/2;
		ei_local->tx2 = send_length;
		if ((netif_msg_tx_queued(ei_local)) &&
		    ei_local->tx1 > 0)
			netdev_dbg(dev,
				   "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx1, ei_local->lasttx, ei_local->txing);
	} else {			/* We should never get here. */
		netif_dbg(ei_local, tx_err, dev,
			  "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
			  ei_local->tx1, ei_local->tx2, ei_local->lasttx);
		ei_local->irqlock = 0;
		netif_stop_queue(dev);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq_lockdep_irqrestore(dev->irq, &flags);
		dev->stats.tx_errors++;
		return NETDEV_TX_BUSY;
	}

	/*
	 * Okay, now upload the packet and trigger a send if the transmitter
	 * isn't already sending. If it is busy, the interrupt handler will
	 * trigger the send later, upon receiving a Tx done interrupt.
	 */

	ei_block_output(dev, send_length, data, output_page);

	if (!ei_local->txing) {
		ei_local->txing = 1;
		NS8390_trigger_send(dev, send_length, output_page);
		if (output_page == ei_local->tx_start_page) {
			ei_local->tx1 = -1;
			ei_local->lasttx = -1;
		} else {
			ei_local->tx2 = -1;
			ei_local->lasttx = -2;
		}
	} else
		ei_local->txqueue++;

	if (ei_local->tx1 && ei_local->tx2)
		netif_stop_queue(dev);
	else
		netif_start_queue(dev);

	/* Turn 8390 interrupts back on. */
	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);
	skb_tx_timestamp(skb);
	dev_consume_skb_any(skb);
	dev->stats.tx_bytes += send_length;

	return NETDEV_TX_OK;
}

/**
 * ei_interrupt - handle the interrupts from an 8390
 * @irq: interrupt number
 * @dev_id: a pointer to the net_device
 *
 * Handle the ether interface interrupts. We pull packets from
 * the 8390 via the card specific functions and fire them at the networking
 * stack. We also handle transmit completions and wake the transmit path if
 * necessary. We also update the counters and do other housekeeping as
 * needed.
 */

static irqreturn_t __ei_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned long e8390_base = dev->base_addr;
	int interrupts, nr_serviced = 0;
	struct ei_device *ei_local = netdev_priv(dev);

	/*
	 * Protect the irq test too.
	 */

	spin_lock(&ei_local->page_lock);

	if (ei_local->irqlock) {
		/*
		 * This might just be an interrupt for a PCI device sharing
		 * this line
		 */
		netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
			   ei_inb_p(e8390_base + EN0_ISR),
			   ei_inb_p(e8390_base + EN0_IMR));
		spin_unlock(&ei_local->page_lock);
		return IRQ_NONE;
	}

	/* Change to page 0 and read the intr status reg. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
	netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
		  ei_inb_p(e8390_base + EN0_ISR));

	/* !!Assumption!! -- we stay in page 0. Don't break this. */
	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
	       ++nr_serviced < MAX_SERVICE) {
		if (!netif_running(dev)) {
			netdev_warn(dev, "interrupt from stopped card\n");
			/* rmk - acknowledge the interrupts */
			ei_outb_p(interrupts, e8390_base + EN0_ISR);
			interrupts = 0;
			break;
		}
		if (interrupts & ENISR_OVER)
			ei_rx_overrun(dev);
		else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
			/* Got a good (?) packet. */
			ei_receive(dev);
		}
		/* Push the next to-transmit packet through. */
		if (interrupts & ENISR_TX)
			ei_tx_intr(dev);
		else if (interrupts & ENISR_TX_ERR)
			ei_tx_err(dev);

		if (interrupts & ENISR_COUNTERS) {
			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
			dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
			dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
		}

		/* Ignore any RDC interrupts that make it back to here. */
		if (interrupts & ENISR_RDC)
			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);

		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
	}

	if (interrupts && (netif_msg_intr(ei_local))) {
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
		if (nr_serviced >= MAX_SERVICE) {
			/* 0xFF is valid for a card removal */
			if (interrupts != 0xFF)
				netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
					    interrupts);
			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
		} else {
			netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
			ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
		}
	}
	spin_unlock(&ei_local->page_lock);
	return IRQ_RETVAL(nr_serviced > 0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void __ei_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	__ei_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 * ei_tx_err - handle transmitter error
 * @dev: network device which threw the exception
 *
 * A transmitter error has happened. Most likely excess collisions (which
 * is a fairly normal condition). If the error is one where the Tx will
 * have been aborted, we try and send another one right away, instead of
 * letting the failed packet sit and collect dust in the Tx buffer. This
 * is a much better solution as it avoids kernel based Tx timeouts, and
 * an unnecessary card reset.
 *
 * Called with lock held.
 */

static void ei_tx_err(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);

#ifdef VERBOSE_ERROR_DUMP
	netdev_dbg(dev, "transmitter error (%#2x):", txsr);
	if (txsr & ENTSR_ABT)
		pr_cont(" excess-collisions ");
	if (txsr & ENTSR_ND)
		pr_cont(" non-deferral ");
	if (txsr & ENTSR_CRS)
		pr_cont(" lost-carrier ");
	if (txsr & ENTSR_FU)
		pr_cont(" FIFO-underrun ");
	if (txsr & ENTSR_CDH)
		pr_cont(" lost-heartbeat ");
	pr_cont("\n");
#endif

	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */

	if (tx_was_aborted)
		ei_tx_intr(dev);
	else {
		dev->stats.tx_errors++;
		if (txsr & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (txsr & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (txsr & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
}

/**
 * ei_tx_intr - transmit interrupt handler
 * @dev: network device for which tx intr is handled
 *
 * We have finished a transmit: check for errors and then trigger the next
 * packet to be sent. Called with lock held.
 */

static void ei_tx_intr(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int status = ei_inb(e8390_base + EN0_TSR);

	ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */

	/*
	 * There are two Tx buffers, see which one finished, and trigger
	 * the send of another one if it exists.
	 */
	ei_local->txqueue--;

	if (ei_local->tx1 < 0) {
		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
			pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
			       ei_local->name, ei_local->lasttx, ei_local->tx1);
		ei_local->tx1 = 0;
		if (ei_local->tx2 > 0) {
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
			netif_trans_update(dev);
			ei_local->tx2 = -1;
			ei_local->lasttx = 2;
		} else {
			ei_local->lasttx = 20;
			ei_local->txing = 0;
		}
	} else if (ei_local->tx2 < 0) {
		if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
			pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
			       ei_local->name, ei_local->lasttx, ei_local->tx2);
		ei_local->tx2 = 0;
		if (ei_local->tx1 > 0) {
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
			netif_trans_update(dev);
			ei_local->tx1 = -1;
			ei_local->lasttx = 1;
		} else {
			ei_local->lasttx = 10;
			ei_local->txing = 0;
		}
	} /* else
		netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
			    ei_local->lasttx);
*/

	/* Minimize Tx latency: update the statistics after we restart TXing. */
	if (status & ENTSR_COL)
		dev->stats.collisions++;
	if (status & ENTSR_PTX)
		dev->stats.tx_packets++;
	else {
		dev->stats.tx_errors++;
		if (status & ENTSR_ABT) {
			dev->stats.tx_aborted_errors++;
			dev->stats.collisions += 16;
		}
		if (status & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (status & ENTSR_FU)
			dev->stats.tx_fifo_errors++;
		if (status & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (status & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
	netif_wake_queue(dev);
}

/**
 * ei_receive - receive some packets
 * @dev: network device with which receive will be run
 *
 * We have a good packet(s), get it/them out of the buffers.
 * Called with lock held.
 */

static void ei_receive(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned char rxing_page, this_frame, next_frame;
	unsigned short current_offset;
	int rx_pkt_count = 0;
	struct e8390_pkt_hdr rx_frame;
	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;

	while (++rx_pkt_count < 10) {
		int pkt_len, pkt_stat;

		/* Get the rx page (incoming packet pointer). */
		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);

		/* Remove one frame from the ring. Boundary is always a page behind. */
		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
		if (this_frame >= ei_local->stop_page)
			this_frame = ei_local->rx_start_page;

		/* Someday we'll omit the previous, iff we never get this message.
		   (There is at least one clone claimed to have a problem.)

		   Keep quiet if it looks like a card removal. One problem here
		   is that some clones crash in roughly the same way.
		 */
		if ((netif_msg_rx_status(ei_local)) &&
		    this_frame != ei_local->current_page &&
		    (this_frame != 0x0 || rxing_page != 0xFF))
			netdev_err(dev,
				   "mismatched read page pointers %2x vs %2x\n",
				   this_frame, ei_local->current_page);

		if (this_frame == rxing_page)	/* Read all the frames? */
			break;			/* Done for now */

		current_offset = this_frame << 8;
		ei_get_8390_hdr(dev, &rx_frame, this_frame);

		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
		pkt_stat = rx_frame.status;

		next_frame = this_frame + 1 + ((pkt_len+4)>>8);

		/* Check for bogosity warned by 3c503 book: the status byte is never
		   written. This happened a lot during testing! This code should be
		   cleaned up someday. */
		if (rx_frame.next != next_frame &&
		    rx_frame.next != next_frame + 1 &&
		    rx_frame.next != next_frame - num_rx_pages &&
		    rx_frame.next != next_frame + 1 - num_rx_pages) {
			ei_local->current_page = rxing_page;
			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
			dev->stats.rx_errors++;
			continue;
		}

		if (pkt_len < 60 || pkt_len > 1518) {
			netif_dbg(ei_local, rx_status, dev,
				  "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
				  rx_frame.count, rx_frame.status,
				  rx_frame.next);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
		} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb(dev, pkt_len + 2);
			if (skb == NULL) {
				netif_err(ei_local, rx_err, dev,
					  "Couldn't allocate a sk_buff of size %d\n",
					  pkt_len);
				dev->stats.rx_dropped++;
				break;
			} else {
				skb_reserve(skb, 2);	/* IP headers on 16 byte boundaries */
				skb_put(skb, pkt_len);	/* Make room */
				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
				skb->protocol = eth_type_trans(skb, dev);
				if (!skb_defer_rx_timestamp(skb))
					netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				if (pkt_stat & ENRSR_PHY)
					dev->stats.multicast++;
			}
		} else {
			netif_err(ei_local, rx_err, dev,
				  "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
				  rx_frame.status, rx_frame.next,
				  rx_frame.count);
			dev->stats.rx_errors++;
			/* NB: The NIC counts CRC, frame and missed errors. */
			if (pkt_stat & ENRSR_FO)
				dev->stats.rx_fifo_errors++;
		}
		next_frame = rx_frame.next;

		/* This _should_ never happen: it's here for avoiding bad clones. */
		if (next_frame >= ei_local->stop_page) {
			netdev_notice(dev, "next frame inconsistency, %#2x\n",
				      next_frame);
			next_frame = ei_local->rx_start_page;
		}
		ei_local->current_page = next_frame;
		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
	}

	/* We used to also ack ENISR_OVER here, but that would sometimes mask
	   a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
}

/**
 * ei_rx_overrun - handle receiver overrun
 * @dev: network device which threw exception
 *
 * We have a receiver overrun: we have to kick the 8390 to get it started
 * again. Problem is that you have to kick it exactly as NS prescribes in
 * the updated datasheets, or "the NIC may act in an unpredictable manner."
 * This includes causing "the NIC to defer indefinitely when it is stopped
 * on a busy network."  Ugh.
 * Called with lock held. Don't call this with the interrupts off or your
 * computer will hate you - it takes 10ms or so.
 */

static void ei_rx_overrun(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	unsigned char was_txing, must_resend = 0;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);

	/*
	 * Record whether a Tx was in progress and then issue the
	 * stop command.
	 */
	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
	dev->stats.rx_over_errors++;

	/*
	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
	 * Early datasheets said to poll the reset bit, but now they say that
	 * it "is not a reliable indicator and subsequently should be ignored."
	 * We wait at least 10ms.
	 */

	mdelay(10);

	/*
	 * Reset RBCR[01] back to zero as per magic incantation.
	 */
	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);

	/*
	 * See if any Tx was interrupted or not. According to NS, this
	 * step is vital, and skipping it will cause no end of havoc.
	 */

	if (was_txing) {
		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
		if (!tx_completed)
			must_resend = 1;
	}

	/*
	 * Have to enter loopback mode and then restart the NIC before
	 * you are allowed to slurp packets up off the ring.
	 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);

	/*
	 * Clear the Rx ring of all the debris, and ack the interrupt.
	 */
	ei_receive(dev);
	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);

	/*
	 * Leave loopback mode, and resend any packet that got stopped.
	 */
	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
	if (must_resend)
		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
}

/*
 * Collect the stats. This is called unlocked and from several contexts.
 */

static struct net_device_stats *__ei_get_stats(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/* If the card is stopped, just return the present stats. */
	if (!netif_running(dev))
		return &dev->stats;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	/* Read the counter registers, assuming we are in page 0. */
	dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
	dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
	dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	return &dev->stats;
}

/*
 * Form the 64 bit 8390 multicast table from the linked list of addresses
 * associated with this dev structure.
 */

static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		u32 crc = ether_crc(ETH_ALEN, ha->addr);
		/*
		 * The 8390 uses the 6 most significant bits of the
		 * CRC to index the multicast table.
		 */
		bits[crc>>29] |= (1<<((crc>>26)&7));
	}
}
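
/*
 * Editorial example of the hash above: for a hypothetical CRC value of
 * 0xA4000000, crc >> 29 = 5 selects filter byte 5 and (crc >> 26) & 7 = 2
 * selects bit 2 within it, so the loop sets bits[5] |= 0x04.
 */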

/**
 * do_set_multicast_list - set/clear multicast filter
 * @dev: net device for which multicast filter is adjusted
 *
 * Set or clear the multicast filter for this adaptor. May be called
 * from a BH in 2.1.x. Must be called with lock held.
 */

static void do_set_multicast_list(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	int i;
	struct ei_device *ei_local = netdev_priv(dev);

	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
		memset(ei_local->mcfilter, 0, 8);
		if (!netdev_mc_empty(dev))
			make_mc_bits(ei_local->mcfilter, dev);
	} else
		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */

	/*
	 * DP8390 manuals don't specify any magic sequence for altering
	 * the multicast regs on an already running card. To be safe, we
	 * ensure multicast mode is off prior to loading up the new hash
	 * table. If this proves to be not enough, we can always resort
	 * to stopping the NIC, loading the table and then restarting.
	 *
	 * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC
	 * Elite16) appear to be write-only. The NS 8390 data sheet lists
	 * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and
	 * Ultra32 EISA) appears to have this bug fixed.
	 */

	if (netif_running(dev))
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	for (i = 0; i < 8; i++) {
		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
#ifndef BUG_83C690
		if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
			netdev_err(dev, "Multicast filter read/write mismap %d\n",
				   i);
#endif
	}
	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);

	if (dev->flags&IFF_PROMISC)
		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
	else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
	else
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
}

/*
 * Called without lock held. This is invoked from user context and may
 * be parallel to just about everything else. It's also fairly quick and
 * not called too often. Must protect against both bh and irq users.
 */

static void __ei_set_multicast_list(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	spin_lock_irqsave(&ei_local->page_lock, flags);
	do_set_multicast_list(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
}

/**
 * ethdev_setup - init rest of 8390 device struct
 * @dev: network device structure to init
 *
 * Initialize the rest of the 8390 device structure. Do NOT __init
 * this, as it is used by 8390 based modular drivers too.
 */

static void ethdev_setup(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);

	ether_setup(dev);

	spin_lock_init(&ei_local->page_lock);

	ei_local->msg_enable = netif_msg_init(msg_enable, default_msg_level);

	if (netif_msg_drv(ei_local) && (version_printed++ == 0))
		pr_info("%s", version);
}

/**
 * alloc_ei_netdev - alloc_etherdev counterpart for 8390
 * @size: extra bytes to allocate
 *
 * Allocate 8390-specific net_device.
 */
static struct net_device *____alloc_ei_netdev(int size)
{
	return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
			    NET_NAME_UNKNOWN, ethdev_setup);
}




/* This page of functions should be 8390 generic */
/* Follow National Semi's recommendations for initializing the "NIC". */

/**
 * NS8390_init - initialize 8390 hardware
 * @dev: network device to initialize
 * @startp: boolean. non-zero value to initiate chip processing
 *
 * Must be called with lock held.
 */

static void __NS8390_init(struct net_device *dev, int startp)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int i;
	int endcfg = ei_local->word16
		? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
		: 0x48;

	BUILD_BUG_ON(sizeof(struct e8390_pkt_hdr) != 4);
	/* Follow National Semi's recommendations for initing the DP83902. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
	ei_outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
	/* Clear the remote byte count registers. */
	ei_outb_p(0x00, e8390_base + EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base + EN0_RCNTHI);
	/* Set to monitor and loopback mode -- this is vital!. */
	ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
	/* Set the transmit page and receive ring. */
	ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);	/* 3c503 says 0x3f,NS0x26*/
	ei_local->current_page = ei_local->rx_start_page;	/* assert boundary+1 */
	ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
	/* Clear the pending interrupts and mask. */
	ei_outb_p(0xFF, e8390_base + EN0_ISR);
	ei_outb_p(0x00, e8390_base + EN0_IMR);

	/* Copy the station address into the DS8390 registers. */

	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
	for (i = 0; i < 6; i++) {
		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
		if ((netif_msg_probe(ei_local)) &&
		    ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
			netdev_err(dev,
				   "Hw. address read/write mismap %d\n", i);
	}

	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	ei_local->tx1 = ei_local->tx2 = 0;
	ei_local->txing = 0;

	if (startp) {
		ei_outb_p(0xff, e8390_base + EN0_ISR);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
		ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
		/* 3c503 TechMan says rxconfig only after the NIC is started. */
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
		do_set_multicast_list(dev);	/* (re)load the mcast table */
	}
}

/* Trigger a transmit start, assuming the length is valid.
   Always called with the page lock held */

static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
				int start_page)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);

	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);

	if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
		netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
		return;
	}
	ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
	ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
	ei_outb_p(start_page, e8390_base + EN0_TPSR);
	ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
}
1/* 8390.c: A general NS8390 ethernet driver core for linux. */
2/*
3 Written 1992-94 by Donald Becker.
4
5 Copyright 1993 United States Government as represented by the
6 Director, National Security Agency.
7
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
10
11 The author may be reached as becker@scyld.com, or C/O
12 Scyld Computing Corporation
13 410 Severn Ave., Suite 210
14 Annapolis MD 21403
15
16
17 This is the chip-specific code for many 8390-based ethernet adaptors.
18 This is not a complete driver, it must be combined with board-specific
19 code such as ne.c, wd.c, 3c503.c, etc.
20
21 Seeing how at least eight drivers use this code, (not counting the
22 PCMCIA ones either) it is easy to break some card by what seems like
23 a simple innocent change. Please contact me or Donald if you think
24 you have found something that needs changing. -- PG
25
26
27 Changelog:
28
29 Paul Gortmaker : remove set_bit lock, other cleanups.
30 Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
31 ei_block_input() for eth_io_copy_and_sum().
32 Paul Gortmaker : exchange static int ei_pingpong for a #define,
33 also add better Tx error handling.
34 Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
35 Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
36 Paul Gortmaker : tweak ANK's above multicast changes a bit.
37 Paul Gortmaker : update packet statistics for v2.1.x
38 Alan Cox : support arbitrary stupid port mappings on the
39 68K Macintosh. Support >16bit I/O spaces
40 Paul Gortmaker : add kmod support for auto-loading of the 8390
41 module by all drivers that require it.
42 Alan Cox : Spinlocking work, added 'BUG_83C690'
43 Paul Gortmaker : Separate out Tx timeout code from Tx path.
44 Paul Gortmaker : Remove old unused single Tx buffer code.
45 Hayato Fujiwara : Add m32r support.
46 Paul Gortmaker : use skb_padto() instead of stack scratch area
47
48 Sources:
49 The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
50
51 */
52
53#include <linux/module.h>
54#include <linux/kernel.h>
55#include <linux/jiffies.h>
56#include <linux/fs.h>
57#include <linux/types.h>
58#include <linux/string.h>
59#include <linux/bitops.h>
60#include <linux/uaccess.h>
61#include <linux/io.h>
62#include <asm/irq.h>
63#include <linux/delay.h>
64#include <linux/errno.h>
65#include <linux/fcntl.h>
66#include <linux/in.h>
67#include <linux/interrupt.h>
68#include <linux/init.h>
69#include <linux/crc32.h>
70
71#include <linux/netdevice.h>
72#include <linux/etherdevice.h>
73
74#define NS8390_CORE
75#include "8390.h"
76
77#define BUG_83C690
78
79/* These are the operational function interfaces to board-specific
80 routines.
81 void reset_8390(struct net_device *dev)
82 Resets the board associated with DEV, including a hardware reset of
83 the 8390. This is only called when there is a transmit timeout, and
84 it is always followed by 8390_init().
85 void block_output(struct net_device *dev, int count, const unsigned char *buf,
86 int start_page)
87 Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
88 "page" value uses the 8390's 256-byte pages.
89 void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
90 Read the 4 byte, page aligned 8390 header. *If* there is a
91 subsequent read, it will be of the rest of the packet.
92 void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
93 Read COUNT bytes from the packet buffer into the skb data area. Start
94 reading from RING_OFFSET, the address as the 8390 sees it. This will always
95 follow the read of the 8390 header.
96*/
97#define ei_reset_8390 (ei_local->reset_8390)
98#define ei_block_output (ei_local->block_output)
99#define ei_block_input (ei_local->block_input)
100#define ei_get_8390_hdr (ei_local->get_8390_hdr)
101
102/* use 0 for production, 1 for verification, >2 for debug */
103#ifndef ei_debug
104int ei_debug = 1;
105#endif
106
107/* Index to functions. */
108static void ei_tx_intr(struct net_device *dev);
109static void ei_tx_err(struct net_device *dev);
110static void ei_receive(struct net_device *dev);
111static void ei_rx_overrun(struct net_device *dev);
112
113/* Routines generic to NS8390-based boards. */
114static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
115 int start_page);
116static void do_set_multicast_list(struct net_device *dev);
117static void __NS8390_init(struct net_device *dev, int startp);
118
119/*
120 * SMP and the 8390 setup.
121 *
122 * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
123 * a page register that controls bank and packet buffer access. We guard
124 * this with ei_local->page_lock. Nobody should assume or set the page other
125 * than zero when the lock is not held. Lock holders must restore page 0
126 * before unlocking. Even pure readers must take the lock to protect in
127 * page 0.
128 *
129 * To make life difficult the chip can also be very slow. We therefore can't
130 * just use spinlocks. For the longer lockups we disable the irq the device
131 * sits on and hold the lock. We must hold the lock because there is a dual
132 * processor case other than interrupts (get stats/set multicast list in
133 * parallel with each other and transmit).
134 *
135 * Note: in theory we can just disable the irq on the card _but_ there is
136 * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
137 * enter lock, take the queued irq. So we waddle instead of flying.
138 *
139 * Finally by special arrangement for the purpose of being generally
140 * annoying the transmit function is called bh atomic. That places
141 * restrictions on the user context callers as disable_irq won't save
142 * them.
143 *
144 * Additional explanation of problems with locking by Alan Cox:
145 *
146 * "The author (me) didn't use spin_lock_irqsave because the slowness of the
147 * card means that approach caused horrible problems like losing serial data
148 * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
149 * chips with FPGA front ends.
150 *
151 * Ok the logic behind the 8390 is very simple:
152 *
153 * Things to know
154 * - IRQ delivery is asynchronous to the PCI bus
155 * - Blocking the local CPU IRQ via spin locks was too slow
156 * - The chip has register windows needing locking work
157 *
158 * So the path was once (I say once as people appear to have changed it
159 * in the mean time and it now looks rather bogus if the changes to use
160 * disable_irq_nosync_irqsave are disabling the local IRQ)
161 *
162 *
163 * Take the page lock
164 * Mask the IRQ on chip
165 * Disable the IRQ (but not mask locally- someone seems to have
166 * broken this with the lock validator stuff)
167 * [This must be _nosync as the page lock may otherwise
168 * deadlock us]
169 * Drop the page lock and turn IRQs back on
170 *
171 * At this point an existing IRQ may still be running but we can't
172 * get a new one
173 *
174 * Take the lock (so we know the IRQ has terminated) but don't mask
175 * the IRQs on the processor
176 * Set irqlock [for debug]
177 *
178 * Transmit (slow as ****)
179 *
180 * re-enable the IRQ
181 *
182 *
183 * We have to use disable_irq because otherwise you will get delayed
184 * interrupts on the APIC bus deadlocking the transmit path.
185 *
186 * Quite hairy but the chip simply wasn't designed for SMP and you can't
187 * even ACK an interrupt without risking corrupting other parallel
188 * activities on the chip." [lkml, 25 Jul 2007]
189 */
190
191
192
193/**
194 * ei_open - Open/initialize the board.
195 * @dev: network device to initialize
196 *
197 * This routine goes all-out, setting everything
198 * up anew at each open, even though many of these registers should only
199 * need to be set once at boot.
200 */
201static int __ei_open(struct net_device *dev)
202{
203 unsigned long flags;
204 struct ei_device *ei_local = netdev_priv(dev);
205
206 if (dev->watchdog_timeo <= 0)
207 dev->watchdog_timeo = TX_TIMEOUT;
208
209 /*
210 * Grab the page lock so we own the register set, then call
211 * the init function.
212 */
213
214 spin_lock_irqsave(&ei_local->page_lock, flags);
215 __NS8390_init(dev, 1);
216 /* Set the flag before we drop the lock, That way the IRQ arrives
217 after its set and we get no silly warnings */
218 netif_start_queue(dev);
219 spin_unlock_irqrestore(&ei_local->page_lock, flags);
220 ei_local->irqlock = 0;
221 return 0;
222}
223
224/**
225 * ei_close - shut down network device
226 * @dev: network device to close
227 *
228 * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
229 */
230static int __ei_close(struct net_device *dev)
231{
232 struct ei_device *ei_local = netdev_priv(dev);
233 unsigned long flags;
234
235 /*
236 * Hold the page lock during close
237 */
238
239 spin_lock_irqsave(&ei_local->page_lock, flags);
240 __NS8390_init(dev, 0);
241 spin_unlock_irqrestore(&ei_local->page_lock, flags);
242 netif_stop_queue(dev);
243 return 0;
244}
245
246/**
247 * ei_tx_timeout - handle transmit time out condition
248 * @dev: network device which has apparently fallen asleep
249 *
250 * Called by kernel when device never acknowledges a transmit has
251 * completed (or failed) - i.e. never posted a Tx related interrupt.
252 */
253
254static void __ei_tx_timeout(struct net_device *dev)
255{
256 unsigned long e8390_base = dev->base_addr;
257 struct ei_device *ei_local = netdev_priv(dev);
258 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
259 unsigned long flags;
260
261 dev->stats.tx_errors++;
262
263 spin_lock_irqsave(&ei_local->page_lock, flags);
264 txsr = ei_inb(e8390_base+EN0_TSR);
265 isr = ei_inb(e8390_base+EN0_ISR);
266 spin_unlock_irqrestore(&ei_local->page_lock, flags);
267
268 netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
269 (txsr & ENTSR_ABT) ? "excess collisions." :
270 (isr) ? "lost interrupt?" : "cable problem?",
271 txsr, isr, tickssofar);
272
273 if (!isr && !dev->stats.tx_packets) {
274 /* The 8390 probably hasn't gotten on the cable yet. */
275 ei_local->interface_num ^= 1; /* Try a different xcvr. */
276 }
277
278 /* Ugly but a reset can be slow, yet must be protected */
279
280 disable_irq_nosync_lockdep(dev->irq);
281 spin_lock(&ei_local->page_lock);
282
283 /* Try to restart the card. Perhaps the user has fixed something. */
284 ei_reset_8390(dev);
285 __NS8390_init(dev, 1);
286
287 spin_unlock(&ei_local->page_lock);
288 enable_irq_lockdep(dev->irq);
289 netif_wake_queue(dev);
290}
291
292/**
293 * ei_start_xmit - begin packet transmission
294 * @skb: packet to be sent
295 * @dev: network device to which packet is sent
296 *
297 * Sends a packet to an 8390 network device.
298 */
299
300static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
301 struct net_device *dev)
302{
303 unsigned long e8390_base = dev->base_addr;
304 struct ei_device *ei_local = netdev_priv(dev);
305 int send_length = skb->len, output_page;
306 unsigned long flags;
307 char buf[ETH_ZLEN];
308 char *data = skb->data;
309
310 if (skb->len < ETH_ZLEN) {
311 memset(buf, 0, ETH_ZLEN); /* more efficient than doing just the needed bits */
312 memcpy(buf, data, skb->len);
313 send_length = ETH_ZLEN;
314 data = buf;
315 }
316
317 /* Mask interrupts from the ethercard.
318 SMP: We have to grab the lock here otherwise the IRQ handler
319 on another CPU can flip window and race the IRQ mask set. We end
320 up trashing the mcast filter not disabling irqs if we don't lock */
321
322 spin_lock_irqsave(&ei_local->page_lock, flags);
323 ei_outb_p(0x00, e8390_base + EN0_IMR);
324 spin_unlock_irqrestore(&ei_local->page_lock, flags);
325
326
327 /*
328 * Slow phase with lock held.
329 */
330
331 disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
332
333 spin_lock(&ei_local->page_lock);
334
335 ei_local->irqlock = 1;
336
337 /*
338 * We have two Tx slots available for use. Find the first free
339 * slot, and then perform some sanity checks. With two Tx bufs,
340 * you get very close to transmitting back-to-back packets. With
341 * only one Tx buf, the transmitter sits idle while you reload the
342 * card, leaving a substantial gap between each transmitted packet.
343 */
344
345 if (ei_local->tx1 == 0) {
346 output_page = ei_local->tx_start_page;
347 ei_local->tx1 = send_length;
348 if (ei_debug && ei_local->tx2 > 0)
349 netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
350 ei_local->tx2, ei_local->lasttx, ei_local->txing);
351 } else if (ei_local->tx2 == 0) {
352 output_page = ei_local->tx_start_page + TX_PAGES/2;
353 ei_local->tx2 = send_length;
354 if (ei_debug && ei_local->tx1 > 0)
355 netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
356 ei_local->tx1, ei_local->lasttx, ei_local->txing);
357 } else { /* We should never get here. */
358 if (ei_debug)
359 netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
360 ei_local->tx1, ei_local->tx2, ei_local->lasttx);
361 ei_local->irqlock = 0;
362 netif_stop_queue(dev);
363 ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
364 spin_unlock(&ei_local->page_lock);
365 enable_irq_lockdep_irqrestore(dev->irq, &flags);
366 dev->stats.tx_errors++;
367 return NETDEV_TX_BUSY;
368 }
369
370 /*
371 * Okay, now upload the packet and trigger a send if the transmitter
372 * isn't already sending. If it is busy, the interrupt handler will
373 * trigger the send later, upon receiving a Tx done interrupt.
374 */
375
376 ei_block_output(dev, send_length, data, output_page);
377
378 if (!ei_local->txing) {
379 ei_local->txing = 1;
380 NS8390_trigger_send(dev, send_length, output_page);
381 if (output_page == ei_local->tx_start_page) {
382 ei_local->tx1 = -1;
383 ei_local->lasttx = -1;
384 } else {
385 ei_local->tx2 = -1;
386 ei_local->lasttx = -2;
387 }
388 } else
389 ei_local->txqueue++;
390
391 if (ei_local->tx1 && ei_local->tx2)
392 netif_stop_queue(dev);
393 else
394 netif_start_queue(dev);
395
396 /* Turn 8390 interrupts back on. */
397 ei_local->irqlock = 0;
398 ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
399
400 spin_unlock(&ei_local->page_lock);
401 enable_irq_lockdep_irqrestore(dev->irq, &flags);
402 skb_tx_timestamp(skb);
403 dev_kfree_skb(skb);
404 dev->stats.tx_bytes += send_length;
405
406 return NETDEV_TX_OK;
407}
408
409/**
410 * ei_interrupt - handle the interrupts from an 8390
411 * @irq: interrupt number
412 * @dev_id: a pointer to the net_device
413 *
414 * Handle the ether interface interrupts. We pull packets from
415 * the 8390 via the card specific functions and fire them at the networking
416 * stack. We also handle transmit completions and wake the transmit path if
417 * necessary. We also update the counters and do other housekeeping as
418 * needed.
419 */
420
421static irqreturn_t __ei_interrupt(int irq, void *dev_id)
422{
423 struct net_device *dev = dev_id;
424 unsigned long e8390_base = dev->base_addr;
425 int interrupts, nr_serviced = 0;
426 struct ei_device *ei_local = netdev_priv(dev);
427
428 /*
429 * Protect the irq test too.
430 */
431
432 spin_lock(&ei_local->page_lock);
433
434 if (ei_local->irqlock) {
435 /*
436 * This might just be an interrupt for a PCI device sharing
437 * this line
438 */
439 netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
440 ei_inb_p(e8390_base + EN0_ISR),
441 ei_inb_p(e8390_base + EN0_IMR));
442 spin_unlock(&ei_local->page_lock);
443 return IRQ_NONE;
444 }
445
446 /* Change to page 0 and read the intr status reg. */
447 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
448 if (ei_debug > 3)
449 netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
450 ei_inb_p(e8390_base + EN0_ISR));
451
452 /* !!Assumption!! -- we stay in page 0. Don't break this. */
453 while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
454 ++nr_serviced < MAX_SERVICE) {
455 if (!netif_running(dev)) {
456 netdev_warn(dev, "interrupt from stopped card\n");
457 /* rmk - acknowledge the interrupts */
458 ei_outb_p(interrupts, e8390_base + EN0_ISR);
459 interrupts = 0;
460 break;
461 }
462 if (interrupts & ENISR_OVER)
463 ei_rx_overrun(dev);
464 else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
465 /* Got a good (?) packet. */
466 ei_receive(dev);
467 }
468 /* Push the next to-transmit packet through. */
469 if (interrupts & ENISR_TX)
470 ei_tx_intr(dev);
471 else if (interrupts & ENISR_TX_ERR)
472 ei_tx_err(dev);
473
474 if (interrupts & ENISR_COUNTERS) {
475 dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
476 dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
477 dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
478 ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
479 }
480
481 /* Ignore any RDC interrupts that make it back to here. */
482 if (interrupts & ENISR_RDC)
483 ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);
484
485 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
486 }
487
488 if (interrupts && ei_debug) {
489 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
490 if (nr_serviced >= MAX_SERVICE) {
491 /* 0xFF is valid for a card removal */
492 if (interrupts != 0xFF)
493 netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
494 interrupts);
495 ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
496 } else {
497 netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
498 ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
499 }
500 }
501 spin_unlock(&ei_local->page_lock);
502 return IRQ_RETVAL(nr_serviced > 0);
503}
504
505#ifdef CONFIG_NET_POLL_CONTROLLER
506static void __ei_poll(struct net_device *dev)
507{
508 disable_irq(dev->irq);
509 __ei_interrupt(dev->irq, dev);
510 enable_irq(dev->irq);
511}
512#endif
513
514/**
515 * ei_tx_err - handle transmitter error
516 * @dev: network device which threw the exception
517 *
518 * A transmitter error has happened. Most likely excess collisions (which
519 * is a fairly normal condition). If the error is one where the Tx will
520 * have been aborted, we try and send another one right away, instead of
521 * letting the failed packet sit and collect dust in the Tx buffer. This
522 * is a much better solution as it avoids kernel based Tx timeouts, and
523 * an unnecessary card reset.
524 *
525 * Called with lock held.
526 */
527
528static void ei_tx_err(struct net_device *dev)
529{
530 unsigned long e8390_base = dev->base_addr;
531 /* ei_local is used on some platforms via the EI_SHIFT macro */
532 struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
533 unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
534 unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
535
536#ifdef VERBOSE_ERROR_DUMP
537 netdev_dbg(dev, "transmitter error (%#2x):", txsr);
538 if (txsr & ENTSR_ABT)
539 pr_cont(" excess-collisions ");
540 if (txsr & ENTSR_ND)
541 pr_cont(" non-deferral ");
542 if (txsr & ENTSR_CRS)
543 pr_cont(" lost-carrier ");
544 if (txsr & ENTSR_FU)
545 pr_cont(" FIFO-underrun ");
546 if (txsr & ENTSR_CDH)
547 pr_cont(" lost-heartbeat ");
548 pr_cont("\n");
549#endif
550
551 ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
552
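/* An aborted Tx is simply retriggered via ei_tx_intr(), which also does
   the accounting; otherwise just bump the error counters here. */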
553 if (tx_was_aborted)
554 ei_tx_intr(dev);
555 else {
556 dev->stats.tx_errors++;
557 if (txsr & ENTSR_CRS)
558 dev->stats.tx_carrier_errors++;
559 if (txsr & ENTSR_CDH)
560 dev->stats.tx_heartbeat_errors++;
561 if (txsr & ENTSR_OWC)
562 dev->stats.tx_window_errors++;
563 }
564}
565
566/**
567 * ei_tx_intr - transmit interrupt handler
568 * @dev: network device for which tx intr is handled
569 *
570 * We have finished a transmit: check for errors and then trigger the next
571 * packet to be sent. Called with lock held.
572 */
573
574static void ei_tx_intr(struct net_device *dev)
575{
576 unsigned long e8390_base = dev->base_addr;
577 struct ei_device *ei_local = netdev_priv(dev);
578 int status = ei_inb(e8390_base + EN0_TSR);
579
580 ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
581
582 /*
583 * There are two Tx buffers, see which one finished, and trigger
584 * the send of another one if it exists.
585 */
586 ei_local->txqueue--;
587
588 if (ei_local->tx1 < 0) {
589 if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
590 pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
591 ei_local->name, ei_local->lasttx, ei_local->tx1);
592 ei_local->tx1 = 0;
593 if (ei_local->tx2 > 0) {
594 ei_local->txing = 1;
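/* The second Tx buffer sits 6 pages (1536 bytes, enough for a
   maximum-length frame) above tx_start_page. */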
595 NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
596 dev->trans_start = jiffies;
597 ei_local->tx2 = -1;
598 ei_local->lasttx = 2;
599 } else
600 ei_local->lasttx = 20, ei_local->txing = 0;
601 } else if (ei_local->tx2 < 0) {
602 if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
603 pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
604 ei_local->name, ei_local->lasttx, ei_local->tx2);
605 ei_local->tx2 = 0;
606 if (ei_local->tx1 > 0) {
607 ei_local->txing = 1;
608 NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
609 dev->trans_start = jiffies;
610 ei_local->tx1 = -1;
611 ei_local->lasttx = 1;
612 } else
613 ei_local->lasttx = 10, ei_local->txing = 0;
614 } /* else
615 netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
616 ei_local->lasttx);
617*/
618
619 /* Minimize Tx latency: update the statistics after we restart TXing. */
620 if (status & ENTSR_COL)
621 dev->stats.collisions++;
622 if (status & ENTSR_PTX)
623 dev->stats.tx_packets++;
624 else {
625 dev->stats.tx_errors++;
626 if (status & ENTSR_ABT) {
627 dev->stats.tx_aborted_errors++;
628 dev->stats.collisions += 16;
629 }
630 if (status & ENTSR_CRS)
631 dev->stats.tx_carrier_errors++;
632 if (status & ENTSR_FU)
633 dev->stats.tx_fifo_errors++;
634 if (status & ENTSR_CDH)
635 dev->stats.tx_heartbeat_errors++;
636 if (status & ENTSR_OWC)
637 dev->stats.tx_window_errors++;
638 }
639 netif_wake_queue(dev);
640}
641
642/**
643 * ei_receive - receive some packets
644 * @dev: network device with which receive will be run
645 *
646 * We have a good packet(s), get it/them out of the buffers.
647 * Called with lock held.
648 */
649
650static void ei_receive(struct net_device *dev)
651{
652 unsigned long e8390_base = dev->base_addr;
653 struct ei_device *ei_local = netdev_priv(dev);
654 unsigned char rxing_page, this_frame, next_frame;
655 unsigned short current_offset;
656 int rx_pkt_count = 0;
657 struct e8390_pkt_hdr rx_frame;
658 int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
659
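/* Bound the work done per call: service at most a handful of frames
   before returning to the interrupt service loop. */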
660 while (++rx_pkt_count < 10) {
661 int pkt_len, pkt_stat;
662
663 /* Get the rx page (incoming packet pointer). */
664 ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
665 rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
666 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
667
668 /* Remove one frame from the ring. Boundary is always a page behind. */
669 this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
670 if (this_frame >= ei_local->stop_page)
671 this_frame = ei_local->rx_start_page;
672
673 /* Someday we'll omit the previous, iff we never get this message.
674 (There is at least one clone claimed to have a problem.)
675
676 Keep quiet if it looks like a card removal. One problem here
677 is that some clones crash in roughly the same way.
678 */
679 if (ei_debug > 0 &&
680 this_frame != ei_local->current_page &&
681 (this_frame != 0x0 || rxing_page != 0xFF))
682 netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
683 this_frame, ei_local->current_page);
684
685 if (this_frame == rxing_page) /* Read all the frames? */
686 break; /* Done for now */
687
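/* Convert the 256-byte page number into a byte offset within the
   receive ring. */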
688 current_offset = this_frame << 8;
689 ei_get_8390_hdr(dev, &rx_frame, this_frame);
690
691 pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
692 pkt_stat = rx_frame.status;
693
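/* Estimate which page the next frame should start on (the 4-byte
   header plus the data, in 256-byte pages); this is only used for
   the sanity check against rx_frame.next below. */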
694 next_frame = this_frame + 1 + ((pkt_len+4)>>8);
695
696 /* Check for bogosity warned by 3c503 book: the status byte is never
697 written. This happened a lot during testing! This code should be
698 cleaned up someday. */
699 if (rx_frame.next != next_frame &&
700 rx_frame.next != next_frame + 1 &&
701 rx_frame.next != next_frame - num_rx_pages &&
702 rx_frame.next != next_frame + 1 - num_rx_pages) {
703 ei_local->current_page = rxing_page;
704 ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
705 dev->stats.rx_errors++;
706 continue;
707 }
708
709 if (pkt_len < 60 || pkt_len > 1518) {
710 if (ei_debug)
711 netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
712 rx_frame.count, rx_frame.status,
713 rx_frame.next);
714 dev->stats.rx_errors++;
715 dev->stats.rx_length_errors++;
716 } else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
717 struct sk_buff *skb;
718
719 skb = netdev_alloc_skb(dev, pkt_len + 2);
720 if (skb == NULL) {
721 if (ei_debug > 1)
722 netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
723 pkt_len);
724 dev->stats.rx_dropped++;
725 break;
726 } else {
727 skb_reserve(skb, 2); /* IP headers on 16 byte boundaries */
728 skb_put(skb, pkt_len); /* Make room */
729 ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
730 skb->protocol = eth_type_trans(skb, dev);
731 if (!skb_defer_rx_timestamp(skb))
732 netif_rx(skb);
733 dev->stats.rx_packets++;
734 dev->stats.rx_bytes += pkt_len;
735 if (pkt_stat & ENRSR_PHY)
736 dev->stats.multicast++;
737 }
738 } else {
739 if (ei_debug)
740 netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
741 rx_frame.status, rx_frame.next,
742 rx_frame.count);
743 dev->stats.rx_errors++;
744 /* NB: The NIC counts CRC, frame and missed errors. */
745 if (pkt_stat & ENRSR_FO)
746 dev->stats.rx_fifo_errors++;
747 }
748 next_frame = rx_frame.next;
749
750 /* This _should_ never happen: it's here for avoiding bad clones. */
751 if (next_frame >= ei_local->stop_page) {
752 netdev_notice(dev, "next frame inconsistency, %#2x\n",
753 next_frame);
754 next_frame = ei_local->rx_start_page;
755 }
756 ei_local->current_page = next_frame;
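/* As noted above, the boundary register always trails the page we
   will read next by one. */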
757 ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
758 }
759
760 /* We used to also ack ENISR_OVER here, but that would sometimes mask
761 a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
762 ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
763}
764
765/**
766 * ei_rx_overrun - handle receiver overrun
767 * @dev: network device which threw exception
768 *
769 * We have a receiver overrun: we have to kick the 8390 to get it started
770 * again. Problem is that you have to kick it exactly as NS prescribes in
771 * the updated datasheets, or "the NIC may act in an unpredictable manner."
772 * This includes causing "the NIC to defer indefinitely when it is stopped
773 * on a busy network." Ugh.
774 * Called with lock held. Don't call this with the interrupts off or your
775 * computer will hate you - it takes 10ms or so.
776 */
777
778static void ei_rx_overrun(struct net_device *dev)
779{
780 unsigned long e8390_base = dev->base_addr;
781 unsigned char was_txing, must_resend = 0;
782 /* ei_local is used on some platforms via the EI_SHIFT macro */
783 struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
784
785 /*
786 * Record whether a Tx was in progress and then issue the
787 * stop command.
788 */
789 was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
790 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
791
792 if (ei_debug > 1)
793 netdev_dbg(dev, "Receiver overrun\n");
794 dev->stats.rx_over_errors++;
795
796 /*
797 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
798 * Early datasheets said to poll the reset bit, but now they say that
799 * it "is not a reliable indicator and subsequently should be ignored."
800 * We wait at least 10ms.
801 */
802
803 mdelay(10);
804
805 /*
806 * Reset RBCR[01] back to zero as per magic incantation.
807 */
808 ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
809 ei_outb_p(0x00, e8390_base+EN0_RCNTHI);
810
811 /*
812 * See if any Tx was interrupted or not. According to NS, this
813 * step is vital, and skipping it will cause no end of havoc.
814 */
815
816 if (was_txing) {
817 unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
818 if (!tx_completed)
819 must_resend = 1;
820 }
821
822 /*
823 * Have to enter loopback mode and then restart the NIC before
824 * you are allowed to slurp packets up off the ring.
825 */
826 ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
827 ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
828
829 /*
830 * Clear the Rx ring of all the debris, and ack the interrupt.
831 */
832 ei_receive(dev);
833 ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);
834
835 /*
836 * Leave loopback mode, and resend any packet that got stopped.
837 */
838 ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
839 if (must_resend)
840 ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
841}
842
843/*
844 * Collect the stats. This is called unlocked and from several contexts.
845 */
846
847static struct net_device_stats *__ei_get_stats(struct net_device *dev)
848{
849 unsigned long ioaddr = dev->base_addr;
850 struct ei_device *ei_local = netdev_priv(dev);
851 unsigned long flags;
852
853 /* If the card is stopped, just return the present stats. */
854 if (!netif_running(dev))
855 return &dev->stats;
856
857 spin_lock_irqsave(&ei_local->page_lock, flags);
858 /* Read the counter registers, assuming we are in page 0. */
859 dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
860 dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
861 dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
862 spin_unlock_irqrestore(&ei_local->page_lock, flags);
863
864 return &dev->stats;
865}
866
867/*
868 * Form the 64 bit 8390 multicast table from the linked list of addresses
869 * associated with this dev structure.
870 */
871
872static inline void make_mc_bits(u8 *bits, struct net_device *dev)
873{
874 struct netdev_hw_addr *ha;
875
876 netdev_for_each_mc_addr(ha, dev) {
877 u32 crc = ether_crc(ETH_ALEN, ha->addr);
878 /*
879 * The 8390 uses the 6 most significant bits of the
880 * CRC to index the multicast table.
881 */
882 bits[crc>>29] |= (1<<((crc>>26)&7));
883 }
884}
885
886/**
887 * do_set_multicast_list - set/clear multicast filter
888 * @dev: net device for which multicast filter is adjusted
889 *
890 * Set or clear the multicast filter for this adaptor. May be called
891 * from a BH in 2.1.x. Must be called with lock held.
892 */
893
894static void do_set_multicast_list(struct net_device *dev)
895{
896 unsigned long e8390_base = dev->base_addr;
897 int i;
898 struct ei_device *ei_local = netdev_priv(dev);
899
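/* Build the 64-bit multicast hash: all ones when promiscuous or
   all-multicast reception is requested, otherwise one bit per
   subscribed group address. */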
900 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
901 memset(ei_local->mcfilter, 0, 8);
902 if (!netdev_mc_empty(dev))
903 make_mc_bits(ei_local->mcfilter, dev);
904 } else
905 memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */
906
907 /*
908 * DP8390 manuals don't specify any magic sequence for altering
909 * the multicast regs on an already running card. To be safe, we
910 * ensure multicast mode is off prior to loading up the new hash
911 * table. If this proves to be not enough, we can always resort
912 * to stopping the NIC, loading the table and then restarting.
913 *
914 * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC
915 * Elite16) appear to be write-only. The NS 8390 data sheet lists
916 * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and
917 * Ultra32 EISA) appears to have this bug fixed.
918 */
919
920 if (netif_running(dev))
921 ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
922 ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
923 for (i = 0; i < 8; i++) {
924 ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
925#ifndef BUG_83C690
926 if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
927 netdev_err(dev, "Multicast filter read/write mismatch %d\n",
928 i);
929#endif
930 }
931 ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
932
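/* RCR bits: 0x10 is PRO (promiscuous) and 0x08 is AM (accept
   multicast), OR'd onto the driver's base receive configuration. */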
933 if (dev->flags&IFF_PROMISC)
934 ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
935 else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
936 ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
937 else
938 ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
939}
940
941/*
942 * Called without lock held. This is invoked from user context and may
943 * be parallel to just about everything else. It's also fairly quick and
944 * not called too often. Must protect against both bh and irq users.
945 */
946
947static void __ei_set_multicast_list(struct net_device *dev)
948{
949 unsigned long flags;
950 struct ei_device *ei_local = netdev_priv(dev);
951
952 spin_lock_irqsave(&ei_local->page_lock, flags);
953 do_set_multicast_list(dev);
954 spin_unlock_irqrestore(&ei_local->page_lock, flags);
955}
956
957/**
958 * ethdev_setup - init rest of 8390 device struct
959 * @dev: network device structure to init
960 *
961 * Initialize the rest of the 8390 device structure. Do NOT __init
962 * this, as it is used by 8390-based modular drivers too.
963 */
964
965static void ethdev_setup(struct net_device *dev)
966{
967 struct ei_device *ei_local = netdev_priv(dev);
968 if (ei_debug > 1)
969 printk(version);
970
971 ether_setup(dev);
972
973 spin_lock_init(&ei_local->page_lock);
974}
975
976/**
977 * alloc_ei_netdev - alloc_etherdev counterpart for 8390
978 * @size: extra bytes to allocate
979 *
980 * Allocate 8390-specific net_device.
981 */
982static struct net_device *____alloc_ei_netdev(int size)
983{
984 return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
985 ethdev_setup);
986}
987
988
989
990
991/* This page of functions should be 8390 generic */
992/* Follow National Semi's recommendations for initializing the "NIC". */
993
994/**
995 * NS8390_init - initialize 8390 hardware
996 * @dev: network device to initialize
997 * @startp: boolean; a non-zero value starts chip processing
998 *
999 * Must be called with lock held.
1000 */
1001
1002static void __NS8390_init(struct net_device *dev, int startp)
1003{
1004 unsigned long e8390_base = dev->base_addr;
1005 struct ei_device *ei_local = netdev_priv(dev);
1006 int i;
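/* Data configuration register: 0x48 keeps the chip in normal
   (non-loopback) operation with a moderate FIFO threshold; WTS is
   OR'd in for 16-bit cards and BOS for big-endian ones. */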
1007 int endcfg = ei_local->word16
1008 ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
1009 : 0x48;
1010
1011 BUILD_BUG_ON(sizeof(struct e8390_pkt_hdr) != 4);
1013 /* Follow National Semi's recommendations for initing the DP83902. */
1014 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
1015 ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
1016 /* Clear the remote byte count registers. */
1017 ei_outb_p(0x00, e8390_base + EN0_RCNTLO);
1018 ei_outb_p(0x00, e8390_base + EN0_RCNTHI);
1019 /* Set to monitor and loopback mode -- this is vital! */
1020 ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
1021 ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
1022 /* Set the transmit page and receive ring. */
1023 ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
1024 ei_local->tx1 = ei_local->tx2 = 0;
1025 ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
1026 ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3F, NS says 0x26 */
1027 ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
1028 ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
1029 /* Clear the pending interrupts and mask. */
1030 ei_outb_p(0xFF, e8390_base + EN0_ISR);
1031 ei_outb_p(0x00, e8390_base + EN0_IMR);
1032
1033 /* Copy the station address into the DS8390 registers. */
1034
1035 ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
1036 for (i = 0; i < 6; i++) {
1037 ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
1038 if (ei_debug > 1 &&
1039 ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
1040 netdev_err(dev, "Hw. address read/write mismatch %d\n", i);
1041 }
1042
1043 ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
1044 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
1045
1046 ei_local->tx1 = ei_local->tx2 = 0;
1047 ei_local->txing = 0;
1048
1049 if (startp) {
1050 ei_outb_p(0xff, e8390_base + EN0_ISR);
1051 ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
1052 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
1053 ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
1054 /* 3c503 TechMan says rxconfig only after the NIC is started. */
1055 ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
1056 do_set_multicast_list(dev); /* (re)load the mcast table */
1057 }
1058}
1059
1060/* Trigger a transmit start, assuming the length is valid.
1061 Always called with the page lock held */
1062
1063static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1064 int start_page)
1065{
1066 unsigned long e8390_base = dev->base_addr;
1067 struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
1068
1069 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1070
1071 if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
1072 netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
1073 return;
1074 }
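/* Load the transmit byte count (low/high) and start page, then issue
   the transmit command. */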
1075 ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
1076 ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
1077 ei_outb_p(start_page, e8390_base + EN0_TPSR);
1078 ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
1079}