// SPDX-License-Identifier: GPL-1.0+
/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
   munged into HPPA boxen.

   This driver is based upon 82596.c, original credits are below...
   but there were too many hoops which HP wants jumped through to
   keep this code in there in a sane manner.

   3 primary sources of the mess --
   1) hppa needs *lots* of cacheline flushing to keep this kind of
   MMIO running.

   2) The 82596 needs to see all of its pointers as their physical
   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.

   3) The implementation HP is using seems to be significantly pickier
   about when and how the command and RX units are started.  Some
   command ordering was changed.

   Examination of the mach driver leads one to believe that there
   might be a saner way to pull this off...  anyone who feels like a
   full rewrite can be my guest.

   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)

   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
   03/02/2000  changes for better/correct(?) cache-flushing (deller)
*/

/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly into sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency.  This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */
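
/*
 * This file is a library: a board wrapper (e.g. lasi_82596.c) is expected
 * to define SWAP16()/SWAP32(), the DMA_* cache helpers, SYSBUS, and the
 * mpu_port()/ca() register accessors before #including it.  As a rough
 * sketch of the byte-order handling described above (an assumption
 * modelled on the PA-RISC wrapper, where the chip runs in big-endian
 * mode):
 *
 *	#define SWAP32(x)	(((u32)(x) << 16) | ((u32)(x) >> 16))
 *	#define SWAP16(x)	(x)
 *
 * i.e. a 16-bit word swap, so the CPU's 0x12345678 lands in memory as
 * the chip's 0x56781234.
 */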

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gfp.h>

/* DEBUG flags
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


#define DEB(x, y)	if (i596_debug & (x)) { y; }


/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
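
/* mpu_port() is supplied by the including wrapper.  A minimal sketch of
 * issuing a PORT command (hypothetical, modelled on the PA-RISC wrapper;
 * the register accessor and the order of the two half-word writes are
 * board specific, MSW first on the VME boards mentioned above):
 *
 *	u32 v = (u32)c | (u32)x;	// command code | aligned address
 *
 *	gsc_writel(v & 0xffff, lp->mpu_port);
 *	udelay(1);
 *	gsc_writel(v >> 16, lp->mpu_port);
 */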

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define ISCP_BUSY	0x0001

#define I596_NULL ((u32)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define CUC_START	0x0100
#define CUC_RESUME	0x0200
#define CUC_SUSPEND	0x0300
#define CUC_ABORT	0x0400
#define RX_START	0x0010
#define RX_RESUME	0x0020
#define RX_SUSPEND	0x0030
#define RX_ABORT	0x0040

#define TX_TIMEOUT	(HZ/20)


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	u32            ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	u32            next;
	u32            data;
	u32 cache_pad[5];		/* Total 32 bytes... */
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	u32            b_next;	/* Address from i596 viewpoint */
};
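
/* Example of the linkage described above: when a command is appended to
 * the queue, the previous tail's b_next is pointed at the *status* field
 * of the new command (see i596_add_cmd() below):
 *
 *	lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
 */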

struct tx_cmd {
	struct i596_cmd cmd;
	u32 tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;		/* So we can free it after tx */
	dma_addr_t dma_addr;
#ifdef __LP64__
	u32 cache_pad[6];		/* Total 64 bytes... */
#else
	u32 cache_pad[1];		/* Total 32 bytes... */
#endif
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	u32            b_next;	/* Address from i596 viewpoint */
	u32 rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
#ifndef __LP64__
	u32 cache_pad[2];		/* Total 32 bytes... */
#endif
};

struct i596_rbd {
	/* hardware data */
	unsigned short count;
	unsigned short zero1;
	u32            b_next;
	u32            b_data;		/* Address from i596 viewpoint */
	unsigned short size;
	unsigned short zero2;
	/* driver data */
	struct sk_buff *skb;
	struct i596_rbd *v_next;
	u32            b_addr;		/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPUs viewpoint */
					/* Total 32 bytes... */
#ifdef __LP64__
	u32 cache_pad[4];
#endif
};

/* These values are chosen so struct i596_dma fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16

struct i596_scb {
	unsigned short status;
	unsigned short command;
	u32           cmd;
	u32           rfd;
	u32           crc_err;
	u32           align_err;
	u32           resource_err;
	u32           over_err;
	u32           rcvdt_err;
	u32           short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	u32 stat;
	u32 scb;
};

struct i596_scp {
	u32 sysbus;
	u32 pad;
	u32 iscp;
};

struct i596_dma {
	struct i596_scp scp			__attribute__((aligned(32)));
	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
	volatile struct i596_scb scb		__attribute__((aligned(32)));
	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
};

struct i596_private {
	struct i596_dma *dma;
	u32    stat;
	int last_restart;
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	u32    last_cmd;
	int next_tx_cmd;
	int options;
	spinlock_t lock;	/* serialize access to chip */
	dma_addr_t dma_addr;
	void __iomem *mpu_port;
	void __iomem *ca;
};

static const char init_setup[] =
{
	0x8E,	/* length, prefetch on */
	0xC8,	/* fifo to 8, monitor off */
	0x80,	/* don't save bad frames */
	0x2E,	/* No source address insertion, 8 byte preamble */
	0x00,	/* priority and backoff defaults */
	0x60,	/* interframe spacing */
	0x00,	/* slot time LSB */
	0xf2,	/* slot time and retries */
	0x00,	/* promiscuous mode */
	0x00,	/* collision detect */
	0x40,	/* minimum frame length */
	0xff,
	0x00,
	0x7f	/*  *multi IA */ };
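
/* Note: only the first 14 bytes of this block are handed to the chip;
 * init_i596_mem() copies exactly 14 bytes into cf_cmd.i596_config.
 * Byte 0 (0x8E) encodes that byte count in its low bits, with 0x80
 * enabling prefetch, per the "length, prefetch on" comment above.
 */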

static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
static void mpu_port(struct net_device *dev, int c, dma_addr_t x);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev);
#endif

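/* The DMA_WBACK()/DMA_INV()/DMA_WBACK_INV() cache-maintenance macros used
 * below also come from the including wrapper.  A minimal sketch, assuming
 * a non-coherent platform such as PA-RISC (coherent boards can make these
 * no-ops):
 *
 *	#define DMA_WBACK(ndev, addr, len) \
 *		dma_cache_sync((ndev)->dev.parent, (void *)(addr), len, DMA_TO_DEVICE)
 *	#define DMA_INV(ndev, addr, len) \
 *		dma_cache_sync((ndev)->dev.parent, (void *)(addr), len, DMA_FROM_DEVICE)
 *	#define DMA_WBACK_INV(ndev, addr, len) \
 *		dma_cache_sync((ndev)->dev.parent, (void *)(addr), len, DMA_BIDIRECTIONAL)
 */
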
static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
	DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
	while (--delcnt && dma->iscp.stat) {
		udelay(10);
		DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
	}
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
		       dev->name, str, SWAP16(dma->iscp.stat));
		return -1;
	} else
		return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
	DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
	while (--delcnt && dma->scb.command) {
		udelay(10);
		DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
	}
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		       dev->name, str,
		       SWAP16(dma->scb.status),
		       SWAP16(dma->scb.command));
		return -1;
	} else
		return 0;
}

static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
	       &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
	printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
	       &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
	printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
		" .cmd = %08x, .rfd = %08x\n",
	       &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
	       SWAP32(dma->scb.cmd), SWAP32(dma->scb.rfd));
	printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
	       " over %x, rcvdt %x, short %x\n",
	       SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
	       SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
	       SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
	cmd = lp->cmd_head;
	while (cmd != NULL) {
		printk(KERN_DEBUG
		       "cmd at %p, .status = %04x, .command = %04x,"
		       " .b_next = %08x\n",
		       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
		       SWAP32(cmd->b_next));
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk(KERN_DEBUG "rfd_head = %p\n", rfd);
	do {
		printk(KERN_DEBUG
		       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
		       " count %04x\n",
		       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
		       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
		       SWAP16(rfd->count));
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk(KERN_DEBUG "rbd_head = %p\n", rbd);
	do {
		printk(KERN_DEBUG
		       "   %p .count %04x, b_next %08x, b_data %08x,"
		       " size %04x\n",
		       rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
		       SWAP32(rbd->b_data), SWAP16(rbd->size));
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
	DMA_INV(dev, dma, sizeof(struct i596_dma));
}

#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))
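
/* Translates a CPU pointer within the i596_dma block into the bus address
 * the chip must be given, e.g.:
 *
 *	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
 */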

static inline int init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
		dma_addr_t dma_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
		if (skb == NULL)
			return -1;
		dma_addr = dma_map_single(dev->dev.parent, skb->data,
					  PKT_BUF_SZ, DMA_FROM_DEVICE);
		rbd->v_next = rbd+1;
		rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
		rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = SWAP32(dma_addr);
		rbd->size = SWAP16(PKT_BUF_SZ);
	}
	lp->rbd_head = dma->rbds;
	rbd = dma->rbds + rx_ring_size - 1;
	rbd->v_next = dma->rbds;
	rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
		rfd->cmd = SWAP16(CMD_FLEX);
	}
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd = dma->rfds;
	rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
	rfd->v_prev = dma->rfds + rx_ring_size - 1;
	rfd = dma->rfds + rx_ring_size - 1;
	rfd->v_next = dma->rfds;
	rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
	return 0;
}

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dma_unmap_single(dev->dev.parent,
				 (dma_addr_t)SWAP32(rbd->b_data),
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(rbd->skb);
	}
}


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		dma->rfds[i].rbd = I596_NULL;
		dma->rfds[i].cmd = SWAP16(CMD_FLEX);
	}
	dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	lp->rbd_head = dma->rbds;
	dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
}


static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	mpu_port(dev, PORT_RESET, 0);
	udelay(100);			/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;

	dma->scp.sysbus = SYSBUS;
	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
	dma->iscp.stat = SWAP32(ISCP_BUSY);
	lp->cmd_backlog = 0;

	lp->cmd_head = NULL;
	dma->scb.cmd = I596_NULL;

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

	DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
	DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
	ca(dev);
	if (wait_istat(dev, dma, 1000, "initialization timed out"))
		goto failed;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: i82596 initialization successful\n",
			     dev->name));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		goto failed;
	}

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	dma->scb.command = 0;
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: queuing CmdConfigure\n", dev->name));
	memcpy(dma->cf_cmd.i596_config, init_setup, 14);
	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
	DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &dma->cf_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
	DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &dma->sa_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
	DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &dma->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed_free_irq;
	}
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	dma->scb.command = SWAP16(RX_START);
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	ca(dev);

	spin_unlock_irqrestore (&lp->lock, flags);
	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
		goto failed_free_irq;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: Receive unit started OK\n", dev->name));
	return 0;

failed_free_irq:
	free_irq(dev->irq, dev);
failed:
	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
	mpu_port(dev, PORT_RESET, 0);
	return -1;
}


static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME, printk(KERN_DEBUG
				"i596_rx(), rfd_head %p, rbd_head %p\n",
				lp->rfd_head, lp->rbd_head));


	rfd = lp->rfd_head;		/* Ref next frame to check */

	DMA_INV(dev, rfd, sizeof(struct i596_rfd));
	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			rbd = lp->rbd_head;
			DMA_INV(dev, rbd, sizeof(struct i596_rbd));
		} else {
			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG
					"  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
					rfd, rfd->rbd, rfd->stat));

		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
			/* a good frame */
			int pkt_len = SWAP16(rbd->count) & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				dma_addr_t dma_addr;

				dma_unmap_single(dev->dev.parent,
						 (dma_addr_t)SWAP32(rbd->b_data),
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = netdev_alloc_skb_ip_align(dev,
								   PKT_BUF_SZ);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				dma_addr = dma_map_single(dev->dev.parent,
							  newskb->data,
							  PKT_BUF_SZ,
							  DMA_FROM_DEVICE);
				rbd->v_data = newskb->data;
				rbd->b_data = SWAP32(dma_addr);
				DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
			} else {
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			}
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				dev->stats.rx_dropped++;
			} else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					dma_sync_single_for_cpu(dev->dev.parent,
								(dma_addr_t)SWAP32(rbd->b_data),
								PKT_BUF_SZ, DMA_FROM_DEVICE);
					memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
					dma_sync_single_for_device(dev->dev.parent,
								   (dma_addr_t)SWAP32(rbd->b_data),
								   PKT_BUF_SZ, DMA_FROM_DEVICE);
				}
				skb->len = pkt_len;
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		} else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG
					       "%s: Error, rfd.stat = 0x%04x\n",
					       dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if (rfd->stat & SWAP16(0x0100))
				dev->stats.collisions++;
			if (rfd->stat & SWAP16(0x8000))
				dev->stats.rx_length_errors++;
			if (rfd->stat & SWAP16(0x0001))
				dev->stats.rx_over_errors++;
			if (rfd->stat & SWAP16(0x0002))
				dev->stats.rx_fifo_errors++;
			if (rfd->stat & SWAP16(0x0004))
				dev->stats.rx_frame_errors++;
			if (rfd->stat & SWAP16(0x0008))
				dev->stats.rx_crc_errors++;
			if (rfd->stat & SWAP16(0x0010))
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
			DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
		rfd->count = 0;

		/* Update record of next frame descriptor to process */

		lp->dma->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
		DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
		rfd = lp->rfd_head;
		DMA_INV(dev, rfd, sizeof(struct i596_rfd));
	}

	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}


static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch (SWAP16(ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = NULL;
				ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
		}
		DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
	}

	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
	lp->dma->scb.cmd = I596_NULL;
	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}


static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
	unsigned long flags;

	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

	netif_stop_queue(dev);

	/* FIXME: this command might cause an lpmc */
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
	ca(dev);

	/* wait for shutdown */
	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev, lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}


static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
			       lp->cmd_head));

	cmd->status = 0;
	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
	cmd->v_next = NULL;
	cmd->b_next = I596_NULL;
	DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
		DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
		dma->scb.command = SWAP16(CUC_START);
		DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
		ca(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_ERR
		       "%s: command unit timed out, status resetting.\n",
		       dev->name);
#if 1
		i596_reset(dev, lp);
#endif
	}
}

static int i596_open(struct net_device *dev)
{
	DEB(DEB_OPEN, printk(KERN_DEBUG
			     "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (init_rx_bufs(dev)) {
		printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
		return -EAGAIN;
	}
	if (init_i596_mem(dev)) {
		printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
		goto out_remove_rx_bufs;
	}
	netif_start_queue(dev);

	return 0;

out_remove_rx_bufs:
	remove_rx_bufs(dev);
	return -EAGAIN;
}

static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS, printk(KERN_DEBUG
			       "%s: transmit timed out, status resetting.\n",
			       dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
		lp->dma->scb.command = SWAP16(CUC_START | RX_START);
		DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
		ca (dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue (dev);
}


static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;

	DEB(DEB_STARTTX, printk(KERN_DEBUG
				"%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	netif_stop_queue(dev);

	tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
	tbd = lp->dma->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: xmit ring full, dropping packet.\n",
				       dev->name));
		dev->stats.tx_dropped++;

		dev_kfree_skb_any(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = SWAP16(EOF | length);

		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
						  skb->len, DMA_TO_DEVICE);
		tbd->data = SWAP32(tx_cmd->dma_addr);

		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
		DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
		DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return NETDEV_TX_OK;
}

static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}

static const struct net_device_ops i596_netdev_ops = {
	.ndo_open		= i596_open,
	.ndo_stop		= i596_close,
	.ndo_start_xmit		= i596_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= i596_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i596_poll_controller,
#endif
};

static int i82596_probe(struct net_device *dev)
{
	int i;
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma;

	/* This lot ensures things have been cache line aligned. */
	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
	BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
#endif

	if (!dev->base_addr || !dev->irq)
		return -ENODEV;

	dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent,
		sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL);
	if (!dma) {
		printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
		return -ENOMEM;
	}

	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	memset(dma, 0, sizeof(struct i596_dma));
	lp->dma = dma;

	dma->scb.command = 0;
	dma->scb.cmd = I596_NULL;
	dma->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));

	i = register_netdev(dev);
	if (i) {
		DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
			 (void *)dma, lp->dma_addr);
		return i;
	}

	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
			      dev->name, dev->base_addr, dev->dev_addr,
			      dev->irq));
	DEB(DEB_INIT, printk(KERN_INFO
			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
			     dev->name, dma, (int)sizeof(struct i596_dma),
			     &dma->scb));

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	i596_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	struct i596_dma *dma;
	unsigned short status, ack_cmd = 0;

	lp = netdev_priv(dev);
	dma = lp->dma;

	spin_lock (&lp->lock);

	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	status = SWAP16(dma->scb.status);

	DEB(DEB_INTS, printk(KERN_DEBUG
			     "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			     dev->name, dev->irq, status));

	ack_cmd = status & 0xf000;

	if (!ack_cmd) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: interrupt with no events\n",
				       dev->name));
		spin_unlock (&lp->lock);
		return IRQ_NONE;
	}

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		if ((status & 0x8000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt completed command.\n",
				   dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt command unit inactive %x.\n",
				   dev->name, status & 0x0700));

		while (lp->cmd_head != NULL) {
			DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
			if (!(lp->cmd_head->status & SWAP16(STAT_C)))
				break;

			ptr = lp->cmd_head;

			DEB(DEB_STATUS,
			    printk(KERN_DEBUG
				   "cmd_head->status = %04x, ->command = %04x\n",
				   SWAP16(lp->cmd_head->status),
				   SWAP16(lp->cmd_head->command)));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch (SWAP16(ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if (ptr->status & SWAP16(STAT_OK)) {
					DEB(DEB_TXADDR,
					    print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if (ptr->status & SWAP16(0x0020))
						dev->stats.collisions++;
					if (!(ptr->status & SWAP16(0x0040)))
						dev->stats.tx_heartbeat_errors++;
					if (ptr->status & SWAP16(0x0400))
						dev->stats.tx_carrier_errors++;
					if (ptr->status & SWAP16(0x0800))
						dev->stats.collisions++;
					if (ptr->status & SWAP16(0x1000))
						dev->stats.tx_aborted_errors++;
				}
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);

				if (status & 0x8000) {
					DEB(DEB_ANY,
					    printk(KERN_DEBUG "%s: link ok.\n",
						   dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR
						       "%s: Transceiver problem.\n",
						       dev->name);
					if (status & 0x2000)
						printk(KERN_ERR
						       "%s: Termination problem.\n",
						       dev->name);
					if (status & 0x1000)
						printk(KERN_ERR
						       "%s: Short circuit.\n",
						       dev->name);

					DEB(DEB_TDR,
					    printk(KERN_DEBUG "%s: Time %d.\n",
						   dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
				/*
				 * Zap command so set_multicast_list() knows
				 * it is free
				 */
				ptr->command = 0;
				break;
			}
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
			DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
			lp->last_cmd = jiffies;
		}

		/* This mess is arranging that only the last of any outstanding
		 * commands has the interrupt bit set.  Should probably really
		 * only add to the cmd queue when the CU is stopped.
		 */
		ptr = lp->cmd_head;
		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
			struct i596_cmd *prev = ptr;

			ptr->command &= SWAP16(0x1fff);
			ptr = ptr->v_next;
			DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
		}

		if (lp->cmd_head != NULL)
			ack_cmd |= CUC_START;
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
		DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt received a frame.\n",
				   dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,
				    printk(KERN_DEBUG
					   "%s: i596 interrupt receive unit inactive, status 0x%x\n",
					   dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	dma->scb.command = SWAP16(ack_cmd);
	DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));

	/* DANGER: I suspect that some kind of interrupt
	 acknowledgement aside from acking the 82596 might be needed
	 here...  but it's running acceptably without */

	ca(dev);

	wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
	return IRQ_HANDLED;
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,
	    printk(KERN_DEBUG
		   "%s: Shutting down ethercard, status was %4.4x.\n",
		   dev->name, SWAP16(lp->dma->scb.status)));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "close1 timed out");
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));

	ca(dev);

	wait_cmd(dev, lp->dma, 100, "close2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT, i596_display_data(dev));
	i596_cleanup_cmd(dev, lp);

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

/*
 *	Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int config = 0, cnt;

	DEB(DEB_MULTI,
	    printk(KERN_DEBUG
		   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		   dev->name, netdev_mc_count(dev),
		   dev->flags & IFF_PROMISC ? "ON" : "OFF",
		   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if ((dev->flags & IFF_PROMISC) &&
	    !(dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) &&
	    (dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) &&
	    (dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) &&
	    !(dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		if (dma->cf_cmd.cmd.command)
			printk(KERN_INFO
			       "%s: config change request already queued\n",
			       dev->name);
		else {
			dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
			DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
			i596_add_cmd(dev, &dma->cf_cmd.cmd);
		}
	}

	cnt = netdev_mc_count(dev);
	if (cnt > MAX_MC_CNT) {
		cnt = MAX_MC_CNT;
		printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
		       dev->name, cnt);
	}

	if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		unsigned char *cp;
		struct mc_cmd *cmd;

		cmd = &dma->mc_cmd;
		cmd->cmd.command = SWAP16(CmdMulticastList);
		/* Use the clamped count so the chip never reads past mc_addrs[] */
		cmd->mc_cnt = SWAP16(cnt * 6);
		cp = cmd->mc_addrs;
		netdev_for_each_mc_addr(ha, dev) {
			if (!cnt--)
				break;
			memcpy(cp, ha->addr, ETH_ALEN);
			if (i596_debug > 1)
				DEB(DEB_MULTI,
				    printk(KERN_DEBUG
					   "%s: Adding address %pM\n",
					   dev->name, cp));
			cp += ETH_ALEN;
		}
		DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
		i596_add_cmd(dev, &cmd->cmd);
	}
}
1// SPDX-License-Identifier: GPL-1.0+
2/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
3 munged into HPPA boxen .
4
5 This driver is based upon 82596.c, original credits are below...
6 but there were too many hoops which HP wants jumped through to
7 keep this code in there in a sane manner.
8
9 3 primary sources of the mess --
10 1) hppa needs *lots* of cacheline flushing to keep this kind of
11 MMIO running.
12
13 2) The 82596 needs to see all of its pointers as their physical
14 address. Thus virt_to_bus/bus_to_virt are *everywhere*.
15
16 3) The implementation HP is using seems to be significantly pickier
17 about when and how the command and RX units are started. some
18 command ordering was changed.
19
20 Examination of the mach driver leads one to believe that there
21 might be a saner way to pull this off... anyone who feels like a
22 full rewrite can be my guest.
23
24 Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
25
26 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
27 03/02/2000 changes for better/correct(?) cache-flushing (deller)
28*/
29
30/* 82596.c: A generic 82596 ethernet driver for linux. */
31/*
32 Based on Apricot.c
33 Written 1994 by Mark Evans.
34 This driver is for the Apricot 82596 bus-master interface
35
36 Modularised 12/94 Mark Evans
37
38
39 Modified to support the 82596 ethernet chips on 680x0 VME boards.
40 by Richard Hirst <richard@sleepie.demon.co.uk>
41 Renamed to be 82596.c
42
43 980825: Changed to receive directly in to sk_buffs which are
44 allocated at open() time. Eliminates copy on incoming frames
45 (small ones are still copied). Shared data now held in a
46 non-cached page, so we can run on 68060 in copyback mode.
47
48 TBD:
49 * look at deferring rx frames rather than discarding (as per tulip)
50 * handle tx ring full as per tulip
51 * performance test to tune rx_copybreak
52
53 Most of my modifications relate to the braindead big-endian
54 implementation by Intel. When the i596 is operating in
55 'big-endian' mode, it thinks a 32 bit value of 0x12345678
56 should be stored as 0x56781234. This is a real pain, when
57 you have linked lists which are shared by the 680x0 and the
58 i596.
59
60 Driver skeleton
61 Written 1993 by Donald Becker.
62 Copyright 1993 United States Government as represented by the Director,
63 National Security Agency.
64
65 The author may be reached as becker@scyld.com, or C/O
66 Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
67
68 */
69
70#include <linux/module.h>
71#include <linux/kernel.h>
72#include <linux/string.h>
73#include <linux/errno.h>
74#include <linux/ioport.h>
75#include <linux/interrupt.h>
76#include <linux/delay.h>
77#include <linux/netdevice.h>
78#include <linux/etherdevice.h>
79#include <linux/skbuff.h>
80#include <linux/types.h>
81#include <linux/bitops.h>
82#include <linux/dma-mapping.h>
83#include <linux/io.h>
84#include <linux/irq.h>
85#include <linux/gfp.h>
86
87/* DEBUG flags
88 */
89
90#define DEB_INIT 0x0001
91#define DEB_PROBE 0x0002
92#define DEB_SERIOUS 0x0004
93#define DEB_ERRORS 0x0008
94#define DEB_MULTI 0x0010
95#define DEB_TDR 0x0020
96#define DEB_OPEN 0x0040
97#define DEB_RESET 0x0080
98#define DEB_ADDCMD 0x0100
99#define DEB_STATUS 0x0200
100#define DEB_STARTTX 0x0400
101#define DEB_RXADDR 0x0800
102#define DEB_TXADDR 0x1000
103#define DEB_RXFRAME 0x2000
104#define DEB_INTS 0x4000
105#define DEB_STRUCT 0x8000
106#define DEB_ANY 0xffff
107
108
109#define DEB(x, y) if (i596_debug & (x)) { y; }
110
111
112/*
113 * The MPU_PORT command allows direct access to the 82596. With PORT access
114 * the following commands are available (p5-18). The 32-bit port command
115 * must be word-swapped with the most significant word written first.
116 * This only applies to VME boards.
117 */
118#define PORT_RESET 0x00 /* reset 82596 */
119#define PORT_SELFTEST 0x01 /* selftest */
120#define PORT_ALTSCP 0x02 /* alternate SCB address */
121#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
122
123static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
124
125/* Copy frames shorter than rx_copybreak, otherwise pass on up in
126 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
127 */
128static int rx_copybreak = 100;
129
130#define PKT_BUF_SZ 1536
131#define MAX_MC_CNT 64
132
133#define ISCP_BUSY 0x0001
134
135#define I596_NULL ((u32)0xffffffff)
136
137#define CMD_EOL 0x8000 /* The last command of the list, stop. */
138#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
139#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
140
141#define CMD_FLEX 0x0008 /* Enable flexible memory model */
142
143enum commands {
144 CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
145 CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
146};
147
148#define STAT_C 0x8000 /* Set to 0 after execution */
149#define STAT_B 0x4000 /* Command being executed */
150#define STAT_OK 0x2000 /* Command executed ok */
151#define STAT_A 0x1000 /* Command aborted */
152
153#define CUC_START 0x0100
154#define CUC_RESUME 0x0200
155#define CUC_SUSPEND 0x0300
156#define CUC_ABORT 0x0400
157#define RX_START 0x0010
158#define RX_RESUME 0x0020
159#define RX_SUSPEND 0x0030
160#define RX_ABORT 0x0040
161
162#define TX_TIMEOUT (HZ/20)
163
164
165struct i596_reg {
166 unsigned short porthi;
167 unsigned short portlo;
168 u32 ca;
169};
170
171#define EOF 0x8000
172#define SIZE_MASK 0x3fff
173
174struct i596_tbd {
175 unsigned short size;
176 unsigned short pad;
177 u32 next;
178 u32 data;
179 u32 cache_pad[5]; /* Total 32 bytes... */
180};
181
182/* The command structure has two 'next' pointers; v_next is the address of
183 * the next command as seen by the CPU, b_next is the address of the next
184 * command as seen by the 82596. The b_next pointer, as used by the 82596
185 * always references the status field of the next command, rather than the
186 * v_next field, because the 82596 is unaware of v_next. It may seem more
187 * logical to put v_next at the end of the structure, but we cannot do that
188 * because the 82596 expects other fields to be there, depending on command
189 * type.
190 */
191
192struct i596_cmd {
193 struct i596_cmd *v_next; /* Address from CPUs viewpoint */
194 unsigned short status;
195 unsigned short command;
196 u32 b_next; /* Address from i596 viewpoint */
197};
198
199struct tx_cmd {
200 struct i596_cmd cmd;
201 u32 tbd;
202 unsigned short size;
203 unsigned short pad;
204 struct sk_buff *skb; /* So we can free it after tx */
205 dma_addr_t dma_addr;
206#ifdef __LP64__
207 u32 cache_pad[6]; /* Total 64 bytes... */
208#else
209 u32 cache_pad[1]; /* Total 32 bytes... */
210#endif
211};
212
213struct tdr_cmd {
214 struct i596_cmd cmd;
215 unsigned short status;
216 unsigned short pad;
217};
218
219struct mc_cmd {
220 struct i596_cmd cmd;
221 short mc_cnt;
222 char mc_addrs[MAX_MC_CNT*6];
223};
224
225struct sa_cmd {
226 struct i596_cmd cmd;
227 char eth_addr[8];
228};
229
230struct cf_cmd {
231 struct i596_cmd cmd;
232 char i596_config[16];
233};
234
235struct i596_rfd {
236 unsigned short stat;
237 unsigned short cmd;
238 u32 b_next; /* Address from i596 viewpoint */
239 u32 rbd;
240 unsigned short count;
241 unsigned short size;
242 struct i596_rfd *v_next; /* Address from CPUs viewpoint */
243 struct i596_rfd *v_prev;
244#ifndef __LP64__
245 u32 cache_pad[2]; /* Total 32 bytes... */
246#endif
247};
248
249struct i596_rbd {
250 /* hardware data */
251 unsigned short count;
252 unsigned short zero1;
253 u32 b_next;
254 u32 b_data; /* Address from i596 viewpoint */
255 unsigned short size;
256 unsigned short zero2;
257 /* driver data */
258 struct sk_buff *skb;
259 struct i596_rbd *v_next;
260 u32 b_addr; /* This rbd addr from i596 view */
261 unsigned char *v_data; /* Address from CPUs viewpoint */
262 /* Total 32 bytes... */
263#ifdef __LP64__
264 u32 cache_pad[4];
265#endif
266};
267
268/* These values as chosen so struct i596_dma fits in one page... */
269
270#define TX_RING_SIZE 32
271#define RX_RING_SIZE 16
272
273struct i596_scb {
274 unsigned short status;
275 unsigned short command;
276 u32 cmd;
277 u32 rfd;
278 u32 crc_err;
279 u32 align_err;
280 u32 resource_err;
281 u32 over_err;
282 u32 rcvdt_err;
283 u32 short_err;
284 unsigned short t_on;
285 unsigned short t_off;
286};
287
288struct i596_iscp {
289 u32 stat;
290 u32 scb;
291};
292
293struct i596_scp {
294 u32 sysbus;
295 u32 pad;
296 u32 iscp;
297};
298
299struct i596_dma {
300 struct i596_scp scp __attribute__((aligned(32)));
301 volatile struct i596_iscp iscp __attribute__((aligned(32)));
302 volatile struct i596_scb scb __attribute__((aligned(32)));
303 struct sa_cmd sa_cmd __attribute__((aligned(32)));
304 struct cf_cmd cf_cmd __attribute__((aligned(32)));
305 struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
306 struct mc_cmd mc_cmd __attribute__((aligned(32)));
307 struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
308 struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
309 struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
310 struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
311};
312
313struct i596_private {
314 struct i596_dma *dma;
315 u32 stat;
316 int last_restart;
317 struct i596_rfd *rfd_head;
318 struct i596_rbd *rbd_head;
319 struct i596_cmd *cmd_tail;
320 struct i596_cmd *cmd_head;
321 int cmd_backlog;
322 u32 last_cmd;
323 int next_tx_cmd;
324 int options;
325 spinlock_t lock; /* serialize access to chip */
326 dma_addr_t dma_addr;
327 void __iomem *mpu_port;
328 void __iomem *ca;
329};
330
331static const char init_setup[] =
332{
333 0x8E, /* length, prefetch on */
334 0xC8, /* fifo to 8, monitor off */
335 0x80, /* don't save bad frames */
336 0x2E, /* No source address insertion, 8 byte preamble */
337 0x00, /* priority and backoff defaults */
338 0x60, /* interframe spacing */
339 0x00, /* slot time LSB */
340 0xf2, /* slot time and retries */
341 0x00, /* promiscuous mode */
342 0x00, /* collision detect */
343 0x40, /* minimum frame length */
344 0xff,
345 0x00,
346 0x7f /* *multi IA */ };
347
348static int i596_open(struct net_device *dev);
349static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
350static irqreturn_t i596_interrupt(int irq, void *dev_id);
351static int i596_close(struct net_device *dev);
352static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
353static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
354static void print_eth(unsigned char *buf, char *str);
355static void set_multicast_list(struct net_device *dev);
356static inline void ca(struct net_device *dev);
357static void mpu_port(struct net_device *dev, int c, dma_addr_t x);
358
359static int rx_ring_size = RX_RING_SIZE;
360static int ticks_limit = 100;
361static int max_cmd_backlog = TX_RING_SIZE-1;
362
363#ifdef CONFIG_NET_POLL_CONTROLLER
364static void i596_poll_controller(struct net_device *dev);
365#endif
366
367static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v)
368{
369 return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
370}
371
372#ifdef NONCOHERENT_DMA
373static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
374 size_t len)
375{
376 dma_sync_single_for_device(ndev->dev.parent,
377 virt_to_dma(netdev_priv(ndev), addr), len,
378 DMA_BIDIRECTIONAL);
379}
380
381static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
382 size_t len)
383{
384 dma_sync_single_for_cpu(ndev->dev.parent,
385 virt_to_dma(netdev_priv(ndev), addr), len,
386 DMA_BIDIRECTIONAL);
387}
388#else
389static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
390 size_t len)
391{
392}
393static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
394 size_t len)
395{
396}
397#endif /* NONCOHERENT_DMA */
398
399static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
400{
401 dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
402 while (--delcnt && dma->iscp.stat) {
403 udelay(10);
404 dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
405 }
406 if (!delcnt) {
407 printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
408 dev->name, str, SWAP16(dma->iscp.stat));
409 return -1;
410 } else
411 return 0;
412}
413
414
415static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
416{
417 dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
418 while (--delcnt && dma->scb.command) {
419 udelay(10);
420 dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
421 }
422 if (!delcnt) {
423 printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
424 dev->name, str,
425 SWAP16(dma->scb.status),
426 SWAP16(dma->scb.command));
427 return -1;
428 } else
429 return 0;
430}
431
432
433static void i596_display_data(struct net_device *dev)
434{
435 struct i596_private *lp = netdev_priv(dev);
436 struct i596_dma *dma = lp->dma;
437 struct i596_cmd *cmd;
438 struct i596_rfd *rfd;
439 struct i596_rbd *rbd;
440
441 printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
442 &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
443 printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
444 &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
445 printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
446 " .cmd = %08x, .rfd = %08x\n",
447 &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
448 SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
449 printk(KERN_DEBUG " errors: crc %x, align %x, resource %x,"
450 " over %x, rcvdt %x, short %x\n",
451 SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
452 SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
453 SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
454 cmd = lp->cmd_head;
455 while (cmd != NULL) {
456 printk(KERN_DEBUG
457 "cmd at %p, .status = %04x, .command = %04x,"
458 " .b_next = %08x\n",
459 cmd, SWAP16(cmd->status), SWAP16(cmd->command),
460 SWAP32(cmd->b_next));
461 cmd = cmd->v_next;
462 }
463 rfd = lp->rfd_head;
464 printk(KERN_DEBUG "rfd_head = %p\n", rfd);
465 do {
466 printk(KERN_DEBUG
467 " %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
468 " count %04x\n",
469 rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
470 SWAP32(rfd->b_next), SWAP32(rfd->rbd),
471 SWAP16(rfd->count));
472 rfd = rfd->v_next;
473 } while (rfd != lp->rfd_head);
474 rbd = lp->rbd_head;
475 printk(KERN_DEBUG "rbd_head = %p\n", rbd);
476 do {
477 printk(KERN_DEBUG
478 " %p .count %04x, b_next %08x, b_data %08x,"
479 " size %04x\n",
480 rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
481 SWAP32(rbd->b_data), SWAP16(rbd->size));
482 rbd = rbd->v_next;
483 } while (rbd != lp->rbd_head);
484 dma_sync_cpu(dev, dma, sizeof(struct i596_dma));
485}
486
static inline int init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
		dma_addr_t dma_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
		if (skb == NULL)
			return -1;
		dma_addr = dma_map_single(dev->dev.parent, skb->data,
					  PKT_BUF_SZ, DMA_FROM_DEVICE);
		rbd->v_next = rbd+1;
		rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
		rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = SWAP32(dma_addr);
		rbd->size = SWAP16(PKT_BUF_SZ);
	}
	lp->rbd_head = dma->rbds;
	rbd = dma->rbds + rx_ring_size - 1;
	rbd->v_next = dma->rbds;
	rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
		rfd->cmd = SWAP16(CMD_FLEX);
	}
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd = dma->rfds;
	rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
	rfd->v_prev = dma->rfds + rx_ring_size - 1;
	rfd = dma->rfds + rx_ring_size - 1;
	rfd->v_next = dma->rfds;
	rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);

	dma_sync_dev(dev, dma, sizeof(struct i596_dma));
	return 0;
}

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dma_unmap_single(dev->dev.parent,
				 (dma_addr_t)SWAP32(rbd->b_data),
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(rbd->skb);
		rbd->skb = NULL;	/* so a partial ring isn't freed twice */
	}
}

static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		dma->rfds[i].rbd = I596_NULL;
		dma->rfds[i].cmd = SWAP16(CMD_FLEX);
	}
	dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	lp->rbd_head = dma->rbds;
	dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));

	dma_sync_dev(dev, dma, sizeof(struct i596_dma));
}

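/* Chip bring-up: reset the part, point it at the SCP via PORT_ALTSCP,
 * then assert channel attention and wait for it to clear ISCP_BUSY.
 * Once the chip answers we queue the three standard setup actions
 * (CmdConfigure, CmdSASetup, CmdTDR) and finally start the receive
 * unit with RX_START, re-checking scb.command before and after so we
 * know the chip actually took it.
 */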
static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	mpu_port(dev, PORT_RESET, 0);
	udelay(100);			/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;

	dma->scp.sysbus = SYSBUS;
	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
	dma->iscp.stat = SWAP32(ISCP_BUSY);
	lp->cmd_backlog = 0;

	lp->cmd_head = NULL;
	dma->scb.cmd = I596_NULL;

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

	dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp));
	dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp));
	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));

	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
	ca(dev);
	if (wait_istat(dev, dma, 1000, "initialization timed out"))
		goto failed;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: i82596 initialization successful\n",
			     dev->name));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		goto failed;
	}

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	dma->scb.command = 0;
	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));

	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: queuing CmdConfigure\n", dev->name));
	memcpy(dma->cf_cmd.i596_config, init_setup, 14);
	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
	dma_sync_dev(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &dma->cf_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
	dma_sync_dev(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &dma->sa_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
	dma_sync_dev(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &dma->tdr_cmd.cmd);

	spin_lock_irqsave(&lp->lock, flags);

	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore(&lp->lock, flags);
		goto failed_free_irq;
	}
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	dma->scb.command = SWAP16(RX_START);
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));

	ca(dev);

	spin_unlock_irqrestore(&lp->lock, flags);
	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
		goto failed_free_irq;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: Receive unit started OK\n", dev->name));
	return 0;

failed_free_irq:
	free_irq(dev->irq, dev);
failed:
	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
	mpu_port(dev, PORT_RESET, 0);
	return -1;
}

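/* Receive path.  Walk the RFD ring from rfd_head while the chip has
 * marked frames complete (STAT_C).  A good frame bigger than
 * rx_copybreak is passed up in its existing skb, which is then
 * replaced with a fresh one; smaller frames are copied into a new,
 * tightly-sized skb so the big PKT_BUF_SZ buffer can be reused in
 * place.  Either way the RFD is cleared, marked end-of-list and handed
 * back to the chip, and the EOL mark is removed from the previous end
 * of the ring.
 */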
static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME, printk(KERN_DEBUG
				"i596_rx(), rfd_head %p, rbd_head %p\n",
				lp->rfd_head, lp->rbd_head));


	rfd = lp->rfd_head;		/* Ref next frame to check */

	dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			rbd = lp->rbd_head;
			dma_sync_cpu(dev, rbd, sizeof(struct i596_rbd));
		} else {
			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG
					"  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
					rfd, rfd->rbd, rfd->stat));

		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
			/* a good frame */
			int pkt_len = SWAP16(rbd->count) & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				dma_addr_t dma_addr;

				dma_unmap_single(dev->dev.parent,
						 (dma_addr_t)SWAP32(rbd->b_data),
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = netdev_alloc_skb_ip_align(dev,
								   PKT_BUF_SZ);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				dma_addr = dma_map_single(dev->dev.parent,
							  newskb->data,
							  PKT_BUF_SZ,
							  DMA_FROM_DEVICE);
				rbd->v_data = newskb->data;
				rbd->b_data = SWAP32(dma_addr);
				dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
			} else {
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			}
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				dev->stats.rx_dropped++;
			} else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					dma_sync_single_for_cpu(dev->dev.parent,
								(dma_addr_t)SWAP32(rbd->b_data),
								PKT_BUF_SZ, DMA_FROM_DEVICE);
					skb_put_data(skb, rbd->v_data,
						     pkt_len);
					dma_sync_single_for_device(dev->dev.parent,
								   (dma_addr_t)SWAP32(rbd->b_data),
								   PKT_BUF_SZ, DMA_FROM_DEVICE);
				}
				skb->len = pkt_len;
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		} else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG
					       "%s: Error, rfd.stat = 0x%04x\n",
					       dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if (rfd->stat & SWAP16(0x0100))
				dev->stats.collisions++;
			if (rfd->stat & SWAP16(0x8000))
				dev->stats.rx_length_errors++;
			if (rfd->stat & SWAP16(0x0001))
				dev->stats.rx_over_errors++;
			if (rfd->stat & SWAP16(0x0002))
				dev->stats.rx_fifo_errors++;
			if (rfd->stat & SWAP16(0x0004))
				dev->stats.rx_frame_errors++;
			if (rfd->stat & SWAP16(0x0008))
				dev->stats.rx_crc_errors++;
			if (rfd->stat & SWAP16(0x0010))
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
			dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
		rfd->count = 0;

		/* Update record of next frame descriptor to process */

		lp->dma->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		dma_sync_dev(dev, rfd, sizeof(struct i596_rfd));

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
		dma_sync_dev(dev, rfd->v_prev, sizeof(struct i596_rfd));
		rfd = lp->rfd_head;
		dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
	}

	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}

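/* Drain the command queue after an abort: any queued transmit still
 * holds a mapped skb which must be unmapped and freed (and is counted
 * as an aborted tx); everything else is simply unlinked.
 */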
static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch (SWAP16(ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = NULL;
				ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
		}
		dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
	}

	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
	lp->dma->scb.cmd = I596_NULL;
	dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}

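/* Full recovery path: abort both the command and receive units under
 * the lock, flush the command queue and any completed frames, then
 * reinitialise the chip from scratch via init_i596_mem().
 */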
static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
	unsigned long flags;

	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

	netif_stop_queue(dev);

	/* FIXME: this command might cause an lpmc */
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
	ca(dev);

	/* wait for shutdown */
	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);

	i596_cleanup_cmd(dev, lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}

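/* Append a command to the chip's action queue.  Every command is
 * queued with CMD_EOL | CMD_INTR set so the chip stops and interrupts
 * after it; the interrupt handler later strips the interrupt bit from
 * all but the last outstanding command.  If the queue was empty the
 * command unit is kicked directly with CUC_START, otherwise the new
 * command is just linked onto the tail.  A backlog that fails to
 * drain within ticks_limit triggers a full reset.
 */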
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
			       lp->cmd_head));

	cmd->status = 0;
	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
	cmd->v_next = NULL;
	cmd->b_next = I596_NULL;
	dma_sync_dev(dev, cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->cmd_head != NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
		dma_sync_dev(dev, lp->cmd_tail, sizeof(struct i596_cmd));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
		dma->scb.command = SWAP16(CUC_START);
		dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
		ca(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore(&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_ERR
		       "%s: command unit timed out, status resetting.\n",
		       dev->name);
#if 1
		i596_reset(dev, lp);
#endif
	}
}

static int i596_open(struct net_device *dev)
{
	DEB(DEB_OPEN, printk(KERN_DEBUG
			     "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (init_rx_bufs(dev)) {
		printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
		goto out_remove_rx_bufs;	/* free whatever we did map */
	}
	if (init_i596_mem(dev)) {
		printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
		goto out_remove_rx_bufs;
	}
	netif_start_queue(dev);

	return 0;

out_remove_rx_bufs:
	remove_rx_bufs(dev);
	return -EAGAIN;
}

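/* Transmit watchdog.  The cheap option is to kick the command and
 * receive units back into life with a channel attention; if that was
 * already tried once without a single packet getting out since, fall
 * back to a full reset.
 */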
static void i596_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct i596_private *lp = netdev_priv(dev);

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS, printk(KERN_DEBUG
			       "%s: transmit timed out, status resetting.\n",
			       dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset(dev, lp);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
		lp->dma->scb.command = SWAP16(CUC_START | RX_START);
		dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
		ca(dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}

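/* Transmit path.  Each packet takes one tx_cmd plus one buffer
 * descriptor (tbd) from a TX_RING_SIZE pool; a slot whose command word
 * is still non-zero has not yet been reclaimed by the interrupt
 * handler, in which case the packet is simply dropped rather than the
 * queue being stopped early.
 */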
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;

	DEB(DEB_STARTTX, printk(KERN_DEBUG
				"%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	netif_stop_queue(dev);

	tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
	tbd = lp->dma->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: xmit ring full, dropping packet.\n",
				       dev->name));
		dev->stats.tx_dropped++;

		dev_kfree_skb_any(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = SWAP16(EOF | length);

		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
						  skb->len, DMA_TO_DEVICE);
		tbd->data = SWAP32(tx_cmd->dma_addr);

		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
		dma_sync_dev(dev, tx_cmd, sizeof(struct tx_cmd));
		dma_sync_dev(dev, tbd, sizeof(struct i596_tbd));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return NETDEV_TX_OK;
}

static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}

static const struct net_device_ops i596_netdev_ops = {
	.ndo_open		= i596_open,
	.ndo_stop		= i596_close,
	.ndo_start_xmit		= i596_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= i596_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i596_poll_controller,
#endif
};

static int i82596_probe(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	int ret;

	/* This lot is to ensure things have been cache line aligned. */
	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
	BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
#endif

	if (!dev->base_addr || !dev->irq)
		return -ENODEV;

	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	memset(lp->dma, 0, sizeof(struct i596_dma));
	lp->dma->scb.command = 0;
	lp->dma->scb.cmd = I596_NULL;
	lp->dma->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	dma_sync_dev(dev, lp->dma, sizeof(struct i596_dma));

	ret = register_netdev(dev);
	if (ret)
		return ret;

	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
			      dev->name, dev->base_addr, dev->dev_addr,
			      dev->irq));
	DEB(DEB_INIT, printk(KERN_INFO
			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
			     dev->name, lp->dma, (int)sizeof(struct i596_dma),
			     &lp->dma->scb));

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	i596_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

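/* Interrupt handler.  The top four bits of scb.status say why we were
 * called: 0x8000 (command completed) and 0x2000 (command unit went
 * inactive) mean the command queue needs reaping, while 0x4000 (frame
 * received) and 0x1000 (receive unit not ready) mean receive work.
 * Whatever we saw is written straight back into scb.command as the
 * acknowledgement, with CUC_START/RX_START folded in if either unit
 * needs restarting.
 */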
static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	struct i596_dma *dma;
	unsigned short status, ack_cmd = 0;

	lp = netdev_priv(dev);
	dma = lp->dma;

	spin_lock(&lp->lock);

	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	status = SWAP16(dma->scb.status);

	DEB(DEB_INTS, printk(KERN_DEBUG
			     "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			     dev->name, dev->irq, status));

	ack_cmd = status & 0xf000;

	if (!ack_cmd) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: interrupt with no events\n",
				       dev->name));
		spin_unlock(&lp->lock);
		return IRQ_NONE;
	}

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		if ((status & 0x8000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt completed command.\n",
				   dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt command unit inactive %x.\n",
				   dev->name, status & 0x0700));

		while (lp->cmd_head != NULL) {
			dma_sync_cpu(dev, lp->cmd_head, sizeof(struct i596_cmd));
			if (!(lp->cmd_head->status & SWAP16(STAT_C)))
				break;

			ptr = lp->cmd_head;

			DEB(DEB_STATUS,
			    printk(KERN_DEBUG
				   "cmd_head->status = %04x, ->command = %04x\n",
				   SWAP16(lp->cmd_head->status),
				   SWAP16(lp->cmd_head->command)));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch (SWAP16(ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if (ptr->status & SWAP16(STAT_OK)) {
					DEB(DEB_TXADDR,
					    print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if (ptr->status & SWAP16(0x0020))
						dev->stats.collisions++;
					if (!(ptr->status & SWAP16(0x0040)))
						dev->stats.tx_heartbeat_errors++;
					if (ptr->status & SWAP16(0x0400))
						dev->stats.tx_carrier_errors++;
					if (ptr->status & SWAP16(0x0800))
						dev->stats.collisions++;
					if (ptr->status & SWAP16(0x1000))
						dev->stats.tx_aborted_errors++;
				}
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);
				dev_consume_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);

				if (status & 0x8000) {
					DEB(DEB_ANY,
					    printk(KERN_DEBUG "%s: link ok.\n",
						   dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR
						       "%s: Transceiver problem.\n",
						       dev->name);
					if (status & 0x2000)
						printk(KERN_ERR
						       "%s: Termination problem.\n",
						       dev->name);
					if (status & 0x1000)
						printk(KERN_ERR
						       "%s: Short circuit.\n",
						       dev->name);

					DEB(DEB_TDR,
					    printk(KERN_DEBUG "%s: Time %d.\n",
						   dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
				/*
				 * Zap command so set_multicast_list() knows
				 * it is free
				 */
				ptr->command = 0;
				break;
			}
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
			dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
			lp->last_cmd = jiffies;
		}

		/* This mess is arranging that only the last of any outstanding
		 * commands has the interrupt bit set.  Should probably really
		 * only add to the cmd queue when the CU is stopped.
		 */
		ptr = lp->cmd_head;
		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
			struct i596_cmd *prev = ptr;

			ptr->command &= SWAP16(0x1fff);
			ptr = ptr->v_next;
			dma_sync_dev(dev, prev, sizeof(struct i596_cmd));
		}

		if (lp->cmd_head != NULL)
			ack_cmd |= CUC_START;
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
		dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt received a frame.\n",
				   dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,
				    printk(KERN_DEBUG
					   "%s: i596 interrupt receive unit inactive, status 0x%x\n",
					   dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	dma->scb.command = SWAP16(ack_cmd);
	dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));

	/* DANGER: I suspect that some kind of interrupt
	   acknowledgement aside from acking the 82596 might be needed
	   here...  but it's running acceptably without */

	ca(dev);

	wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock(&lp->lock);
	return IRQ_HANDLED;
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,
	    printk(KERN_DEBUG
		   "%s: Shutting down ethercard, status was %4.4x.\n",
		   dev->name, SWAP16(lp->dma->scb.status)));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "close1 timed out");
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	dma_sync_dev(dev, &lp->dma->scb, sizeof(struct i596_scb));

	ca(dev);

	wait_cmd(dev, lp->dma, 100, "close2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT, i596_display_data(dev));
	i596_cleanup_cmd(dev, lp);

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

/*
 *	Set or clear the multicast filter for this adaptor.
 */

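/* Promiscuous and all-multicast are toggled via bits in the chip's
 * configure block: as used below, bit 0x01 of config byte 8 enables
 * promiscuous mode, and bit 0x20 of config byte 11 appears to disable
 * "accept all multicast" when set.  A changed block is requeued as a
 * CmdConfigure; explicit multicast addresses go out separately in a
 * CmdMulticastList holding up to MAX_MC_CNT six-byte addresses.
 */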
static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int config = 0, cnt;

	DEB(DEB_MULTI,
	    printk(KERN_DEBUG
		   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		   dev->name, netdev_mc_count(dev),
		   dev->flags & IFF_PROMISC ? "ON" : "OFF",
		   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if ((dev->flags & IFF_PROMISC) &&
	    !(dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) &&
	    (dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) &&
	    (dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) &&
	    !(dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		if (dma->cf_cmd.cmd.command)
			printk(KERN_INFO
			       "%s: config change request already queued\n",
			       dev->name);
		else {
			dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
			dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
			i596_add_cmd(dev, &dma->cf_cmd.cmd);
		}
	}

	cnt = netdev_mc_count(dev);
	if (cnt > MAX_MC_CNT) {
		cnt = MAX_MC_CNT;
		printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
		       dev->name, cnt);
	}

	if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		unsigned char *cp;
		struct mc_cmd *cmd;

		cmd = &dma->mc_cmd;
		cmd->cmd.command = SWAP16(CmdMulticastList);
		cmd->mc_cnt = SWAP16(cnt * ETH_ALEN);	/* clamped count */
		cp = cmd->mc_addrs;
		netdev_for_each_mc_addr(ha, dev) {
			if (!cnt--)
				break;
			memcpy(cp, ha->addr, ETH_ALEN);
			if (i596_debug > 1)
				DEB(DEB_MULTI,
				    printk(KERN_DEBUG
					   "%s: Adding address %pM\n",
					   dev->name, cp));
			cp += ETH_ALEN;
		}
		dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
		i596_add_cmd(dev, &cmd->cmd);
	}
}