   1/* lasi_82596.c -- driver for the Intel 82596 ethernet controller, as
   2   munged into HPPA boxen.
   3
   4   This driver is based upon 82596.c, original credits are below...
   5   but there were too many hoops which HP wants jumped through to
   6   keep this code in there in a sane manner.
   7
   8   3 primary sources of the mess --
   9   1) hppa needs *lots* of cacheline flushing to keep this kind of
  10   MMIO running.
  11
  12   2) The 82596 needs to see all of its pointers as their physical
  13   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.
  14
  15   3) The implementation HP is using seems to be significantly pickier
  16   about when and how the command and RX units are started.  Some
  17   command ordering was changed.
  18
  19   Examination of the mach driver leads one to believe that there
  20   might be a saner way to pull this off...  anyone who feels like a
  21   full rewrite can be my guest.
  22
  23   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
  24
  25   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
  26   03/02/2000  changes for better/correct(?) cache-flushing (deller)
  27*/
  28
  29/* 82596.c: A generic 82596 ethernet driver for linux. */
  30/*
  31   Based on Apricot.c
  32   Written 1994 by Mark Evans.
  33   This driver is for the Apricot 82596 bus-master interface.
  34
  35   Modularised 12/94 Mark Evans
  36
  37
  38   Modified to support the 82596 ethernet chips on 680x0 VME boards.
  39   by Richard Hirst <richard@sleepie.demon.co.uk>
  40   Renamed to be 82596.c
  41
  42   980825:  Changed to receive directly into sk_buffs which are
  43   allocated at open() time.  Eliminates copy on incoming frames
  44   (small ones are still copied).  Shared data now held in a
  45   non-cached page, so we can run on 68060 in copyback mode.
  46
  47   TBD:
  48   * look at deferring rx frames rather than discarding (as per tulip)
  49   * handle tx ring full as per tulip
  50   * performance test to tune rx_copybreak
  51
  52   Most of my modifications relate to the braindead big-endian
  53   implementation by Intel.  When the i596 is operating in
  54   'big-endian' mode, it thinks a 32 bit value of 0x12345678
  55   should be stored as 0x56781234.  This is a real pain, when
  56   you have linked lists which are shared by the 680x0 and the
  57   i596.
  58
  59   Driver skeleton
  60   Written 1993 by Donald Becker.
  61   Copyright 1993 United States Government as represented by the Director,
  62   National Security Agency. This software may only be used and distributed
  63   according to the terms of the GNU General Public License as modified by SRC,
  64   incorporated herein by reference.
  65
  66   The author may be reached as becker@scyld.com, or C/O
  67   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
  68
  69 */
  70
  71#include <linux/module.h>
  72#include <linux/kernel.h>
  73#include <linux/string.h>
  74#include <linux/errno.h>
  75#include <linux/ioport.h>
  76#include <linux/interrupt.h>
  77#include <linux/delay.h>
  78#include <linux/netdevice.h>
  79#include <linux/etherdevice.h>
  80#include <linux/skbuff.h>
  81#include <linux/types.h>
  82#include <linux/bitops.h>
  83#include <linux/dma-mapping.h>
  84#include <linux/io.h>
  85#include <linux/irq.h>
  86#include <linux/gfp.h>
  87
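    /*
     * Note: this file is not built standalone; it is #included by a bus
     * wrapper (lasi_82596.c on parisc, sni_82596.c on SNI RM machines),
     * which is expected to provide SWAP16()/SWAP32(), the DMA_* allocation
     * and cache-maintenance macros, SYSBUS, and the mpu_port()/ca()
     * accessors.  As a hedged sketch only, the word swap described in the
     * header comment above amounts to something like:
     *
     *	#define SWAP32(x)	((((u32)(x)) << 16) | (((u32)(x)) >> 16))
     *
     * i.e. 0x12345678 <-> 0x56781234; the real per-platform definitions
     * live in the including file.
     */
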
  88/* DEBUG flags
  89 */
  90
  91#define DEB_INIT	0x0001
  92#define DEB_PROBE	0x0002
  93#define DEB_SERIOUS	0x0004
  94#define DEB_ERRORS	0x0008
  95#define DEB_MULTI	0x0010
  96#define DEB_TDR		0x0020
  97#define DEB_OPEN	0x0040
  98#define DEB_RESET	0x0080
  99#define DEB_ADDCMD	0x0100
 100#define DEB_STATUS	0x0200
 101#define DEB_STARTTX	0x0400
 102#define DEB_RXADDR	0x0800
 103#define DEB_TXADDR	0x1000
 104#define DEB_RXFRAME	0x2000
 105#define DEB_INTS	0x4000
 106#define DEB_STRUCT	0x8000
 107#define DEB_ANY		0xffff
 108
 109
 110#define DEB(x, y)	do { if (i596_debug & (x)) { y; } } while (0)
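    /* Typical use, as throughout this file:
     *	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
     */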
 111
 112
 113/*
 114 * The MPU_PORT command allows direct access to the 82596. With PORT access
 115 * the following commands are available (p5-18). The 32-bit port command
 116 * must be word-swapped with the most significant word written first.
 117 * This only applies to VME boards.
 118 */
 119#define PORT_RESET		0x00	/* reset 82596 */
 120#define PORT_SELFTEST		0x01	/* selftest */
 121#define PORT_ALTSCP		0x02	/* alternate SCB address */
 122#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
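
    /*
     * Illustrative only -- the real mpu_port() comes from the bus wrapper.
     * A PORT command is the 16-byte-aligned operand address OR'd with one
     * of the opcodes above; per the word-swap rule, a VME-style board
     * would push the most significant halfword out first through an
     * i596_reg window (struct defined below), roughly:
     *
     *	u32 v = (u32)dma_addr | PORT_ALTSCP;
     *	writew(v >> 16, &regs->porthi);		// MSW first
     *	writew(v & 0xffff, &regs->portlo);
     *
     * where "regs" is a hypothetical struct i596_reg __iomem pointer.
     */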
 123
 124static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
 125
 126/* Copy frames shorter than rx_copybreak, otherwise pass them up in
 127 * a full-sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 128 */
 129static int rx_copybreak = 100;
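    /* In i596_rx(): frames longer than rx_copybreak are handed up in the
     * ring's own skb (the ring slot then gets a freshly mapped buffer),
     * while shorter frames are memcpy'd into a small newly allocated skb.
     */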
 130
 131#define PKT_BUF_SZ	1536
 132#define MAX_MC_CNT	64
 133
 134#define ISCP_BUSY	0x0001
 135
 136#define I596_NULL ((u32)0xffffffff)
 137
 138#define CMD_EOL		0x8000	/* The last command of the list, stop. */
 139#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
 140#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */
 141
 142#define CMD_FLEX	0x0008	/* Enable flexible memory model */
 143
 144enum commands {
 145	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
 146	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
 147};
 148
 149#define STAT_C		0x8000	/* Set to 0 after execution */
 150#define STAT_B		0x4000	/* Command being executed */
 151#define STAT_OK		0x2000	/* Command executed ok */
 152#define STAT_A		0x1000	/* Command aborted */
 153
 154#define	 CUC_START	0x0100
 155#define	 CUC_RESUME	0x0200
 156#define	 CUC_SUSPEND    0x0300
 157#define	 CUC_ABORT	0x0400
 158#define	 RX_START	0x0010
 159#define	 RX_RESUME	0x0020
 160#define	 RX_SUSPEND	0x0030
 161#define	 RX_ABORT	0x0040
 162
 163#define TX_TIMEOUT	(HZ/20)
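    /* i.e. 50 ms; handed to dev->watchdog_timeo in i82596_probe() */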
 164
 165
 166struct i596_reg {
 167	unsigned short porthi;
 168	unsigned short portlo;
 169	u32            ca;
 170};
 171
 172#define EOF		0x8000
 173#define SIZE_MASK	0x3fff
 174
 175struct i596_tbd {
 176	unsigned short size;
 177	unsigned short pad;
 178	u32            next;
 179	u32            data;
 180	u32 cache_pad[5];		/* Total 32 bytes... */
 181};
 182
 183/* The command structure has two 'next' pointers; v_next is the address of
 184 * the next command as seen by the CPU, b_next is the address of the next
 185 * command as seen by the 82596.  The b_next pointer, as used by the 82596,
 186 * always references the status field of the next command, rather than the
 187 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 188 * logical to put v_next at the end of the structure, but we cannot do that
 189 * because the 82596 expects other fields to be there, depending on command
 190 * type.
 191 */
 192
 193struct i596_cmd {
 194	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
 195	unsigned short status;
 196	unsigned short command;
 197	u32            b_next;	/* Address from i596 viewpoint */
 198};
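
    /*
     * Concrete example of the dual links described above, as done by
     * i596_add_cmd() when appending to a non-empty queue -- the chip-side
     * link always targets the status field, never v_next:
     *
     *	lp->cmd_tail->v_next = cmd;
     *	lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
     */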
 199
 200struct tx_cmd {
 201	struct i596_cmd cmd;
 202	u32            tbd;
 203	unsigned short size;
 204	unsigned short pad;
 205	struct sk_buff *skb;		/* So we can free it after tx */
 206	dma_addr_t dma_addr;
 207#ifdef __LP64__
 208	u32 cache_pad[6];		/* Total 64 bytes... */
 209#else
 210	u32 cache_pad[1];		/* Total 32 bytes... */
 211#endif
 212};
 213
 214struct tdr_cmd {
 215	struct i596_cmd cmd;
 216	unsigned short status;
 217	unsigned short pad;
 218};
 219
 220struct mc_cmd {
 221	struct i596_cmd cmd;
 222	short mc_cnt;
 223	char mc_addrs[MAX_MC_CNT*6];
 224};
 225
 226struct sa_cmd {
 227	struct i596_cmd cmd;
 228	char eth_addr[8];
 229};
 230
 231struct cf_cmd {
 232	struct i596_cmd cmd;
 233	char i596_config[16];
 234};
 235
 236struct i596_rfd {
 237	unsigned short stat;
 238	unsigned short cmd;
 239	u32            b_next;	/* Address from i596 viewpoint */
 240	u32            rbd;
 241	unsigned short count;
 242	unsigned short size;
 243	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
 244	struct i596_rfd *v_prev;
 245#ifndef __LP64__
 246	u32 cache_pad[2];		/* Total 32 bytes... */
 247#endif
 248};
 249
 250struct i596_rbd {
 251	/* hardware data */
 252	unsigned short count;
 253	unsigned short zero1;
 254	u32            b_next;
 255	u32            b_data;		/* Address from i596 viewpoint */
 256	unsigned short size;
 257	unsigned short zero2;
 258	/* driver data */
 259	struct sk_buff *skb;
 260	struct i596_rbd *v_next;
 261	u32            b_addr;		/* This rbd addr from i596 view */
 262	unsigned char *v_data;		/* Address from CPUs viewpoint */
 263					/* Total 32 bytes... */
 264#ifdef __LP64__
 265	u32 cache_pad[4];
 266#endif
 267};
 268
 269/* These values are chosen so struct i596_dma fits in one page... */
 270
 271#define TX_RING_SIZE 32
 272#define RX_RING_SIZE 16
 273
 274struct i596_scb {
 275	unsigned short status;
 276	unsigned short command;
 277	u32           cmd;
 278	u32           rfd;
 279	u32           crc_err;
 280	u32           align_err;
 281	u32           resource_err;
 282	u32           over_err;
 283	u32           rcvdt_err;
 284	u32           short_err;
 285	unsigned short t_on;
 286	unsigned short t_off;
 287};
 288
 289struct i596_iscp {
 290	u32 stat;
 291	u32 scb;
 292};
 293
 294struct i596_scp {
 295	u32 sysbus;
 296	u32 pad;
 297	u32 iscp;
 298};
 299
 300struct i596_dma {
 301	struct i596_scp scp		        __attribute__((aligned(32)));
 302	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
 303	volatile struct i596_scb scb		__attribute__((aligned(32)));
 304	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
 305	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
 306	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
 307	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
 308	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
 309	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
 310	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
 311	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
 312};
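
    /*
     * Back-of-envelope check of the "one page" claim above, for a 32-bit
     * build where each member pads out to a 32-byte multiple: the four
     * rings take (16 + 16 + 32 + 32) * 32 = 3072 bytes, and the aligned
     * control blocks plus one-shot commands add roughly another 640 --
     * about 3.7 KB in all.  i82596_probe() enforces the limit with
     * BUILD_BUG_ON(sizeof(struct i596_dma) > 4096).
     */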
 313
 314struct i596_private {
 315	struct i596_dma *dma;
 316	u32    stat;
 317	int last_restart;
 318	struct i596_rfd *rfd_head;
 319	struct i596_rbd *rbd_head;
 320	struct i596_cmd *cmd_tail;
 321	struct i596_cmd *cmd_head;
 322	int cmd_backlog;
 323	u32    last_cmd;
 324	int next_tx_cmd;
 325	int options;
 326	spinlock_t lock;       /* serialize access to chip */
 327	dma_addr_t dma_addr;
 328	void __iomem *mpu_port;
 329	void __iomem *ca;
 330};
 331
 332static const char init_setup[] =
 333{
 334	0x8E,		/* length, prefetch on */
 335	0xC8,		/* fifo to 8, monitor off */
 336	0x80,		/* don't save bad frames */
 337	0x2E,		/* No source address insertion, 8 byte preamble */
 338	0x00,		/* priority and backoff defaults */
 339	0x60,		/* interframe spacing */
 340	0x00,		/* slot time LSB */
 341	0xf2,		/* slot time and retries */
 342	0x00,		/* promiscuous mode */
 343	0x00,		/* collision detect */
 344	0x40,		/* minimum frame length */
 345	0xff,
 346	0x00,
 347	0x7f /*  *multi IA */ };
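
    /*
     * init_i596_mem() copies the first 14 of these bytes into
     * cf_cmd.i596_config for CmdConfigure.  Byte 0 (0x8E) carries the
     * config byte count (0x0E == 14) in its low bits, with the prefetch
     * enable bit set on top -- hence the "length, prefetch on" note above.
     */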
 348
 349static int i596_open(struct net_device *dev);
 350static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
 351static irqreturn_t i596_interrupt(int irq, void *dev_id);
 352static int i596_close(struct net_device *dev);
 353static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
 354static void i596_tx_timeout (struct net_device *dev);
 355static void print_eth(unsigned char *buf, char *str);
 356static void set_multicast_list(struct net_device *dev);
 357static inline void ca(struct net_device *dev);
 358static void mpu_port(struct net_device *dev, int c, dma_addr_t x);
 359
 360static int rx_ring_size = RX_RING_SIZE;
 361static int ticks_limit = 100;
 362static int max_cmd_backlog = TX_RING_SIZE-1;
 363
 364#ifdef CONFIG_NET_POLL_CONTROLLER
 365static void i596_poll_controller(struct net_device *dev);
 366#endif
 367
 368
 369static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
 370{
 371	DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
 372	while (--delcnt && dma->iscp.stat) {
 373		udelay(10);
 374		DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
 375	}
 376	if (!delcnt) {
 377		printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
 378		     dev->name, str, SWAP16(dma->iscp.stat));
 379		return -1;
 380	} else
 381		return 0;
 382}
 383
 384
 385static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
 386{
 387	DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
 388	while (--delcnt && dma->scb.command) {
 389		udelay(10);
 390		DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
 391	}
 392	if (!delcnt) {
 393		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
 394		       dev->name, str,
 395		       SWAP16(dma->scb.status),
 396		       SWAP16(dma->scb.command));
 397		return -1;
 398	} else
 399		return 0;
 400}
 401
 402
 403static void i596_display_data(struct net_device *dev)
 404{
 405	struct i596_private *lp = netdev_priv(dev);
 406	struct i596_dma *dma = lp->dma;
 407	struct i596_cmd *cmd;
 408	struct i596_rfd *rfd;
 409	struct i596_rbd *rbd;
 410
 411	printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
 412	       &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
 413	printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
 414	       &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
 415	printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
 416		" .cmd = %08x, .rfd = %08x\n",
 417	       &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
 418		SWAP32(dma->scb.cmd), SWAP32(dma->scb.rfd));
 419	printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
 420	       " over %x, rcvdt %x, short %x\n",
 421	       SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
 422	       SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
 423	       SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
 424	cmd = lp->cmd_head;
 425	while (cmd != NULL) {
 426		printk(KERN_DEBUG
 427		       "cmd at %p, .status = %04x, .command = %04x,"
 428		       " .b_next = %08x\n",
 429		       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
 430		       SWAP32(cmd->b_next));
 431		cmd = cmd->v_next;
 432	}
 433	rfd = lp->rfd_head;
 434	printk(KERN_DEBUG "rfd_head = %p\n", rfd);
 435	do {
 436		printk(KERN_DEBUG
 437		       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
 438		       " count %04x\n",
 439		       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
 440		       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
 441		       SWAP16(rfd->count));
 442		rfd = rfd->v_next;
 443	} while (rfd != lp->rfd_head);
 444	rbd = lp->rbd_head;
 445	printk(KERN_DEBUG "rbd_head = %p\n", rbd);
 446	do {
 447		printk(KERN_DEBUG
 448		       "   %p .count %04x, b_next %08x, b_data %08x,"
 449		       " size %04x\n",
 450			rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
 451		       SWAP32(rbd->b_data), SWAP16(rbd->size));
 452		rbd = rbd->v_next;
 453	} while (rbd != lp->rbd_head);
 454	DMA_INV(dev, dma, sizeof(struct i596_dma));
 455}
 456
 457
 458#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))
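
    /*
     * virt_to_dma() rebases a CPU pointer lying inside the i596_dma block
     * onto the bus address space: lp->dma_addr is where DMA_ALLOC() mapped
     * the block, so only the offset of v within it is kept.  For example,
     * virt_to_dma(lp, &dma->scb) yields the address the chip must be given
     * to locate the SCB.
     */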
 459
 460static inline int init_rx_bufs(struct net_device *dev)
 461{
 462	struct i596_private *lp = netdev_priv(dev);
 463	struct i596_dma *dma = lp->dma;
 464	int i;
 465	struct i596_rfd *rfd;
 466	struct i596_rbd *rbd;
 467
 468	/* First build the Receive Buffer Descriptor List */
 469
 470	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
 471		dma_addr_t dma_addr;
 472		struct sk_buff *skb;
 473
 474		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
 475		if (skb == NULL)
 476			return -1;
 477		dma_addr = dma_map_single(dev->dev.parent, skb->data,
 478					  PKT_BUF_SZ, DMA_FROM_DEVICE);
 479		rbd->v_next = rbd+1;
 480		rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
 481		rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
 482		rbd->skb = skb;
 483		rbd->v_data = skb->data;
 484		rbd->b_data = SWAP32(dma_addr);
 485		rbd->size = SWAP16(PKT_BUF_SZ);
 486	}
 487	lp->rbd_head = dma->rbds;
 488	rbd = dma->rbds + rx_ring_size - 1;
 489	rbd->v_next = dma->rbds;
 490	rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));
 491
 492	/* Now build the Receive Frame Descriptor List */
 493
 494	for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
 495		rfd->rbd = I596_NULL;
 496		rfd->v_next = rfd+1;
 497		rfd->v_prev = rfd-1;
 498		rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
 499		rfd->cmd = SWAP16(CMD_FLEX);
 500	}
 501	lp->rfd_head = dma->rfds;
 502	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
 503	rfd = dma->rfds;
 504	rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
 505	rfd->v_prev = dma->rfds + rx_ring_size - 1;
 506	rfd = dma->rfds + rx_ring_size - 1;
 507	rfd->v_next = dma->rfds;
 508	rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
 509	rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
 510
 511	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
 512	return 0;
 513}
 514
 515static inline void remove_rx_bufs(struct net_device *dev)
 516{
 517	struct i596_private *lp = netdev_priv(dev);
 518	struct i596_rbd *rbd;
 519	int i;
 520
 521	for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
 522		if (rbd->skb == NULL)
 523			break;
 524		dma_unmap_single(dev->dev.parent,
 525				 (dma_addr_t)SWAP32(rbd->b_data),
 526				 PKT_BUF_SZ, DMA_FROM_DEVICE);
 527		dev_kfree_skb(rbd->skb);
 528	}
 529}
 530
 531
 532static void rebuild_rx_bufs(struct net_device *dev)
 533{
 534	struct i596_private *lp = netdev_priv(dev);
 535	struct i596_dma *dma = lp->dma;
 536	int i;
 537
 538	/* Ensure rx frame/buffer descriptors are tidy */
 539
 540	for (i = 0; i < rx_ring_size; i++) {
 541		dma->rfds[i].rbd = I596_NULL;
 542		dma->rfds[i].cmd = SWAP16(CMD_FLEX);
 543	}
 544	dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
 545	lp->rfd_head = dma->rfds;
 546	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
 547	lp->rbd_head = dma->rbds;
 548	dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
 549
 550	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
 551}
 552
 553
 554static int init_i596_mem(struct net_device *dev)
 555{
 556	struct i596_private *lp = netdev_priv(dev);
 557	struct i596_dma *dma = lp->dma;
 558	unsigned long flags;
 559
 560	mpu_port(dev, PORT_RESET, 0);
 561	udelay(100);			/* Wait 100us - seems to help */
 562
 563	/* change the scp address */
 564
 565	lp->last_cmd = jiffies;
 566
 567	dma->scp.sysbus = SYSBUS;
 568	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
 569	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
 570	dma->iscp.stat = SWAP32(ISCP_BUSY);
 571	lp->cmd_backlog = 0;
 572
 573	lp->cmd_head = NULL;
 574	dma->scb.cmd = I596_NULL;
 575
 576	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
 577
 578	DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
 579	DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
 580	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
 581
 582	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
 583	ca(dev);
 584	if (wait_istat(dev, dma, 1000, "initialization timed out"))
 585		goto failed;
 586	DEB(DEB_INIT, printk(KERN_DEBUG
 587			     "%s: i82596 initialization successful\n",
 588			     dev->name));
 589
 590	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
 591		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
 592		goto failed;
 593	}
 594
 595	/* Ensure rx frame/buffer descriptors are tidy */
 596	rebuild_rx_bufs(dev);
 597
 598	dma->scb.command = 0;
 599	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
 600
 601	DEB(DEB_INIT, printk(KERN_DEBUG
 602			     "%s: queuing CmdConfigure\n", dev->name));
 603	memcpy(dma->cf_cmd.i596_config, init_setup, 14);
 604	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
 605	DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
 606	i596_add_cmd(dev, &dma->cf_cmd.cmd);
 607
 608	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
 609	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
 610	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
 611	DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
 612	i596_add_cmd(dev, &dma->sa_cmd.cmd);
 613
 614	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
 615	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
 616	DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
 617	i596_add_cmd(dev, &dma->tdr_cmd.cmd);
 618
 619	spin_lock_irqsave (&lp->lock, flags);
 620
 621	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
 622		spin_unlock_irqrestore (&lp->lock, flags);
 623		goto failed_free_irq;
 624	}
 625	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
 626	dma->scb.command = SWAP16(RX_START);
 627	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
 628	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
 629
 630	ca(dev);
 631
 632	spin_unlock_irqrestore (&lp->lock, flags);
 633	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
 634		goto failed_free_irq;
 635	DEB(DEB_INIT, printk(KERN_DEBUG
 636			     "%s: Receive unit started OK\n", dev->name));
 637	return 0;
 638
 639failed_free_irq:
 640	free_irq(dev->irq, dev);
 641failed:
 642	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
 643	mpu_port(dev, PORT_RESET, 0);
 644	return -1;
 645}
 646
 647
 648static inline int i596_rx(struct net_device *dev)
 649{
 650	struct i596_private *lp = netdev_priv(dev);
 651	struct i596_rfd *rfd;
 652	struct i596_rbd *rbd;
 653	int frames = 0;
 654
 655	DEB(DEB_RXFRAME, printk(KERN_DEBUG
 656				"i596_rx(), rfd_head %p, rbd_head %p\n",
 657				lp->rfd_head, lp->rbd_head));
 658
 659
 660	rfd = lp->rfd_head;		/* Ref next frame to check */
 661
 662	DMA_INV(dev, rfd, sizeof(struct i596_rfd));
 663	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop while complete frames */
 664		if (rfd->rbd == I596_NULL)
 665			rbd = NULL;
 666		else if (rfd->rbd == lp->rbd_head->b_addr) {
 667			rbd = lp->rbd_head;
 668			DMA_INV(dev, rbd, sizeof(struct i596_rbd));
 669		} else {
 670			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
 671			/* XXX Now what? */
 672			rbd = NULL;
 673		}
 674		DEB(DEB_RXFRAME, printk(KERN_DEBUG
 675				      "  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
 676				      rfd, rfd->rbd, rfd->stat));
 677
 678		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
 679			/* a good frame */
 680			int pkt_len = SWAP16(rbd->count) & 0x3fff;
 681			struct sk_buff *skb = rbd->skb;
 682			int rx_in_place = 0;
 683
 684			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
 685			frames++;
 686
 687			/* Check if the packet is long enough to just accept
 688			 * without copying to a properly sized skbuff.
 689			 */
 690
 691			if (pkt_len > rx_copybreak) {
 692				struct sk_buff *newskb;
 693				dma_addr_t dma_addr;
 694
 695				dma_unmap_single(dev->dev.parent,
 696						 (dma_addr_t)SWAP32(rbd->b_data),
 697						 PKT_BUF_SZ, DMA_FROM_DEVICE);
 698				/* Get fresh skbuff to replace filled one. */
 699				newskb = netdev_alloc_skb_ip_align(dev,
 700								   PKT_BUF_SZ);
 701				if (newskb == NULL) {
 702					skb = NULL;	/* drop pkt */
 703					goto memory_squeeze;
 704				}
 705
 706				/* Pass up the skb already on the Rx ring. */
 707				skb_put(skb, pkt_len);
 708				rx_in_place = 1;
 709				rbd->skb = newskb;
 710				dma_addr = dma_map_single(dev->dev.parent,
 711							  newskb->data,
 712							  PKT_BUF_SZ,
 713							  DMA_FROM_DEVICE);
 714				rbd->v_data = newskb->data;
 715				rbd->b_data = SWAP32(dma_addr);
 716				DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
 717			} else {
 718				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 719			}
 720memory_squeeze:
 721			if (skb == NULL) {
 722				/* XXX tulip.c can defer packets here!! */
 723				dev->stats.rx_dropped++;
 724			} else {
 725				if (!rx_in_place) {
 726					/* 16 byte align the data fields */
 727					dma_sync_single_for_cpu(dev->dev.parent,
 728								(dma_addr_t)SWAP32(rbd->b_data),
 729								PKT_BUF_SZ, DMA_FROM_DEVICE);
 730					memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
 731					dma_sync_single_for_device(dev->dev.parent,
 732								   (dma_addr_t)SWAP32(rbd->b_data),
 733								   PKT_BUF_SZ, DMA_FROM_DEVICE);
 734				}
 735				skb->len = pkt_len;
 736				skb->protocol = eth_type_trans(skb, dev);
 737				netif_rx(skb);
 738				dev->stats.rx_packets++;
 739				dev->stats.rx_bytes += pkt_len;
 740			}
 741		} else {
 742			DEB(DEB_ERRORS, printk(KERN_DEBUG
 743					       "%s: Error, rfd.stat = 0x%04x\n",
 744					       dev->name, rfd->stat));
 745			dev->stats.rx_errors++;
 746			if (rfd->stat & SWAP16(0x0100))
 747				dev->stats.collisions++;
 748			if (rfd->stat & SWAP16(0x8000))
 749				dev->stats.rx_length_errors++;
 750			if (rfd->stat & SWAP16(0x0001))
 751				dev->stats.rx_over_errors++;
 752			if (rfd->stat & SWAP16(0x0002))
 753				dev->stats.rx_fifo_errors++;
 754			if (rfd->stat & SWAP16(0x0004))
 755				dev->stats.rx_frame_errors++;
 756			if (rfd->stat & SWAP16(0x0008))
 757				dev->stats.rx_crc_errors++;
 758			if (rfd->stat & SWAP16(0x0010))
 759				dev->stats.rx_length_errors++;
 760		}
 761
 762		/* Clear the buffer descriptor count and EOF + F flags */
 763
 764		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
 765			rbd->count = 0;
 766			lp->rbd_head = rbd->v_next;
 767			DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
 768		}
 769
 770		/* Tidy the frame descriptor, marking it as end of list */
 771
 772		rfd->rbd = I596_NULL;
 773		rfd->stat = 0;
 774		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
 775		rfd->count = 0;
 776
 777		/* Update record of next frame descriptor to process */
 778
 779		lp->dma->scb.rfd = rfd->b_next;
 780		lp->rfd_head = rfd->v_next;
 781		DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));
 782
 783		/* Remove end-of-list from old end descriptor */
 784
 785		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
 786		DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
 787		rfd = lp->rfd_head;
 788		DMA_INV(dev, rfd, sizeof(struct i596_rfd));
 789	}
 790
 791	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));
 792
 793	return 0;
 794}
 795
 796
 797static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
 798{
 799	struct i596_cmd *ptr;
 800
 801	while (lp->cmd_head != NULL) {
 802		ptr = lp->cmd_head;
 803		lp->cmd_head = ptr->v_next;
 804		lp->cmd_backlog--;
 805
 806		switch (SWAP16(ptr->command) & 0x7) {
 807		case CmdTx:
 808			{
 809				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
 810				struct sk_buff *skb = tx_cmd->skb;
 811				dma_unmap_single(dev->dev.parent,
 812						 tx_cmd->dma_addr,
 813						 skb->len, DMA_TO_DEVICE);
 814
 815				dev_kfree_skb(skb);
 816
 817				dev->stats.tx_errors++;
 818				dev->stats.tx_aborted_errors++;
 819
 820				ptr->v_next = NULL;
 821				ptr->b_next = I596_NULL;
 822				tx_cmd->cmd.command = 0;  /* Mark as free */
 823				break;
 824			}
 825		default:
 826			ptr->v_next = NULL;
 827			ptr->b_next = I596_NULL;
 828		}
 829		DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
 830	}
 831
 832	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
 833	lp->dma->scb.cmd = I596_NULL;
 834	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
 835}
 836
 837
 838static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
 839{
 840	unsigned long flags;
 841
 842	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));
 843
 844	spin_lock_irqsave (&lp->lock, flags);
 845
 846	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");
 847
 848	netif_stop_queue(dev);
 849
 850	/* FIXME: this command might cause an lpmc */
 851	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
 852	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
 853	ca(dev);
 854
 855	/* wait for shutdown */
 856	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
 857	spin_unlock_irqrestore (&lp->lock, flags);
 858
 859	i596_cleanup_cmd(dev, lp);
 860	i596_rx(dev);
 861
 862	netif_start_queue(dev);
 863	init_i596_mem(dev);
 864}
 865
 866
 867static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
 868{
 869	struct i596_private *lp = netdev_priv(dev);
 870	struct i596_dma *dma = lp->dma;
 871	unsigned long flags;
 872
 873	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
 874			       lp->cmd_head));
 875
 876	cmd->status = 0;
 877	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
 878	cmd->v_next = NULL;
 879	cmd->b_next = I596_NULL;
 880	DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));
 881
 882	spin_lock_irqsave (&lp->lock, flags);
 883
 884	if (lp->cmd_head != NULL) {
 885		lp->cmd_tail->v_next = cmd;
 886		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
 887		DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
 888	} else {
 889		lp->cmd_head = cmd;
 890		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
 891		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
 892		dma->scb.command = SWAP16(CUC_START);
 893		DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
 894		ca(dev);
 895	}
 896	lp->cmd_tail = cmd;
 897	lp->cmd_backlog++;
 898
 899	spin_unlock_irqrestore (&lp->lock, flags);
 900
 901	if (lp->cmd_backlog > max_cmd_backlog) {
 902		unsigned long tickssofar = jiffies - lp->last_cmd;
 903
 904		if (tickssofar < ticks_limit)
 905			return;
 906
 907		printk(KERN_ERR
 908		       "%s: command unit timed out, status resetting.\n",
 909		       dev->name);
 910#if 1
 911		i596_reset(dev, lp);
 912#endif
 913	}
 914}
 915
 916static int i596_open(struct net_device *dev)
 917{
 918	DEB(DEB_OPEN, printk(KERN_DEBUG
 919			     "%s: i596_open() irq %d.\n", dev->name, dev->irq));
 920
 921	if (init_rx_bufs(dev)) {
 922		printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
 923		return -EAGAIN;
 924	}
 925	if (init_i596_mem(dev)) {
 926		printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
 927		goto out_remove_rx_bufs;
 928	}
 929	netif_start_queue(dev);
 930
 931	return 0;
 932
 933out_remove_rx_bufs:
 934	remove_rx_bufs(dev);
 935	return -EAGAIN;
 936}
 937
 938static void i596_tx_timeout (struct net_device *dev)
 939{
 940	struct i596_private *lp = netdev_priv(dev);
 941
 942	/* Transmitter timeout, serious problems. */
 943	DEB(DEB_ERRORS, printk(KERN_DEBUG
 944			       "%s: transmit timed out, status resetting.\n",
 945			       dev->name));
 946
 947	dev->stats.tx_errors++;
 948
 949	/* Try to restart the adaptor */
 950	if (lp->last_restart == dev->stats.tx_packets) {
 951		DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
 952		/* Shutdown and restart */
 953		i596_reset (dev, lp);
 954	} else {
 955		/* Issue a channel attention signal */
 956		DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
 957		lp->dma->scb.command = SWAP16(CUC_START | RX_START);
 958		DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
 959		ca (dev);
 960		lp->last_restart = dev->stats.tx_packets;
 961	}
 962
 963	dev->trans_start = jiffies; /* prevent tx timeout */
 964	netif_wake_queue (dev);
 965}
 966
 967
 968static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 969{
 970	struct i596_private *lp = netdev_priv(dev);
 971	struct tx_cmd *tx_cmd;
 972	struct i596_tbd *tbd;
 973	short length = skb->len;
 974
 975	DEB(DEB_STARTTX, printk(KERN_DEBUG
 976				"%s: i596_start_xmit(%x,%p) called\n",
 977				dev->name, skb->len, skb->data));
 978
 979	if (length < ETH_ZLEN) {
 980		if (skb_padto(skb, ETH_ZLEN))
 981			return NETDEV_TX_OK;
 982		length = ETH_ZLEN;
 983	}
 984
 985	netif_stop_queue(dev);
 986
 987	tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
 988	tbd = lp->dma->tbds + lp->next_tx_cmd;
 989
 990	if (tx_cmd->cmd.command) {
 991		DEB(DEB_ERRORS, printk(KERN_DEBUG
 992				       "%s: xmit ring full, dropping packet.\n",
 993				       dev->name));
 994		dev->stats.tx_dropped++;
 995
 996		dev_kfree_skb_any(skb);
 997	} else {
 998		if (++lp->next_tx_cmd == TX_RING_SIZE)
 999			lp->next_tx_cmd = 0;
1000		tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
1001		tbd->next = I596_NULL;
1002
1003		tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
1004		tx_cmd->skb = skb;
1005
1006		tx_cmd->pad = 0;
1007		tx_cmd->size = 0;
1008		tbd->pad = 0;
1009		tbd->size = SWAP16(EOF | length);
1010
1011		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
1012						  skb->len, DMA_TO_DEVICE);
1013		tbd->data = SWAP32(tx_cmd->dma_addr);
1014
1015		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
1016		DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
1017		DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
1018		i596_add_cmd(dev, &tx_cmd->cmd);
1019
1020		dev->stats.tx_packets++;
1021		dev->stats.tx_bytes += length;
1022	}
1023
1024	netif_start_queue(dev);
1025
1026	return NETDEV_TX_OK;
1027}
1028
1029static void print_eth(unsigned char *add, char *str)
1030{
1031	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
1032	       add, add + 6, add, add[12], add[13], str);
1033}
1034static const struct net_device_ops i596_netdev_ops = {
1035	.ndo_open		= i596_open,
1036	.ndo_stop		= i596_close,
1037	.ndo_start_xmit		= i596_start_xmit,
1038	.ndo_set_rx_mode	= set_multicast_list,
1039	.ndo_tx_timeout		= i596_tx_timeout,
1040	.ndo_change_mtu		= eth_change_mtu,
1041	.ndo_validate_addr	= eth_validate_addr,
1042	.ndo_set_mac_address	= eth_mac_addr,
1043#ifdef CONFIG_NET_POLL_CONTROLLER
1044	.ndo_poll_controller	= i596_poll_controller,
1045#endif
1046};
1047
1048static int i82596_probe(struct net_device *dev)
1049{
1050	int i;
1051	struct i596_private *lp = netdev_priv(dev);
1052	struct i596_dma *dma;
1053
1054	/* This lot ensures things have been cache-line aligned. */
1055	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
1056	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
1057	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
1058	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
1059#ifndef __LP64__
1060	BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
1061#endif
1062
1063	if (!dev->base_addr || !dev->irq)
1064		return -ENODEV;
1065
1066	dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent,
1067		sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL);
1068	if (!dma) {
1069		printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
1070		return -ENOMEM;
1071	}
1072
1073	dev->netdev_ops = &i596_netdev_ops;
1074	dev->watchdog_timeo = TX_TIMEOUT;
1075
1076	memset(dma, 0, sizeof(struct i596_dma));
1077	lp->dma = dma;
1078
1079	dma->scb.command = 0;
1080	dma->scb.cmd = I596_NULL;
1081	dma->scb.rfd = I596_NULL;
1082	spin_lock_init(&lp->lock);
1083
1084	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
1085
1086	i = register_netdev(dev);
1087	if (i) {
1088		DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
1089				    (void *)dma, lp->dma_addr);
1090		return i;
1091	}
1092
1093	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
1094			      dev->name, dev->base_addr, dev->dev_addr,
1095			      dev->irq));
1096	DEB(DEB_INIT, printk(KERN_INFO
1097			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
1098			     dev->name, dma, (int)sizeof(struct i596_dma),
1099			     &dma->scb));
1100
1101	return 0;
1102}
1103
1104#ifdef CONFIG_NET_POLL_CONTROLLER
1105static void i596_poll_controller(struct net_device *dev)
1106{
1107	disable_irq(dev->irq);
1108	i596_interrupt(dev->irq, dev);
1109	enable_irq(dev->irq);
1110}
1111#endif
1112
1113static irqreturn_t i596_interrupt(int irq, void *dev_id)
1114{
1115	struct net_device *dev = dev_id;
1116	struct i596_private *lp;
1117	struct i596_dma *dma;
1118	unsigned short status, ack_cmd = 0;
1119
1120	lp = netdev_priv(dev);
1121	dma = lp->dma;
1122
1123	spin_lock (&lp->lock);
1124
1125	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
1126	status = SWAP16(dma->scb.status);
1127
1128	DEB(DEB_INTS, printk(KERN_DEBUG
1129			     "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1130			dev->name, dev->irq, status));
1131
1132	ack_cmd = status & 0xf000;
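    	/* Bits 15..12 of the SCB status are, per the 82596 data sheet:
    	 * CX (0x8000, command done), FR (0x4000, frame received), CNA
    	 * (0x2000, command unit left the active state) and RNR (0x1000,
    	 * receive unit left the ready state); writing them back through
    	 * scb.command at the bottom of this handler acknowledges them.
    	 */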
1133
1134	if (!ack_cmd) {
1135		DEB(DEB_ERRORS, printk(KERN_DEBUG
1136				       "%s: interrupt with no events\n",
1137				       dev->name));
1138		spin_unlock (&lp->lock);
1139		return IRQ_NONE;
1140	}
1141
1142	if ((status & 0x8000) || (status & 0x2000)) {
1143		struct i596_cmd *ptr;
1144
1145		if ((status & 0x8000))
1146			DEB(DEB_INTS,
1147			    printk(KERN_DEBUG
1148				   "%s: i596 interrupt completed command.\n",
1149				   dev->name));
1150		if ((status & 0x2000))
1151			DEB(DEB_INTS,
1152			    printk(KERN_DEBUG
1153				   "%s: i596 interrupt command unit inactive %x.\n",
1154				   dev->name, status & 0x0700));
1155
1156		while (lp->cmd_head != NULL) {
1157			DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
1158			if (!(lp->cmd_head->status & SWAP16(STAT_C)))
1159				break;
1160
1161			ptr = lp->cmd_head;
1162
1163			DEB(DEB_STATUS,
1164			    printk(KERN_DEBUG
1165				   "cmd_head->status = %04x, ->command = %04x\n",
1166				   SWAP16(lp->cmd_head->status),
1167				   SWAP16(lp->cmd_head->command)));
1168			lp->cmd_head = ptr->v_next;
1169			lp->cmd_backlog--;
1170
1171			switch (SWAP16(ptr->command) & 0x7) {
1172			case CmdTx:
1173			    {
1174				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1175				struct sk_buff *skb = tx_cmd->skb;
1176
1177				if (ptr->status & SWAP16(STAT_OK)) {
1178					DEB(DEB_TXADDR,
1179					    print_eth(skb->data, "tx-done"));
1180				} else {
1181					dev->stats.tx_errors++;
1182					if (ptr->status & SWAP16(0x0020))
1183						dev->stats.collisions++;
1184					if (!(ptr->status & SWAP16(0x0040)))
1185						dev->stats.tx_heartbeat_errors++;
1186					if (ptr->status & SWAP16(0x0400))
1187						dev->stats.tx_carrier_errors++;
1188					if (ptr->status & SWAP16(0x0800))
1189						dev->stats.collisions++;
1190					if (ptr->status & SWAP16(0x1000))
1191						dev->stats.tx_aborted_errors++;
1192				}
1193				dma_unmap_single(dev->dev.parent,
1194						 tx_cmd->dma_addr,
1195						 skb->len, DMA_TO_DEVICE);
1196				dev_kfree_skb_irq(skb);
1197
1198				tx_cmd->cmd.command = 0; /* Mark free */
1199				break;
1200			    }
1201			case CmdTDR:
1202			    {
1203				unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);
1204
1205				if (status & 0x8000) {
1206					DEB(DEB_ANY,
1207					    printk(KERN_DEBUG "%s: link ok.\n",
1208						   dev->name));
1209				} else {
1210					if (status & 0x4000)
1211						printk(KERN_ERR
1212						       "%s: Transceiver problem.\n",
1213						       dev->name);
1214					if (status & 0x2000)
1215						printk(KERN_ERR
1216						       "%s: Termination problem.\n",
1217						       dev->name);
1218					if (status & 0x1000)
1219						printk(KERN_ERR
1220						       "%s: Short circuit.\n",
1221						       dev->name);
1222
1223					DEB(DEB_TDR,
1224					    printk(KERN_DEBUG "%s: Time %d.\n",
1225						   dev->name, status & 0x07ff));
1226				}
1227				break;
1228			    }
1229			case CmdConfigure:
1230				/*
1231				 * Zap command so set_multicast_list() knows
1232				 * it is free
1233				 */
1234				ptr->command = 0;
1235				break;
1236			}
1237			ptr->v_next = NULL;
1238			ptr->b_next = I596_NULL;
1239			DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
1240			lp->last_cmd = jiffies;
1241		}
1242
1243		/* This mess is arranging that only the last of any outstanding
1244		 * commands has the interrupt bit set.  Should probably really
1245		 * only add to the cmd queue when the CU is stopped.
1246		 */
1247		ptr = lp->cmd_head;
1248		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
1249			struct i596_cmd *prev = ptr;
1250
1251			ptr->command &= SWAP16(0x1fff);
1252			ptr = ptr->v_next;
1253			DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
1254		}
1255
1256		if (lp->cmd_head != NULL)
1257			ack_cmd |= CUC_START;
1258		dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
1259		DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
1260	}
1261	if ((status & 0x1000) || (status & 0x4000)) {
1262		if ((status & 0x4000))
1263			DEB(DEB_INTS,
1264			    printk(KERN_DEBUG
1265				   "%s: i596 interrupt received a frame.\n",
1266				   dev->name));
1267		i596_rx(dev);
1268		/* Only RX_START if stopped - RGH 07-07-96 */
1269		if (status & 0x1000) {
1270			if (netif_running(dev)) {
1271				DEB(DEB_ERRORS,
1272				    printk(KERN_DEBUG
1273					   "%s: i596 interrupt receive unit inactive, status 0x%x\n",
1274					   dev->name, status));
1275				ack_cmd |= RX_START;
1276				dev->stats.rx_errors++;
1277				dev->stats.rx_fifo_errors++;
1278				rebuild_rx_bufs(dev);
1279			}
1280		}
1281	}
1282	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
1283	dma->scb.command = SWAP16(ack_cmd);
1284	DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));
1285
1286	/* DANGER: I suspect that some kind of interrupt
1287	 acknowledgement aside from acking the 82596 might be needed
1288	 here...  but it's running acceptably without it. */
1289
1290	ca(dev);
1291
1292	wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
1293	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
1294
1295	spin_unlock (&lp->lock);
1296	return IRQ_HANDLED;
1297}
1298
1299static int i596_close(struct net_device *dev)
1300{
1301	struct i596_private *lp = netdev_priv(dev);
1302	unsigned long flags;
1303
1304	netif_stop_queue(dev);
1305
1306	DEB(DEB_INIT,
1307	    printk(KERN_DEBUG
1308		   "%s: Shutting down ethercard, status was %4.4x.\n",
1309		   dev->name, SWAP16(lp->dma->scb.status)));
1310
1311	spin_lock_irqsave(&lp->lock, flags);
1312
1313	wait_cmd(dev, lp->dma, 100, "close1 timed out");
1314	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
1315	DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));
1316
1317	ca(dev);
1318
1319	wait_cmd(dev, lp->dma, 100, "close2 timed out");
1320	spin_unlock_irqrestore(&lp->lock, flags);
1321	DEB(DEB_STRUCT, i596_display_data(dev));
1322	i596_cleanup_cmd(dev, lp);
1323
1324	free_irq(dev->irq, dev);
1325	remove_rx_bufs(dev);
1326
1327	return 0;
1328}
1329
1330/*
1331 *    Set or clear the multicast filter for this adaptor.
1332 */
1333
1334static void set_multicast_list(struct net_device *dev)
1335{
1336	struct i596_private *lp = netdev_priv(dev);
1337	struct i596_dma *dma = lp->dma;
1338	int config = 0, cnt;
1339
1340	DEB(DEB_MULTI,
1341	    printk(KERN_DEBUG
1342		   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
1343		   dev->name, netdev_mc_count(dev),
1344		   dev->flags & IFF_PROMISC ? "ON" : "OFF",
1345		   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1346
1347	if ((dev->flags & IFF_PROMISC) &&
1348	    !(dma->cf_cmd.i596_config[8] & 0x01)) {
1349		dma->cf_cmd.i596_config[8] |= 0x01;
1350		config = 1;
1351	}
1352	if (!(dev->flags & IFF_PROMISC) &&
1353	    (dma->cf_cmd.i596_config[8] & 0x01)) {
1354		dma->cf_cmd.i596_config[8] &= ~0x01;
1355		config = 1;
1356	}
1357	if ((dev->flags & IFF_ALLMULTI) &&
1358	    (dma->cf_cmd.i596_config[11] & 0x20)) {
1359		dma->cf_cmd.i596_config[11] &= ~0x20;
1360		config = 1;
1361	}
1362	if (!(dev->flags & IFF_ALLMULTI) &&
1363	    !(dma->cf_cmd.i596_config[11] & 0x20)) {
1364		dma->cf_cmd.i596_config[11] |= 0x20;
1365		config = 1;
1366	}
1367	if (config) {
1368		if (dma->cf_cmd.cmd.command)
1369			printk(KERN_INFO
1370			       "%s: config change request already queued\n",
1371			       dev->name);
1372		else {
1373			dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
1374			DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
1375			i596_add_cmd(dev, &dma->cf_cmd.cmd);
1376		}
1377	}
1378
1379	cnt = netdev_mc_count(dev);
1380	if (cnt > MAX_MC_CNT) {
1381		cnt = MAX_MC_CNT;
1382		printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
1383			dev->name, cnt);
1384	}
1385
1386	if (!netdev_mc_empty(dev)) {
1387		struct netdev_hw_addr *ha;
1388		unsigned char *cp;
1389		struct mc_cmd *cmd;
1390
1391		cmd = &dma->mc_cmd;
1392		cmd->cmd.command = SWAP16(CmdMulticastList);
1393		cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
1394		cp = cmd->mc_addrs;
1395		netdev_for_each_mc_addr(ha, dev) {
1396			if (!cnt--)
1397				break;
1398			memcpy(cp, ha->addr, ETH_ALEN);
1399			if (i596_debug > 1)
1400				DEB(DEB_MULTI,
1401				    printk(KERN_DEBUG
1402					   "%s: Adding address %pM\n",
1403					   dev->name, cp));
1404			cp += ETH_ALEN;
1405		}
1406		DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
1407		i596_add_cmd(dev, &cmd->cmd);
1408	}
1409}
v5.14.15
   1/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
   2   munged into HPPA boxen .
   3
   4   This driver is based upon 82596.c, original credits are below...
   5   but there were too many hoops which HP wants jumped through to
   6   keep this code in there in a sane manner.
   7
   8   3 primary sources of the mess --
   9   1) hppa needs *lots* of cacheline flushing to keep this kind of
  10   MMIO running.
  11
  12   2) The 82596 needs to see all of its pointers as their physical
  13   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.
  14
  15   3) The implementation HP is using seems to be significantly pickier
  16   about when and how the command and RX units are started.  some
  17   command ordering was changed.
  18
  19   Examination of the mach driver leads one to believe that there
  20   might be a saner way to pull this off...  anyone who feels like a
  21   full rewrite can be my guest.
  22
  23   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
  24
  25   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
  26   03/02/2000  changes for better/correct(?) cache-flushing (deller)
  27*/
  28
  29/* 82596.c: A generic 82596 ethernet driver for linux. */
  30/*
  31   Based on Apricot.c
  32   Written 1994 by Mark Evans.
  33   This driver is for the Apricot 82596 bus-master interface
  34
  35   Modularised 12/94 Mark Evans
  36
  37
  38   Modified to support the 82596 ethernet chips on 680x0 VME boards.
  39   by Richard Hirst <richard@sleepie.demon.co.uk>
  40   Renamed to be 82596.c
  41
  42   980825:  Changed to receive directly in to sk_buffs which are
  43   allocated at open() time.  Eliminates copy on incoming frames
  44   (small ones are still copied).  Shared data now held in a
  45   non-cached page, so we can run on 68060 in copyback mode.
  46
  47   TBD:
  48   * look at deferring rx frames rather than discarding (as per tulip)
  49   * handle tx ring full as per tulip
  50   * performance test to tune rx_copybreak
  51
  52   Most of my modifications relate to the braindead big-endian
  53   implementation by Intel.  When the i596 is operating in
  54   'big-endian' mode, it thinks a 32 bit value of 0x12345678
  55   should be stored as 0x56781234.  This is a real pain, when
  56   you have linked lists which are shared by the 680x0 and the
  57   i596.
  58
  59   Driver skeleton
  60   Written 1993 by Donald Becker.
  61   Copyright 1993 United States Government as represented by the Director,
  62   National Security Agency. This software may only be used and distributed
  63   according to the terms of the GNU General Public License as modified by SRC,
  64   incorporated herein by reference.
  65
  66   The author may be reached as becker@scyld.com, or C/O
  67   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
  68
  69 */
  70
  71#include <linux/module.h>
  72#include <linux/kernel.h>
  73#include <linux/string.h>
  74#include <linux/errno.h>
  75#include <linux/ioport.h>
  76#include <linux/interrupt.h>
  77#include <linux/delay.h>
  78#include <linux/netdevice.h>
  79#include <linux/etherdevice.h>
  80#include <linux/skbuff.h>
  81#include <linux/types.h>
  82#include <linux/bitops.h>
  83#include <linux/dma-mapping.h>
  84#include <linux/io.h>
  85#include <linux/irq.h>
  86#include <linux/gfp.h>
  87
  88/* DEBUG flags
  89 */
  90
  91#define DEB_INIT	0x0001
  92#define DEB_PROBE	0x0002
  93#define DEB_SERIOUS	0x0004
  94#define DEB_ERRORS	0x0008
  95#define DEB_MULTI	0x0010
  96#define DEB_TDR		0x0020
  97#define DEB_OPEN	0x0040
  98#define DEB_RESET	0x0080
  99#define DEB_ADDCMD	0x0100
 100#define DEB_STATUS	0x0200
 101#define DEB_STARTTX	0x0400
 102#define DEB_RXADDR	0x0800
 103#define DEB_TXADDR	0x1000
 104#define DEB_RXFRAME	0x2000
 105#define DEB_INTS	0x4000
 106#define DEB_STRUCT	0x8000
 107#define DEB_ANY		0xffff
 108
 109
 110#define DEB(x, y)	if (i596_debug & (x)) { y; }
 111
 112
 113/*
 114 * The MPU_PORT command allows direct access to the 82596. With PORT access
 115 * the following commands are available (p5-18). The 32-bit port command
 116 * must be word-swapped with the most significant word written first.
 117 * This only applies to VME boards.
 118 */
 119#define PORT_RESET		0x00	/* reset 82596 */
 120#define PORT_SELFTEST		0x01	/* selftest */
 121#define PORT_ALTSCP		0x02	/* alternate SCB address */
 122#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
 123
 124static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
 125
 126/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 127 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 128 */
 129static int rx_copybreak = 100;
 130
 131#define PKT_BUF_SZ	1536
 132#define MAX_MC_CNT	64
 133
 134#define ISCP_BUSY	0x0001
 135
 136#define I596_NULL ((u32)0xffffffff)
 137
 138#define CMD_EOL		0x8000	/* The last command of the list, stop. */
 139#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
 140#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */
 141
 142#define CMD_FLEX	0x0008	/* Enable flexible memory model */
 143
 144enum commands {
 145	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
 146	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
 147};
 148
 149#define STAT_C		0x8000	/* Set to 0 after execution */
 150#define STAT_B		0x4000	/* Command being executed */
 151#define STAT_OK		0x2000	/* Command executed ok */
 152#define STAT_A		0x1000	/* Command aborted */
 153
 154#define	 CUC_START	0x0100
 155#define	 CUC_RESUME	0x0200
 156#define	 CUC_SUSPEND    0x0300
 157#define	 CUC_ABORT	0x0400
 158#define	 RX_START	0x0010
 159#define	 RX_RESUME	0x0020
 160#define	 RX_SUSPEND	0x0030
 161#define	 RX_ABORT	0x0040
 162
 163#define TX_TIMEOUT	(HZ/20)
 164
 165
 166struct i596_reg {
 167	unsigned short porthi;
 168	unsigned short portlo;
 169	u32            ca;
 170};
 171
 172#define EOF		0x8000
 173#define SIZE_MASK	0x3fff
 174
 175struct i596_tbd {
 176	unsigned short size;
 177	unsigned short pad;
 178	u32            next;
 179	u32            data;
 180	u32 cache_pad[5];		/* Total 32 bytes... */
 181};
 182
 183/* The command structure has two 'next' pointers; v_next is the address of
 184 * the next command as seen by the CPU, b_next is the address of the next
 185 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 186 * always references the status field of the next command, rather than the
 187 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 188 * logical to put v_next at the end of the structure, but we cannot do that
 189 * because the 82596 expects other fields to be there, depending on command
 190 * type.
 191 */
 192
 193struct i596_cmd {
 194	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
 195	unsigned short status;
 196	unsigned short command;
 197	u32            b_next;	/* Address from i596 viewpoint */
 198};
 199
 200struct tx_cmd {
 201	struct i596_cmd cmd;
 202	u32            tbd;
 203	unsigned short size;
 204	unsigned short pad;
 205	struct sk_buff *skb;		/* So we can free it after tx */
 206	dma_addr_t dma_addr;
 207#ifdef __LP64__
 208	u32 cache_pad[6];		/* Total 64 bytes... */
 209#else
 210	u32 cache_pad[1];		/* Total 32 bytes... */
 211#endif
 212};
 213
 214struct tdr_cmd {
 215	struct i596_cmd cmd;
 216	unsigned short status;
 217	unsigned short pad;
 218};
 219
 220struct mc_cmd {
 221	struct i596_cmd cmd;
 222	short mc_cnt;
 223	char mc_addrs[MAX_MC_CNT*6];
 224};
 225
 226struct sa_cmd {
 227	struct i596_cmd cmd;
 228	char eth_addr[8];
 229};
 230
 231struct cf_cmd {
 232	struct i596_cmd cmd;
 233	char i596_config[16];
 234};
 235
 236struct i596_rfd {
 237	unsigned short stat;
 238	unsigned short cmd;
 239	u32            b_next;	/* Address from i596 viewpoint */
 240	u32            rbd;
 241	unsigned short count;
 242	unsigned short size;
 243	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
 244	struct i596_rfd *v_prev;
 245#ifndef __LP64__
 246	u32 cache_pad[2];		/* Total 32 bytes... */
 247#endif
 248};
 249
 250struct i596_rbd {
 251	/* hardware data */
 252	unsigned short count;
 253	unsigned short zero1;
 254	u32            b_next;
 255	u32            b_data;		/* Address from i596 viewpoint */
 256	unsigned short size;
 257	unsigned short zero2;
 258	/* driver data */
 259	struct sk_buff *skb;
 260	struct i596_rbd *v_next;
 261	u32            b_addr;		/* This rbd addr from i596 view */
 262	unsigned char *v_data;		/* Address from CPUs viewpoint */
 263					/* Total 32 bytes... */
 264#ifdef __LP64__
 265    u32 cache_pad[4];
 266#endif
 267};
 268
 269/* These values as chosen so struct i596_dma fits in one page... */
 270
 271#define TX_RING_SIZE 32
 272#define RX_RING_SIZE 16
 273
 274struct i596_scb {
 275	unsigned short status;
 276	unsigned short command;
 277	u32           cmd;
 278	u32           rfd;
 279	u32           crc_err;
 280	u32           align_err;
 281	u32           resource_err;
 282	u32           over_err;
 283	u32           rcvdt_err;
 284	u32           short_err;
 285	unsigned short t_on;
 286	unsigned short t_off;
 287};
 288
 289struct i596_iscp {
 290	u32 stat;
 291	u32 scb;
 292};
 293
 294struct i596_scp {
 295	u32 sysbus;
 296	u32 pad;
 297	u32 iscp;
 298};
 299
 300struct i596_dma {
 301	struct i596_scp scp		        __attribute__((aligned(32)));
 302	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
 303	volatile struct i596_scb scb		__attribute__((aligned(32)));
 304	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
 305	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
 306	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
 307	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
 308	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
 309	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
 310	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
 311	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
 312};
 313
 314struct i596_private {
 315	struct i596_dma *dma;
 316	u32    stat;
 317	int last_restart;
 318	struct i596_rfd *rfd_head;
 319	struct i596_rbd *rbd_head;
 320	struct i596_cmd *cmd_tail;
 321	struct i596_cmd *cmd_head;
 322	int cmd_backlog;
 323	u32    last_cmd;
 324	int next_tx_cmd;
 325	int options;
 326	spinlock_t lock;       /* serialize access to chip */
 327	dma_addr_t dma_addr;
 328	void __iomem *mpu_port;
 329	void __iomem *ca;
 330};
 331
 332static const char init_setup[] =
 333{
 334	0x8E,		/* length, prefetch on */
 335	0xC8,		/* fifo to 8, monitor off */
 336	0x80,		/* don't save bad frames */
 337	0x2E,		/* No source address insertion, 8 byte preamble */
 338	0x00,		/* priority and backoff defaults */
 339	0x60,		/* interframe spacing */
 340	0x00,		/* slot time LSB */
 341	0xf2,		/* slot time and retries */
 342	0x00,		/* promiscuous mode */
 343	0x00,		/* collision detect */
 344	0x40,		/* minimum frame length */
 345	0xff,
 346	0x00,
 347	0x7f /*  *multi IA */ };
 348
 349static int i596_open(struct net_device *dev);
 350static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
 351static irqreturn_t i596_interrupt(int irq, void *dev_id);
 352static int i596_close(struct net_device *dev);
 353static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
 354static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
 355static void print_eth(unsigned char *buf, char *str);
 356static void set_multicast_list(struct net_device *dev);
 357static inline void ca(struct net_device *dev);
 358static void mpu_port(struct net_device *dev, int c, dma_addr_t x);
 359
 360static int rx_ring_size = RX_RING_SIZE;
 361static int ticks_limit = 100;
 362static int max_cmd_backlog = TX_RING_SIZE-1;
 363
 364#ifdef CONFIG_NET_POLL_CONTROLLER
 365static void i596_poll_controller(struct net_device *dev);
 366#endif
 367
 368static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v)
 369{
 370	return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
 371}
 372
 373#ifdef NONCOHERENT_DMA
 374static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
 375		size_t len)
 376{
 377	dma_sync_single_for_device(ndev->dev.parent,
 378			virt_to_dma(netdev_priv(ndev), addr), len,
 379			DMA_BIDIRECTIONAL);
 380}
 381
 382static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
 383		size_t len)
 384{
 385	dma_sync_single_for_cpu(ndev->dev.parent,
 386			virt_to_dma(netdev_priv(ndev), addr), len,
 387			DMA_BIDIRECTIONAL);
 388}
 389#else
 390static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
 391		size_t len)
 392{
 393}
 394static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
 395		size_t len)
 396{
 397}
 398#endif /* NONCOHERENT_DMA */
 399
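/*
 * Busy-wait helpers: the chip clears iscp.stat once it has read the
 * ISCP during startup, and clears scb.command once it has accepted a
 * command.  Both are polled in 10us steps, at most 'delcnt' times.
 */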
 400static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
 401{
 402	dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
 403	while (--delcnt && dma->iscp.stat) {
 404		udelay(10);
 405		dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
 406	}
 407	if (!delcnt) {
 408		printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
 409		     dev->name, str, SWAP16(dma->iscp.stat));
 410		return -1;
 411	} else
 412		return 0;
 413}
 414
 415
 416static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
 417{
 418	dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
 419	while (--delcnt && dma->scb.command) {
 420		udelay(10);
 421		dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
 422	}
 423	if (!delcnt) {
 424		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
 425		       dev->name, str,
 426		       SWAP16(dma->scb.status),
 427		       SWAP16(dma->scb.command));
 428		return -1;
 429	} else
 430		return 0;
 431}
 432
 433
 434static void i596_display_data(struct net_device *dev)
 435{
 436	struct i596_private *lp = netdev_priv(dev);
 437	struct i596_dma *dma = lp->dma;
 438	struct i596_cmd *cmd;
 439	struct i596_rfd *rfd;
 440	struct i596_rbd *rbd;
 441
	printk(KERN_DEBUG "scp at %p, .sysbus = %08x, .iscp = %08x\n",
	       &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
 444	printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
 445	       &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
 446	printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
 447		" .cmd = %08x, .rfd = %08x\n",
 448	       &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
	       SWAP32(dma->scb.cmd), SWAP32(dma->scb.rfd));
 450	printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
 451	       " over %x, rcvdt %x, short %x\n",
 452	       SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
 453	       SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
 454	       SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
 455	cmd = lp->cmd_head;
 456	while (cmd != NULL) {
 457		printk(KERN_DEBUG
 458		       "cmd at %p, .status = %04x, .command = %04x,"
 459		       " .b_next = %08x\n",
 460		       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
 461		       SWAP32(cmd->b_next));
 462		cmd = cmd->v_next;
 463	}
 464	rfd = lp->rfd_head;
 465	printk(KERN_DEBUG "rfd_head = %p\n", rfd);
 466	do {
 467		printk(KERN_DEBUG
 468		       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
 469		       " count %04x\n",
 470		       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
 471		       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
 472		       SWAP16(rfd->count));
 473		rfd = rfd->v_next;
 474	} while (rfd != lp->rfd_head);
 475	rbd = lp->rbd_head;
 476	printk(KERN_DEBUG "rbd_head = %p\n", rbd);
 477	do {
 478		printk(KERN_DEBUG
 479		       "   %p .count %04x, b_next %08x, b_data %08x,"
 480		       " size %04x\n",
 481			rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
 482		       SWAP32(rbd->b_data), SWAP16(rbd->size));
 483		rbd = rbd->v_next;
 484	} while (rbd != lp->rbd_head);
 485	dma_sync_cpu(dev, dma, sizeof(struct i596_dma));
 486}
 487
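/*
 * Build the two circular receive lists: a ring of buffer descriptors
 * (RBDs), each backed by a freshly mapped skb, and a ring of frame
 * descriptors (RFDs).  Only the first RFD is pointed at the RBD ring
 * (flexible memory mode); the last RFD carries CMD_EOL so the chip
 * stops instead of overrunning the CPU.
 */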
 488static inline int init_rx_bufs(struct net_device *dev)
 489{
 490	struct i596_private *lp = netdev_priv(dev);
 491	struct i596_dma *dma = lp->dma;
 492	int i;
 493	struct i596_rfd *rfd;
 494	struct i596_rbd *rbd;
 495
 496	/* First build the Receive Buffer Descriptor List */
 497
 498	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
 499		dma_addr_t dma_addr;
 500		struct sk_buff *skb;
 501
 502		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
 503		if (skb == NULL)
 504			return -1;
 505		dma_addr = dma_map_single(dev->dev.parent, skb->data,
 506					  PKT_BUF_SZ, DMA_FROM_DEVICE);
 507		rbd->v_next = rbd+1;
 508		rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
 509		rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
 510		rbd->skb = skb;
 511		rbd->v_data = skb->data;
 512		rbd->b_data = SWAP32(dma_addr);
 513		rbd->size = SWAP16(PKT_BUF_SZ);
 514	}
 515	lp->rbd_head = dma->rbds;
 516	rbd = dma->rbds + rx_ring_size - 1;
 517	rbd->v_next = dma->rbds;
 518	rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));
 519
 520	/* Now build the Receive Frame Descriptor List */
 521
 522	for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
 523		rfd->rbd = I596_NULL;
 524		rfd->v_next = rfd+1;
 525		rfd->v_prev = rfd-1;
 526		rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
 527		rfd->cmd = SWAP16(CMD_FLEX);
 528	}
 529	lp->rfd_head = dma->rfds;
 530	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
 531	rfd = dma->rfds;
 532	rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
 533	rfd->v_prev = dma->rfds + rx_ring_size - 1;
 534	rfd = dma->rfds + rx_ring_size - 1;
 535	rfd->v_next = dma->rfds;
 536	rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
 537	rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
 538
 539	dma_sync_dev(dev, dma, sizeof(struct i596_dma));
 540	return 0;
 541}
 542
 543static inline void remove_rx_bufs(struct net_device *dev)
 544{
 545	struct i596_private *lp = netdev_priv(dev);
 546	struct i596_rbd *rbd;
 547	int i;
 548
 549	for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
 550		if (rbd->skb == NULL)
 551			break;
 552		dma_unmap_single(dev->dev.parent,
 553				 (dma_addr_t)SWAP32(rbd->b_data),
 554				 PKT_BUF_SZ, DMA_FROM_DEVICE);
 555		dev_kfree_skb(rbd->skb);
 556	}
 557}
 558
 559
 560static void rebuild_rx_bufs(struct net_device *dev)
 561{
 562	struct i596_private *lp = netdev_priv(dev);
 563	struct i596_dma *dma = lp->dma;
 564	int i;
 565
 566	/* Ensure rx frame/buffer descriptors are tidy */
 567
 568	for (i = 0; i < rx_ring_size; i++) {
 569		dma->rfds[i].rbd = I596_NULL;
 570		dma->rfds[i].cmd = SWAP16(CMD_FLEX);
 571	}
 572	dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
 573	lp->rfd_head = dma->rfds;
 574	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
 575	lp->rbd_head = dma->rbds;
 576	dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
 577
 578	dma_sync_dev(dev, dma, sizeof(struct i596_dma));
 579}
 580
 581
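/*
 * Bring the chip up from reset: point it at the SCP with PORT_ALTSCP,
 * ring the channel-attention doorbell, wait for it to consume the
 * ISCP, then queue the configure, set-address and TDR commands and
 * finally start the receive unit.
 */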
 582static int init_i596_mem(struct net_device *dev)
 583{
 584	struct i596_private *lp = netdev_priv(dev);
 585	struct i596_dma *dma = lp->dma;
 586	unsigned long flags;
 587
 588	mpu_port(dev, PORT_RESET, 0);
 589	udelay(100);			/* Wait 100us - seems to help */
 590
 591	/* change the scp address */
 592
 593	lp->last_cmd = jiffies;
 594
 595	dma->scp.sysbus = SYSBUS;
 596	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
 597	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
 598	dma->iscp.stat = SWAP32(ISCP_BUSY);
 599	lp->cmd_backlog = 0;
 600
 601	lp->cmd_head = NULL;
 602	dma->scb.cmd = I596_NULL;
 603
 604	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
 605
 606	dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp));
 607	dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp));
 608	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
 609
 610	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
 611	ca(dev);
 612	if (wait_istat(dev, dma, 1000, "initialization timed out"))
 613		goto failed;
 614	DEB(DEB_INIT, printk(KERN_DEBUG
 615			     "%s: i82596 initialization successful\n",
 616			     dev->name));
 617
 618	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
 619		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
 620		goto failed;
 621	}
 622
 623	/* Ensure rx frame/buffer descriptors are tidy */
 624	rebuild_rx_bufs(dev);
 625
 626	dma->scb.command = 0;
 627	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
 628
 629	DEB(DEB_INIT, printk(KERN_DEBUG
 630			     "%s: queuing CmdConfigure\n", dev->name));
	memcpy(dma->cf_cmd.i596_config, init_setup, sizeof(init_setup));
 632	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
 633	dma_sync_dev(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
 634	i596_add_cmd(dev, &dma->cf_cmd.cmd);
 635
 636	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
 637	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
 638	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
 639	dma_sync_dev(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
 640	i596_add_cmd(dev, &dma->sa_cmd.cmd);
 641
 642	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
 643	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
 644	dma_sync_dev(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
 645	i596_add_cmd(dev, &dma->tdr_cmd.cmd);
 646
	spin_lock_irqsave(&lp->lock, flags);
 648
 649	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore(&lp->lock, flags);
 651		goto failed_free_irq;
 652	}
 653	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
 654	dma->scb.command = SWAP16(RX_START);
 655	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
 656	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
 657
 658	ca(dev);
 659
	spin_unlock_irqrestore(&lp->lock, flags);
 661	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
 662		goto failed_free_irq;
 663	DEB(DEB_INIT, printk(KERN_DEBUG
 664			     "%s: Receive unit started OK\n", dev->name));
 665	return 0;
 666
 667failed_free_irq:
 668	free_irq(dev->irq, dev);
 669failed:
 670	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
 671	mpu_port(dev, PORT_RESET, 0);
 672	return -1;
 673}
 674
 675
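/*
 * Hand received frames to the stack.  A frame longer than rx_copybreak
 * keeps its ring skb (a fresh one is mapped into the RBD in its
 * place); shorter frames are copied into a right-sized skb so the big
 * ring buffer can be reused immediately.
 */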
 676static inline int i596_rx(struct net_device *dev)
 677{
 678	struct i596_private *lp = netdev_priv(dev);
 679	struct i596_rfd *rfd;
 680	struct i596_rbd *rbd;
 681	int frames = 0;
 682
 683	DEB(DEB_RXFRAME, printk(KERN_DEBUG
 684				"i596_rx(), rfd_head %p, rbd_head %p\n",
 685				lp->rfd_head, lp->rbd_head));
 686
 688	rfd = lp->rfd_head;		/* Ref next frame to check */
 689
 690	dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
 691	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop while complete frames */
 692		if (rfd->rbd == I596_NULL)
 693			rbd = NULL;
 694		else if (rfd->rbd == lp->rbd_head->b_addr) {
 695			rbd = lp->rbd_head;
 696			dma_sync_cpu(dev, rbd, sizeof(struct i596_rbd));
 697		} else {
 698			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
 699			/* XXX Now what? */
 700			rbd = NULL;
 701		}
 702		DEB(DEB_RXFRAME, printk(KERN_DEBUG
 703				      "  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
				      rfd, SWAP32(rfd->rbd), SWAP16(rfd->stat)));
 705
 706		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
 707			/* a good frame */
 708			int pkt_len = SWAP16(rbd->count) & 0x3fff;
 709			struct sk_buff *skb = rbd->skb;
 710			int rx_in_place = 0;
 711
 712			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
 713			frames++;
 714
 715			/* Check if the packet is long enough to just accept
 716			 * without copying to a properly sized skbuff.
 717			 */
 718
 719			if (pkt_len > rx_copybreak) {
 720				struct sk_buff *newskb;
 721				dma_addr_t dma_addr;
 722
 723				dma_unmap_single(dev->dev.parent,
 724						 (dma_addr_t)SWAP32(rbd->b_data),
 725						 PKT_BUF_SZ, DMA_FROM_DEVICE);
 726				/* Get fresh skbuff to replace filled one. */
 727				newskb = netdev_alloc_skb_ip_align(dev,
 728								   PKT_BUF_SZ);
 729				if (newskb == NULL) {
 730					skb = NULL;	/* drop pkt */
 731					goto memory_squeeze;
 732				}
 733
 734				/* Pass up the skb already on the Rx ring. */
 735				skb_put(skb, pkt_len);
 736				rx_in_place = 1;
 737				rbd->skb = newskb;
 738				dma_addr = dma_map_single(dev->dev.parent,
 739							  newskb->data,
 740							  PKT_BUF_SZ,
 741							  DMA_FROM_DEVICE);
 742				rbd->v_data = newskb->data;
 743				rbd->b_data = SWAP32(dma_addr);
 744				dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
 745			} else {
 746				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 747			}
 748memory_squeeze:
 749			if (skb == NULL) {
 750				/* XXX tulip.c can defer packets here!! */
 751				dev->stats.rx_dropped++;
 752			} else {
 753				if (!rx_in_place) {
					/* Sync the ring buffer for the CPU,
					 * copy the frame, then hand it back */
 755					dma_sync_single_for_cpu(dev->dev.parent,
 756								(dma_addr_t)SWAP32(rbd->b_data),
 757								PKT_BUF_SZ, DMA_FROM_DEVICE);
 758					skb_put_data(skb, rbd->v_data,
 759						     pkt_len);
 760					dma_sync_single_for_device(dev->dev.parent,
 761								   (dma_addr_t)SWAP32(rbd->b_data),
 762								   PKT_BUF_SZ, DMA_FROM_DEVICE);
 763				}
 764				skb->len = pkt_len;
 765				skb->protocol = eth_type_trans(skb, dev);
 766				netif_rx(skb);
 767				dev->stats.rx_packets++;
 768				dev->stats.rx_bytes += pkt_len;
 769			}
 770		} else {
 771			DEB(DEB_ERRORS, printk(KERN_DEBUG
 772					       "%s: Error, rfd.stat = 0x%04x\n",
 773					       dev->name, rfd->stat));
 774			dev->stats.rx_errors++;
 775			if (rfd->stat & SWAP16(0x0100))
 776				dev->stats.collisions++;
 777			if (rfd->stat & SWAP16(0x8000))
 778				dev->stats.rx_length_errors++;
 779			if (rfd->stat & SWAP16(0x0001))
 780				dev->stats.rx_over_errors++;
 781			if (rfd->stat & SWAP16(0x0002))
 782				dev->stats.rx_fifo_errors++;
 783			if (rfd->stat & SWAP16(0x0004))
 784				dev->stats.rx_frame_errors++;
 785			if (rfd->stat & SWAP16(0x0008))
 786				dev->stats.rx_crc_errors++;
 787			if (rfd->stat & SWAP16(0x0010))
 788				dev->stats.rx_length_errors++;
 789		}
 790
 791		/* Clear the buffer descriptor count and EOF + F flags */
 792
 793		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
 794			rbd->count = 0;
 795			lp->rbd_head = rbd->v_next;
 796			dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
 797		}
 798
 799		/* Tidy the frame descriptor, marking it as end of list */
 800
 801		rfd->rbd = I596_NULL;
 802		rfd->stat = 0;
 803		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
 804		rfd->count = 0;
 805
 806		/* Update record of next frame descriptor to process */
 807
 808		lp->dma->scb.rfd = rfd->b_next;
 809		lp->rfd_head = rfd->v_next;
 810		dma_sync_dev(dev, rfd, sizeof(struct i596_rfd));
 811
 812		/* Remove end-of-list from old end descriptor */
 813
 814		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
 815		dma_sync_dev(dev, rfd->v_prev, sizeof(struct i596_rfd));
 816		rfd = lp->rfd_head;
 817		dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
 818	}
 819
 820	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));
 821
 822	return 0;
 823}
 824
 825
 826static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
 827{
 828	struct i596_cmd *ptr;
 829
 830	while (lp->cmd_head != NULL) {
 831		ptr = lp->cmd_head;
 832		lp->cmd_head = ptr->v_next;
 833		lp->cmd_backlog--;
 834
 835		switch (SWAP16(ptr->command) & 0x7) {
 836		case CmdTx:
 837			{
 838				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
 839				struct sk_buff *skb = tx_cmd->skb;
 840				dma_unmap_single(dev->dev.parent,
 841						 tx_cmd->dma_addr,
 842						 skb->len, DMA_TO_DEVICE);
 843
 844				dev_kfree_skb(skb);
 845
 846				dev->stats.tx_errors++;
 847				dev->stats.tx_aborted_errors++;
 848
 849				ptr->v_next = NULL;
 850				ptr->b_next = I596_NULL;
 851				tx_cmd->cmd.command = 0;  /* Mark as free */
 852				break;
 853			}
 854		default:
 855			ptr->v_next = NULL;
 856			ptr->b_next = I596_NULL;
 857		}
 858		dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
 859	}
 860
 861	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
 862	lp->dma->scb.cmd = I596_NULL;
 863	dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
 864}
 865
 866
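/*
 * Full recovery path: abort both the command and receive units, throw
 * away everything still queued, drain the rx ring, then reinitialize
 * the chip from scratch.
 */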
 867static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
 868{
 869	unsigned long flags;
 870
 871	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));
 872
	spin_lock_irqsave(&lp->lock, flags);
 874
 875	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");
 876
 877	netif_stop_queue(dev);
 878
 879	/* FIXME: this command might cause an lpmc */
 880	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
 881	dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
 882	ca(dev);
 883
 884	/* wait for shutdown */
 885	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);
 887
 888	i596_cleanup_cmd(dev, lp);
 889	i596_rx(dev);
 890
 891	netif_start_queue(dev);
 892	init_i596_mem(dev);
 893}
 894
 895
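/*
 * Queue a command block.  If the command unit already has work it is
 * chained onto the tail via b_next; otherwise the SCB is pointed at
 * the new block and the CU is started.  A backlog that has not drained
 * within ticks_limit jiffies triggers a full reset.
 */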
 896static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
 897{
 898	struct i596_private *lp = netdev_priv(dev);
 899	struct i596_dma *dma = lp->dma;
 900	unsigned long flags;
 901
 902	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
 903			       lp->cmd_head));
 904
 905	cmd->status = 0;
 906	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
 907	cmd->v_next = NULL;
 908	cmd->b_next = I596_NULL;
 909	dma_sync_dev(dev, cmd, sizeof(struct i596_cmd));
 910
	spin_lock_irqsave(&lp->lock, flags);
 912
 913	if (lp->cmd_head != NULL) {
 914		lp->cmd_tail->v_next = cmd;
 915		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
 916		dma_sync_dev(dev, lp->cmd_tail, sizeof(struct i596_cmd));
 917	} else {
 918		lp->cmd_head = cmd;
 919		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
 920		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
 921		dma->scb.command = SWAP16(CUC_START);
 922		dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
 923		ca(dev);
 924	}
 925	lp->cmd_tail = cmd;
 926	lp->cmd_backlog++;
 927
	spin_unlock_irqrestore(&lp->lock, flags);
 929
 930	if (lp->cmd_backlog > max_cmd_backlog) {
 931		unsigned long tickssofar = jiffies - lp->last_cmd;
 932
 933		if (tickssofar < ticks_limit)
 934			return;
 935
 936		printk(KERN_ERR
		       "%s: command unit timed out, resetting.\n",
 938		       dev->name);
		i596_reset(dev, lp);
 942	}
 943}
 944
 945static int i596_open(struct net_device *dev)
 946{
 947	DEB(DEB_OPEN, printk(KERN_DEBUG
 948			     "%s: i596_open() irq %d.\n", dev->name, dev->irq));
 949
 950	if (init_rx_bufs(dev)) {
 951		printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
 952		return -EAGAIN;
 953	}
 954	if (init_i596_mem(dev)) {
 955		printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
 956		goto out_remove_rx_bufs;
 957	}
 958	netif_start_queue(dev);
 959
 960	return 0;
 961
 962out_remove_rx_bufs:
 963	remove_rx_bufs(dev);
 964	return -EAGAIN;
 965}
 966
static void i596_tx_timeout(struct net_device *dev, unsigned int txqueue)
 968{
 969	struct i596_private *lp = netdev_priv(dev);
 970
 971	/* Transmitter timeout, serious problems. */
 972	DEB(DEB_ERRORS, printk(KERN_DEBUG
			       "%s: transmit timed out, resetting.\n",
 974			       dev->name));
 975
 976	dev->stats.tx_errors++;
 977
 978	/* Try to restart the adaptor */
 979	if (lp->last_restart == dev->stats.tx_packets) {
 980		DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
 981		/* Shutdown and restart */
		i596_reset(dev, lp);
 983	} else {
 984		/* Issue a channel attention signal */
 985		DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
 986		lp->dma->scb.command = SWAP16(CUC_START | RX_START);
 987		dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
		ca(dev);
 989		lp->last_restart = dev->stats.tx_packets;
 990	}
 991
 992	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
 994}
 995
 996
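/*
 * A tx ring slot is free once the interrupt handler has zeroed its
 * command word; if the next slot is still owned by the chip the packet
 * is dropped rather than the queue stopped early.
 */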
 997static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 998{
 999	struct i596_private *lp = netdev_priv(dev);
1000	struct tx_cmd *tx_cmd;
1001	struct i596_tbd *tbd;
1002	short length = skb->len;
1003
1004	DEB(DEB_STARTTX, printk(KERN_DEBUG
1005				"%s: i596_start_xmit(%x,%p) called\n",
1006				dev->name, skb->len, skb->data));
1007
1008	if (length < ETH_ZLEN) {
1009		if (skb_padto(skb, ETH_ZLEN))
1010			return NETDEV_TX_OK;
1011		length = ETH_ZLEN;
1012	}
1013
1014	netif_stop_queue(dev);
1015
1016	tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
1017	tbd = lp->dma->tbds + lp->next_tx_cmd;
1018
1019	if (tx_cmd->cmd.command) {
1020		DEB(DEB_ERRORS, printk(KERN_DEBUG
1021				       "%s: xmit ring full, dropping packet.\n",
1022				       dev->name));
1023		dev->stats.tx_dropped++;
1024
1025		dev_kfree_skb_any(skb);
1026	} else {
1027		if (++lp->next_tx_cmd == TX_RING_SIZE)
1028			lp->next_tx_cmd = 0;
1029		tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
1030		tbd->next = I596_NULL;
1031
1032		tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
1033		tx_cmd->skb = skb;
1034
1035		tx_cmd->pad = 0;
1036		tx_cmd->size = 0;
1037		tbd->pad = 0;
1038		tbd->size = SWAP16(EOF | length);
1039
1040		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
1041						  skb->len, DMA_TO_DEVICE);
1042		tbd->data = SWAP32(tx_cmd->dma_addr);
1043
1044		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
1045		dma_sync_dev(dev, tx_cmd, sizeof(struct tx_cmd));
1046		dma_sync_dev(dev, tbd, sizeof(struct i596_tbd));
1047		i596_add_cmd(dev, &tx_cmd->cmd);
1048
1049		dev->stats.tx_packets++;
1050		dev->stats.tx_bytes += length;
1051	}
1052
1053	netif_start_queue(dev);
1054
1055	return NETDEV_TX_OK;
1056}
1057
1058static void print_eth(unsigned char *add, char *str)
1059{
1060	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
1061	       add, add + 6, add, add[12], add[13], str);
}

static const struct net_device_ops i596_netdev_ops = {
1064	.ndo_open		= i596_open,
1065	.ndo_stop		= i596_close,
1066	.ndo_start_xmit		= i596_start_xmit,
1067	.ndo_set_rx_mode	= set_multicast_list,
1068	.ndo_tx_timeout		= i596_tx_timeout,
1069	.ndo_validate_addr	= eth_validate_addr,
1070	.ndo_set_mac_address	= eth_mac_addr,
1071#ifdef CONFIG_NET_POLL_CONTROLLER
1072	.ndo_poll_controller	= i596_poll_controller,
1073#endif
1074};
1075
1076static int i82596_probe(struct net_device *dev)
1077{
1078	struct i596_private *lp = netdev_priv(dev);
1079	int ret;
1080
	/* This lot ensures things are cache line aligned. */
1082	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
1083	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
1084	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
1085	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
1086#ifndef __LP64__
1087	BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
1088#endif
1089
1090	if (!dev->base_addr || !dev->irq)
1091		return -ENODEV;
1092
1093	dev->netdev_ops = &i596_netdev_ops;
1094	dev->watchdog_timeo = TX_TIMEOUT;
1095
1096	memset(lp->dma, 0, sizeof(struct i596_dma));
1097	lp->dma->scb.command = 0;
1098	lp->dma->scb.cmd = I596_NULL;
1099	lp->dma->scb.rfd = I596_NULL;
1100	spin_lock_init(&lp->lock);
1101
1102	dma_sync_dev(dev, lp->dma, sizeof(struct i596_dma));
1103
1104	ret = register_netdev(dev);
1105	if (ret)
1106		return ret;
1107
1108	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
1109			      dev->name, dev->base_addr, dev->dev_addr,
1110			      dev->irq));
1111	DEB(DEB_INIT, printk(KERN_INFO
1112			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
1113			     dev->name, lp->dma, (int)sizeof(struct i596_dma),
1114			     &lp->dma->scb));
1115
1116	return 0;
1117}
1118
1119#ifdef CONFIG_NET_POLL_CONTROLLER
1120static void i596_poll_controller(struct net_device *dev)
1121{
1122	disable_irq(dev->irq);
1123	i596_interrupt(dev->irq, dev);
1124	enable_irq(dev->irq);
1125}
1126#endif
1127
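/*
 * SCB interrupt status bits, acknowledged by writing them back to
 * scb.command: 0x8000 CX (command completed), 0x4000 FR (frame
 * received), 0x2000 CNA (command unit left the active state) and
 * 0x1000 RNR (receive unit left the ready state).
 */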
1128static irqreturn_t i596_interrupt(int irq, void *dev_id)
1129{
1130	struct net_device *dev = dev_id;
1131	struct i596_private *lp;
1132	struct i596_dma *dma;
1133	unsigned short status, ack_cmd = 0;
1134
1135	lp = netdev_priv(dev);
1136	dma = lp->dma;
1137
	spin_lock(&lp->lock);
1139
1140	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
1141	status = SWAP16(dma->scb.status);
1142
1143	DEB(DEB_INTS, printk(KERN_DEBUG
1144			     "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1145			dev->name, dev->irq, status));
1146
1147	ack_cmd = status & 0xf000;
1148
1149	if (!ack_cmd) {
1150		DEB(DEB_ERRORS, printk(KERN_DEBUG
1151				       "%s: interrupt with no events\n",
1152				       dev->name));
		spin_unlock(&lp->lock);
1154		return IRQ_NONE;
1155	}
1156
1157	if ((status & 0x8000) || (status & 0x2000)) {
1158		struct i596_cmd *ptr;
1159
1160		if ((status & 0x8000))
1161			DEB(DEB_INTS,
1162			    printk(KERN_DEBUG
1163				   "%s: i596 interrupt completed command.\n",
1164				   dev->name));
1165		if ((status & 0x2000))
1166			DEB(DEB_INTS,
1167			    printk(KERN_DEBUG
1168				   "%s: i596 interrupt command unit inactive %x.\n",
1169				   dev->name, status & 0x0700));
1170
1171		while (lp->cmd_head != NULL) {
1172			dma_sync_cpu(dev, lp->cmd_head, sizeof(struct i596_cmd));
1173			if (!(lp->cmd_head->status & SWAP16(STAT_C)))
1174				break;
1175
1176			ptr = lp->cmd_head;
1177
1178			DEB(DEB_STATUS,
1179			    printk(KERN_DEBUG
1180				   "cmd_head->status = %04x, ->command = %04x\n",
1181				   SWAP16(lp->cmd_head->status),
1182				   SWAP16(lp->cmd_head->command)));
1183			lp->cmd_head = ptr->v_next;
1184			lp->cmd_backlog--;
1185
1186			switch (SWAP16(ptr->command) & 0x7) {
1187			case CmdTx:
1188			    {
1189				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1190				struct sk_buff *skb = tx_cmd->skb;
1191
1192				if (ptr->status & SWAP16(STAT_OK)) {
1193					DEB(DEB_TXADDR,
1194					    print_eth(skb->data, "tx-done"));
1195				} else {
1196					dev->stats.tx_errors++;
1197					if (ptr->status & SWAP16(0x0020))
1198						dev->stats.collisions++;
1199					if (!(ptr->status & SWAP16(0x0040)))
1200						dev->stats.tx_heartbeat_errors++;
1201					if (ptr->status & SWAP16(0x0400))
1202						dev->stats.tx_carrier_errors++;
1203					if (ptr->status & SWAP16(0x0800))
1204						dev->stats.collisions++;
1205					if (ptr->status & SWAP16(0x1000))
1206						dev->stats.tx_aborted_errors++;
1207				}
1208				dma_unmap_single(dev->dev.parent,
1209						 tx_cmd->dma_addr,
1210						 skb->len, DMA_TO_DEVICE);
1211				dev_consume_skb_irq(skb);
1212
1213				tx_cmd->cmd.command = 0; /* Mark free */
1214				break;
1215			    }
1216			case CmdTDR:
1217			    {
				unsigned short tdr_status =
					SWAP16(((struct tdr_cmd *)ptr)->status);

				if (tdr_status & 0x8000) {
					DEB(DEB_ANY,
					    printk(KERN_DEBUG "%s: link ok.\n",
						   dev->name));
				} else {
					if (tdr_status & 0x4000)
						printk(KERN_ERR
						       "%s: Transceiver problem.\n",
						       dev->name);
					if (tdr_status & 0x2000)
						printk(KERN_ERR
						       "%s: Termination problem.\n",
						       dev->name);
					if (tdr_status & 0x1000)
						printk(KERN_ERR
						       "%s: Short circuit.\n",
						       dev->name);

					DEB(DEB_TDR,
					    printk(KERN_DEBUG "%s: Time %d.\n",
						   dev->name, tdr_status & 0x07ff));
1241				}
1242				break;
1243			    }
1244			case CmdConfigure:
1245				/*
				 * Zap command so set_multicast_list() knows
1247				 * it is free
1248				 */
1249				ptr->command = 0;
1250				break;
1251			}
1252			ptr->v_next = NULL;
1253			ptr->b_next = I596_NULL;
1254			dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
1255			lp->last_cmd = jiffies;
1256		}
1257
		/* This mess arranges that only the last of any outstanding
		 * commands has the interrupt bit set.  We should probably
		 * really only add to the cmd queue when the CU is stopped.
		 */
1262		ptr = lp->cmd_head;
1263		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
1264			struct i596_cmd *prev = ptr;
1265
1266			ptr->command &= SWAP16(0x1fff);
1267			ptr = ptr->v_next;
1268			dma_sync_dev(dev, prev, sizeof(struct i596_cmd));
1269		}
1270
		if (lp->cmd_head != NULL) {
			ack_cmd |= CUC_START;
			dma->scb.cmd = SWAP32(virt_to_dma(lp,
						&lp->cmd_head->status));
		}
		dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
1275	}
1276	if ((status & 0x1000) || (status & 0x4000)) {
1277		if ((status & 0x4000))
1278			DEB(DEB_INTS,
1279			    printk(KERN_DEBUG
1280				   "%s: i596 interrupt received a frame.\n",
1281				   dev->name));
1282		i596_rx(dev);
1283		/* Only RX_START if stopped - RGH 07-07-96 */
1284		if (status & 0x1000) {
1285			if (netif_running(dev)) {
1286				DEB(DEB_ERRORS,
1287				    printk(KERN_DEBUG
1288					   "%s: i596 interrupt receive unit inactive, status 0x%x\n",
1289					   dev->name, status));
1290				ack_cmd |= RX_START;
1291				dev->stats.rx_errors++;
1292				dev->stats.rx_fifo_errors++;
1293				rebuild_rx_bufs(dev);
1294			}
1295		}
1296	}
1297	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
1298	dma->scb.command = SWAP16(ack_cmd);
1299	dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
1300
	/* DANGER: I suspect that some kind of interrupt
	 * acknowledgement aside from acking the 82596 might be needed
	 * here...  but it's running acceptably without */
1304
1305	ca(dev);
1306
1307	wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
1308	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
1309
	spin_unlock(&lp->lock);
1311	return IRQ_HANDLED;
1312}
1313
1314static int i596_close(struct net_device *dev)
1315{
1316	struct i596_private *lp = netdev_priv(dev);
1317	unsigned long flags;
1318
1319	netif_stop_queue(dev);
1320
1321	DEB(DEB_INIT,
1322	    printk(KERN_DEBUG
1323		   "%s: Shutting down ethercard, status was %4.4x.\n",
1324		   dev->name, SWAP16(lp->dma->scb.status)));
1325
1326	spin_lock_irqsave(&lp->lock, flags);
1327
1328	wait_cmd(dev, lp->dma, 100, "close1 timed out");
1329	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
1330	dma_sync_dev(dev, &lp->dma->scb, sizeof(struct i596_scb));
1331
1332	ca(dev);
1333
1334	wait_cmd(dev, lp->dma, 100, "close2 timed out");
1335	spin_unlock_irqrestore(&lp->lock, flags);
1336	DEB(DEB_STRUCT, i596_display_data(dev));
1337	i596_cleanup_cmd(dev, lp);
1338
1339	free_irq(dev->irq, dev);
1340	remove_rx_bufs(dev);
1341
1342	return 0;
1343}
1344
1345/*
1346 *    Set or clear the multicast filter for this adaptor.
1347 */
1348
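/*
 * Promiscuous and all-multicast are driven through CmdConfigure: bit
 * 0x01 of config byte 8 enables promiscuous mode, and clearing bit
 * 0x20 of config byte 11 appears to make the chip accept all multicast
 * frames.  Individual addresses are loaded with a CmdMulticastList.
 */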
1349static void set_multicast_list(struct net_device *dev)
1350{
1351	struct i596_private *lp = netdev_priv(dev);
1352	struct i596_dma *dma = lp->dma;
1353	int config = 0, cnt;
1354
1355	DEB(DEB_MULTI,
1356	    printk(KERN_DEBUG
1357		   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
1358		   dev->name, netdev_mc_count(dev),
1359		   dev->flags & IFF_PROMISC ? "ON" : "OFF",
1360		   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1361
1362	if ((dev->flags & IFF_PROMISC) &&
1363	    !(dma->cf_cmd.i596_config[8] & 0x01)) {
1364		dma->cf_cmd.i596_config[8] |= 0x01;
1365		config = 1;
1366	}
1367	if (!(dev->flags & IFF_PROMISC) &&
1368	    (dma->cf_cmd.i596_config[8] & 0x01)) {
1369		dma->cf_cmd.i596_config[8] &= ~0x01;
1370		config = 1;
1371	}
1372	if ((dev->flags & IFF_ALLMULTI) &&
1373	    (dma->cf_cmd.i596_config[11] & 0x20)) {
1374		dma->cf_cmd.i596_config[11] &= ~0x20;
1375		config = 1;
1376	}
1377	if (!(dev->flags & IFF_ALLMULTI) &&
1378	    !(dma->cf_cmd.i596_config[11] & 0x20)) {
1379		dma->cf_cmd.i596_config[11] |= 0x20;
1380		config = 1;
1381	}
1382	if (config) {
1383		if (dma->cf_cmd.cmd.command)
1384			printk(KERN_INFO
1385			       "%s: config change request already queued\n",
1386			       dev->name);
1387		else {
1388			dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
1389			dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
1390			i596_add_cmd(dev, &dma->cf_cmd.cmd);
1391		}
1392	}
1393
1394	cnt = netdev_mc_count(dev);
1395	if (cnt > MAX_MC_CNT) {
1396		cnt = MAX_MC_CNT;
		printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
		       dev->name, cnt);
1399	}
1400
1401	if (!netdev_mc_empty(dev)) {
1402		struct netdev_hw_addr *ha;
1403		unsigned char *cp;
1404		struct mc_cmd *cmd;
1405
1406		cmd = &dma->mc_cmd;
1407		cmd->cmd.command = SWAP16(CmdMulticastList);
		cmd->mc_cnt = SWAP16(cnt * ETH_ALEN);	/* cnt is clamped above */
1409		cp = cmd->mc_addrs;
1410		netdev_for_each_mc_addr(ha, dev) {
1411			if (!cnt--)
1412				break;
1413			memcpy(cp, ha->addr, ETH_ALEN);
1414			if (i596_debug > 1)
1415				DEB(DEB_MULTI,
1416				    printk(KERN_DEBUG
1417					   "%s: Adding address %pM\n",
1418					   dev->name, cp));
1419			cp += ETH_ALEN;
1420		}
1421		dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
1422		i596_add_cmd(dev, &cmd->cmd);
1423	}
1424}