   1/*
   2 * e100net.c: A network driver for the ETRAX 100LX network controller.
   3 *
   4 * Copyright (c) 1998-2002 Axis Communications AB.
   5 *
   6 * The outline of this driver comes from skeleton.c.
   7 *
   8 */
   9
  10#include <linux/kernel.h>
  11#include <linux/delay.h>
  12#include <linux/types.h>
  13#include <linux/fcntl.h>
  14#include <linux/interrupt.h>
  15#include <linux/ptrace.h>
  16#include <linux/ioport.h>
  17#include <linux/in.h>
  18#include <linux/string.h>
  19#include <linux/spinlock.h>
  20#include <linux/errno.h>
  21#include <linux/init.h>
  22#include <linux/bitops.h>
  23
  24#include <linux/if.h>
  25#include <linux/mii.h>
  26#include <linux/netdevice.h>
  27#include <linux/etherdevice.h>
  28#include <linux/skbuff.h>
  29#include <linux/ethtool.h>
  30
   31#include <arch/svinto.h>    /* DMA and register descriptions */
  32#include <asm/io.h>         /* CRIS_LED_* I/O functions */
  33#include <asm/irq.h>
  34#include <asm/dma.h>
  35#include <asm/ethernet.h>
  36#include <asm/cache.h>
  37#include <arch/io_interface_mux.h>
  38
  39//#define ETHDEBUG
  40#define D(x)
  41
  42/*
   43 * The name of the card. Used in log messages and in the requests for
   44 * I/O regions, IRQs and DMA channels
  45 */
  46
  47static const char* cardname = "ETRAX 100LX built-in ethernet controller";
  48
   49/* A default Ethernet address. High-level SW will set the real one later */
  50
  51static struct sockaddr default_mac = {
  52	0,
  53	{ 0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00 }
  54};
  55
   56/* Information that needs to be kept for each board. */
  57struct net_local {
  58	struct mii_if_info mii_if;
  59
  60	/* Tx control lock.  This protects the transmit buffer ring
  61	 * state along with the "tx full" state of the driver.  This
  62	 * means all netif_queue flow control actions are protected
  63	 * by this lock as well.
  64	 */
  65	spinlock_t lock;
  66
  67	spinlock_t led_lock; /* Protect LED state */
  68	spinlock_t transceiver_lock; /* Protect transceiver state. */
  69};
  70
  71typedef struct etrax_eth_descr
  72{
  73	etrax_dma_descr descr;
  74	struct sk_buff* skb;
  75} etrax_eth_descr;
  76
   77/* Some transceivers require special handling */
  78struct transceiver_ops
  79{
  80	unsigned int oui;
  81	void (*check_speed)(struct net_device* dev);
  82	void (*check_duplex)(struct net_device* dev);
  83};
  84
  85/* Duplex settings */
  86enum duplex
  87{
  88	half,
  89	full,
  90	autoneg
  91};
  92
  93/* Dma descriptors etc. */
  94
  95#define MAX_MEDIA_DATA_SIZE 1522
  96
  97#define MIN_PACKET_LEN      46
  98#define ETHER_HEAD_LEN      14
  99
 100/*
 101** MDIO constants.
 102*/
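/*
 * These constants are assembled into a standard clause-22 MDIO management
 * frame by e100_send_mdio_cmd() below: a 32-bit preamble of ones, the
 * start/opcode bits (read = 10, write = 01), a 5-bit PHY address, a 5-bit
 * register address, a turnaround field and finally 16 data bits, all
 * bit-banged through R_NETWORK_MGM_CTRL.
 */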
 103#define MDIO_START                          0x1
 104#define MDIO_READ                           0x2
 105#define MDIO_WRITE                          0x1
 106#define MDIO_PREAMBLE              0xfffffffful
 107
 108/* Broadcom specific */
 109#define MDIO_AUX_CTRL_STATUS_REG           0x18
 110#define MDIO_BC_FULL_DUPLEX_IND             0x1
 111#define MDIO_BC_SPEED                       0x2
 112
 113/* TDK specific */
 114#define MDIO_TDK_DIAGNOSTIC_REG              18
 115#define MDIO_TDK_DIAGNOSTIC_RATE          0x400
 116#define MDIO_TDK_DIAGNOSTIC_DPLX          0x800
 117
 118/*Intel LXT972A specific*/
 119#define MDIO_INT_STATUS_REG_2			0x0011
 120#define MDIO_INT_FULL_DUPLEX_IND       (1 << 9)
 121#define MDIO_INT_SPEED                (1 << 14)
 122
 123/* Network flash constants */
 124#define NET_FLASH_TIME                  (HZ/50) /* 20 ms */
 125#define NET_FLASH_PAUSE                (HZ/100) /* 10 ms */
 126#define NET_LINK_UP_CHECK_INTERVAL       (2*HZ) /* 2 s   */
 127#define NET_DUPLEX_CHECK_INTERVAL        (2*HZ) /* 2 s   */
 128
 129#define NO_NETWORK_ACTIVITY 0
 130#define NETWORK_ACTIVITY    1
 131
 132#define NBR_OF_RX_DESC     32
 133#define NBR_OF_TX_DESC     16
 134
 135/* Large packets are sent directly to upper layers while small packets are */
 136/* copied (to reduce memory waste). The following constant decides the breakpoint */
 137#define RX_COPYBREAK 256
 138
 139/* Due to a chip bug we need to flush the cache when descriptors are returned */
 140/* to the DMA. To decrease performance impact we return descriptors in chunks. */
 141/* The following constant determines the number of descriptors to return. */
  142#define RX_QUEUE_THRESHOLD  (NBR_OF_RX_DESC/2)
 143
 144#define GET_BIT(bit,val)   (((val) >> (bit)) & 0x01)
 145
 146/* Define some macros to access ETRAX 100 registers */
 147#define SETF(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
 148					  IO_FIELD_(reg##_, field##_, val)
 149#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
 150					  IO_STATE_(reg##_, field##_, _##val)
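/*
 * SETF/SETS do a read-modify-write on a software shadow of a register:
 * the field is masked out of the shadow and the new numeric value (SETF)
 * or symbolic state (SETS) is OR:ed in. The shadow (for example
 * network_rec_config_shadow below) is then written to the hardware
 * register in one piece, presumably because the register cannot simply
 * be read back for a read-modify-write.
 */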
 151
  152static etrax_eth_descr *myNextRxDesc;  /* Points to the next descriptor
  153                                          to be processed */
 154static etrax_eth_descr *myLastRxDesc;  /* The last processed descriptor */
 155
 156static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
 157
 158static etrax_eth_descr* myFirstTxDesc; /* First packet not yet sent */
 159static etrax_eth_descr* myLastTxDesc;  /* End of send queue */
 160static etrax_eth_descr* myNextTxDesc;  /* Next descriptor to use */
 161static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
 162
 163static unsigned int network_rec_config_shadow = 0;
 164
 165static unsigned int network_tr_ctrl_shadow = 0;
 166
 167/* Network speed indication. */
 168static DEFINE_TIMER(speed_timer, NULL, 0, 0);
 169static DEFINE_TIMER(clear_led_timer, NULL, 0, 0);
 170static int current_speed; /* Speed read from transceiver */
 171static int current_speed_selection; /* Speed selected by user */
 172static unsigned long led_next_time;
 173static int led_active;
 174static int rx_queue_len;
 175
 176/* Duplex */
 177static DEFINE_TIMER(duplex_timer, NULL, 0, 0);
 178static int full_duplex;
 179static enum duplex current_duplex;
 180
 181/* Index to functions, as function prototypes. */
 182
 183static int etrax_ethernet_init(void);
 184
 185static int e100_open(struct net_device *dev);
 186static int e100_set_mac_address(struct net_device *dev, void *addr);
 187static int e100_send_packet(struct sk_buff *skb, struct net_device *dev);
 188static irqreturn_t e100rxtx_interrupt(int irq, void *dev_id);
 189static irqreturn_t e100nw_interrupt(int irq, void *dev_id);
 190static void e100_rx(struct net_device *dev);
 191static int e100_close(struct net_device *dev);
 192static int e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 193static int e100_set_config(struct net_device* dev, struct ifmap* map);
 194static void e100_tx_timeout(struct net_device *dev);
 195static struct net_device_stats *e100_get_stats(struct net_device *dev);
 196static void set_multicast_list(struct net_device *dev);
 197static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
 198static void update_rx_stats(struct net_device_stats *);
 199static void update_tx_stats(struct net_device_stats *);
 200static int e100_probe_transceiver(struct net_device* dev);
 201
 202static void e100_check_speed(unsigned long priv);
 203static void e100_set_speed(struct net_device* dev, unsigned long speed);
 204static void e100_check_duplex(unsigned long priv);
 205static void e100_set_duplex(struct net_device* dev, enum duplex);
 206static void e100_negotiate(struct net_device* dev);
 207
 208static int e100_get_mdio_reg(struct net_device *dev, int phy_id, int location);
 209static void e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value);
 210
 211static void e100_send_mdio_cmd(unsigned short cmd, int write_cmd);
 212static void e100_send_mdio_bit(unsigned char bit);
 213static unsigned char e100_receive_mdio_bit(void);
 214static void e100_reset_transceiver(struct net_device* net);
 215
 216static void e100_clear_network_leds(unsigned long dummy);
 217static void e100_set_network_leds(int active);
 218
 219static const struct ethtool_ops e100_ethtool_ops;
 220#if defined(CONFIG_ETRAX_NO_PHY)
 221static void dummy_check_speed(struct net_device* dev);
 222static void dummy_check_duplex(struct net_device* dev);
 223#else
 224static void broadcom_check_speed(struct net_device* dev);
 225static void broadcom_check_duplex(struct net_device* dev);
 226static void tdk_check_speed(struct net_device* dev);
 227static void tdk_check_duplex(struct net_device* dev);
 228static void intel_check_speed(struct net_device* dev);
 229static void intel_check_duplex(struct net_device* dev);
 230static void generic_check_speed(struct net_device* dev);
 231static void generic_check_duplex(struct net_device* dev);
 232#endif
 233#ifdef CONFIG_NET_POLL_CONTROLLER
 234static void e100_netpoll(struct net_device* dev);
 235#endif
 236
 237static int autoneg_normal = 1;
 238
 239struct transceiver_ops transceivers[] =
 240{
 241#if defined(CONFIG_ETRAX_NO_PHY)
 242	{0x0000, dummy_check_speed, dummy_check_duplex}        /* Dummy */
 243#else
 244	{0x1018, broadcom_check_speed, broadcom_check_duplex},  /* Broadcom */
 245	{0xC039, tdk_check_speed, tdk_check_duplex},            /* TDK 2120 */
 246	{0x039C, tdk_check_speed, tdk_check_duplex},            /* TDK 2120C */
  247	{0x04de, intel_check_speed, intel_check_duplex},         /* Intel LXT972A */
 248	{0x0000, generic_check_speed, generic_check_duplex}     /* Generic, must be last */
 249#endif
 250};
 251
 252struct transceiver_ops* transceiver = &transceivers[0];
 253
 254static const struct net_device_ops e100_netdev_ops = {
 255	.ndo_open		= e100_open,
 256	.ndo_stop		= e100_close,
 257	.ndo_start_xmit		= e100_send_packet,
 258	.ndo_tx_timeout		= e100_tx_timeout,
 259	.ndo_get_stats		= e100_get_stats,
 260	.ndo_set_rx_mode	= set_multicast_list,
 261	.ndo_do_ioctl		= e100_ioctl,
 262	.ndo_set_mac_address	= e100_set_mac_address,
 263	.ndo_validate_addr	= eth_validate_addr,
 264	.ndo_set_config		= e100_set_config,
 265#ifdef CONFIG_NET_POLL_CONTROLLER
 266	.ndo_poll_controller	= e100_netpoll,
 267#endif
 268};
 269
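/* The TX DMA command register appears to read back as zero once the
 * channel has run out of descriptors, so a zero command word is used as
 * a "transmit ring drained" test (see e100_tx_timeout()). */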
 270#define tx_done(dev) (*R_DMA_CH0_CMD == 0)
 271
 272/*
 273 * Check for a network adaptor of this type, and return '0' if one exists.
 274 * If dev->base_addr == 0, probe all likely locations.
 275 * If dev->base_addr == 1, always return failure.
 276 * If dev->base_addr == 2, allocate space for the device and return success
 277 * (detachable devices only).
 278 */
 279
 280static int __init
 281etrax_ethernet_init(void)
 282{
 283	struct net_device *dev;
  284	struct net_local* np;
 285	int i, err;
 286
 287	printk(KERN_INFO
 288	       "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n");
 289
 290	if (cris_request_io_interface(if_eth, cardname)) {
 291		printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
 292		return -EBUSY;
 293	}
 294
 295	dev = alloc_etherdev(sizeof(struct net_local));
 296	if (!dev)
 297		return -ENOMEM;
 298
 299	np = netdev_priv(dev);
 300
 301	/* we do our own locking */
 302	dev->features |= NETIF_F_LLTX;
 303
 304	dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
 305
 306	/* now setup our etrax specific stuff */
 307
 308	dev->irq = NETWORK_DMA_RX_IRQ_NBR; /* we really use DMATX as well... */
 309	dev->dma = NETWORK_RX_DMA_NBR;
 310
 311	/* fill in our handlers so the network layer can talk to us in the future */
 312
 313	dev->ethtool_ops	= &e100_ethtool_ops;
 314	dev->netdev_ops		= &e100_netdev_ops;
 315
 316	spin_lock_init(&np->lock);
 317	spin_lock_init(&np->led_lock);
 318	spin_lock_init(&np->transceiver_lock);
 319
 320	/* Initialise the list of Etrax DMA-descriptors */
 321
 322	/* Initialise receive descriptors */
 323
 324	for (i = 0; i < NBR_OF_RX_DESC; i++) {
 325		/* Allocate two extra cachelines to make sure that buffer used
 326		 * by DMA does not share cacheline with any other data (to
 327		 * avoid cache bug)
 328		 */
 329		RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
 330		if (!RxDescList[i].skb)
 331			return -ENOMEM;
 332		RxDescList[i].descr.ctrl   = 0;
 333		RxDescList[i].descr.sw_len = MAX_MEDIA_DATA_SIZE;
 334		RxDescList[i].descr.next   = virt_to_phys(&RxDescList[i + 1]);
 335		RxDescList[i].descr.buf    = L1_CACHE_ALIGN(virt_to_phys(RxDescList[i].skb->data));
 336		RxDescList[i].descr.status = 0;
 337		RxDescList[i].descr.hw_len = 0;
 338		prepare_rx_descriptor(&RxDescList[i].descr);
 339	}
 340
 341	RxDescList[NBR_OF_RX_DESC - 1].descr.ctrl   = d_eol;
 342	RxDescList[NBR_OF_RX_DESC - 1].descr.next   = virt_to_phys(&RxDescList[0]);
 343	rx_queue_len = 0;
 344
 345	/* Initialize transmit descriptors */
 346	for (i = 0; i < NBR_OF_TX_DESC; i++) {
 347		TxDescList[i].descr.ctrl   = 0;
 348		TxDescList[i].descr.sw_len = 0;
 349		TxDescList[i].descr.next   = virt_to_phys(&TxDescList[i + 1].descr);
 350		TxDescList[i].descr.buf    = 0;
 351		TxDescList[i].descr.status = 0;
 352		TxDescList[i].descr.hw_len = 0;
 353		TxDescList[i].skb = 0;
 354	}
 355
 356	TxDescList[NBR_OF_TX_DESC - 1].descr.ctrl   = d_eol;
 357	TxDescList[NBR_OF_TX_DESC - 1].descr.next   = virt_to_phys(&TxDescList[0].descr);
 358
 359	/* Initialise initial pointers */
 360
 361	myNextRxDesc  = &RxDescList[0];
 362	myLastRxDesc  = &RxDescList[NBR_OF_RX_DESC - 1];
 363	myFirstTxDesc = &TxDescList[0];
 364	myNextTxDesc  = &TxDescList[0];
 365	myLastTxDesc  = &TxDescList[NBR_OF_TX_DESC - 1];
 366
 367	/* Register device */
 368	err = register_netdev(dev);
 369	if (err) {
 370		free_netdev(dev);
 371		return err;
 372	}
 373
 374	/* set the default MAC address */
 375
 376	e100_set_mac_address(dev, &default_mac);
 377
 378	/* Initialize speed indicator stuff. */
 379
 380	current_speed = 10;
 381	current_speed_selection = 0; /* Auto */
 382	speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
 383	speed_timer.data = (unsigned long)dev;
 384	speed_timer.function = e100_check_speed;
 385
 386	clear_led_timer.function = e100_clear_network_leds;
 387	clear_led_timer.data = (unsigned long)dev;
 388
 389	full_duplex = 0;
 390	current_duplex = autoneg;
 391	duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
  392	duplex_timer.data = (unsigned long)dev;
 393	duplex_timer.function = e100_check_duplex;
 394
  395	/* Initialize mii interface */
 396	np->mii_if.phy_id_mask = 0x1f;
 397	np->mii_if.reg_num_mask = 0x1f;
 398	np->mii_if.dev = dev;
 399	np->mii_if.mdio_read = e100_get_mdio_reg;
 400	np->mii_if.mdio_write = e100_set_mdio_reg;
 401
 402	/* Initialize group address registers to make sure that no */
 403	/* unwanted addresses are matched */
 404	*R_NETWORK_GA_0 = 0x00000000;
 405	*R_NETWORK_GA_1 = 0x00000000;
 406
 407	/* Initialize next time the led can flash */
 408	led_next_time = jiffies;
 409	return 0;
 410}
 411device_initcall(etrax_ethernet_init)
 412
  413/* Set the MAC address of the interface. Called from the core after a
  414 * SIOCSIFADDR ioctl, and from the bootup code above.
 415 */
 416
 417static int
 418e100_set_mac_address(struct net_device *dev, void *p)
 419{
 420	struct net_local *np = netdev_priv(dev);
 421	struct sockaddr *addr = p;
 422
 423	spin_lock(&np->lock); /* preemption protection */
 424
 425	/* remember it */
 426
 427	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 428
 429	/* Write it to the hardware.
 430	 * Note the way the address is wrapped:
 431	 * *R_NETWORK_SA_0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
 432	 * *R_NETWORK_SA_1 = a0_4 | (a0_5 << 8);
 433	 */
 434
 435	*R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
 436		(dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
 437	*R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
 438	*R_NETWORK_SA_2 = 0;
 439
 440	/* show it in the log as well */
 441
 442	printk(KERN_INFO "%s: changed MAC to %pM\n", dev->name, dev->dev_addr);
 443
 444	spin_unlock(&np->lock);
 445
 446	return 0;
 447}
 448
 449/*
 450 * Open/initialize the board. This is called (in the current kernel)
 451 * sometime after booting when the 'ifconfig' program is run.
 452 *
 453 * This routine should set everything up anew at each open, even
 454 * registers that "should" only need to be set once at boot, so that
  455 * there is a non-reboot way to recover if something goes wrong.
 456 */
 457
 458static int
 459e100_open(struct net_device *dev)
 460{
 461	unsigned long flags;
 462
 463	/* enable the MDIO output pin */
 464
 465	*R_NETWORK_MGM_CTRL = IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable);
 466
 467	*R_IRQ_MASK0_CLR =
 468		IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
 469		IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
 470		IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);
 471
 472	/* clear dma0 and 1 eop and descr irq masks */
 473	*R_IRQ_MASK2_CLR =
 474		IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
 475		IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
 476		IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
 477		IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
 478
 479	/* Reset and wait for the DMA channels */
 480
 481	RESET_DMA(NETWORK_TX_DMA_NBR);
 482	RESET_DMA(NETWORK_RX_DMA_NBR);
 483	WAIT_DMA(NETWORK_TX_DMA_NBR);
 484	WAIT_DMA(NETWORK_RX_DMA_NBR);
 485
 486	/* Initialise the etrax network controller */
 487
 488	/* allocate the irq corresponding to the receiving DMA */
 489
 490	if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt, 0, cardname,
 491			(void *)dev)) {
 492		goto grace_exit0;
 493	}
 494
 495	/* allocate the irq corresponding to the transmitting DMA */
 496
 497	if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100rxtx_interrupt, 0,
 498			cardname, (void *)dev)) {
 499		goto grace_exit1;
 500	}
 501
 502	/* allocate the irq corresponding to the network errors etc */
 503
 504	if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0,
 505			cardname, (void *)dev)) {
 506		goto grace_exit2;
 507	}
 508
 509	/*
 510	 * Always allocate the DMA channels after the IRQ,
 511	 * and clean up on failure.
 512	 */
 513
 514	if (cris_request_dma(NETWORK_TX_DMA_NBR,
 515	                     cardname,
 516	                     DMA_VERBOSE_ON_ERROR,
 517	                     dma_eth)) {
 518		goto grace_exit3;
 519        }
 520
 521	if (cris_request_dma(NETWORK_RX_DMA_NBR,
 522	                     cardname,
 523	                     DMA_VERBOSE_ON_ERROR,
 524	                     dma_eth)) {
 525		goto grace_exit4;
 526        }
 527
 528	/* give the HW an idea of what MAC address we want */
 529
 530	*R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
 531		(dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
 532	*R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
 533	*R_NETWORK_SA_2 = 0;
 534
 535#if 0
 536	/* use promiscuous mode for testing */
 537	*R_NETWORK_GA_0 = 0xffffffff;
 538	*R_NETWORK_GA_1 = 0xffffffff;
 539
 540	*R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
 541#else
 542	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
 543	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
 544	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
 545	SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
 546	*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
 547#endif
 548
 549	*R_NETWORK_GEN_CONFIG =
 550		IO_STATE(R_NETWORK_GEN_CONFIG, phy,    mii_clk) |
 551		IO_STATE(R_NETWORK_GEN_CONFIG, enable, on);
 552
 553	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
 554	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, delay, none);
 555	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cancel, dont);
 556	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cd, enable);
 557	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, retry, enable);
 558	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, pad, enable);
 559	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
 560	*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
 561
 562	local_irq_save(flags);
 563
 564	/* enable the irq's for ethernet DMA */
 565
 566	*R_IRQ_MASK2_SET =
 567		IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
 568		IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
 569
 570	*R_IRQ_MASK0_SET =
 571		IO_STATE(R_IRQ_MASK0_SET, overrun,       set) |
 572		IO_STATE(R_IRQ_MASK0_SET, underrun,      set) |
 573		IO_STATE(R_IRQ_MASK0_SET, excessive_col, set);
 574
 575	/* make sure the irqs are cleared */
 576
 577	*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
 578	*R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);
 579
  580	/* make sure the receive and transmit error counters are cleared */
 581
 582	(void)*R_REC_COUNTERS;  /* dummy read */
 583	(void)*R_TR_COUNTERS;   /* dummy read */
 584
 585	/* start the receiving DMA channel so we can receive packets from now on */
 586
 587	*R_DMA_CH1_FIRST = virt_to_phys(myNextRxDesc);
 588	*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, start);
 589
 590	/* Set up transmit DMA channel so it can be restarted later */
 591
 592	*R_DMA_CH0_FIRST = 0;
 593	*R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
 594	netif_start_queue(dev);
 595
 596	local_irq_restore(flags);
 597
 598	/* Probe for transceiver */
 599	if (e100_probe_transceiver(dev))
 600		goto grace_exit5;
 601
 602	/* Start duplex/speed timers */
 603	add_timer(&speed_timer);
 604	add_timer(&duplex_timer);
 605
  606	/* We are now ready to accept transmit requests from
  607	 * the networking queueing layer.
 608	 */
 609	netif_carrier_on(dev);
 610
 611	return 0;
 612
 613grace_exit5:
 614	cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
 615grace_exit4:
 616	cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
 617grace_exit3:
 618	free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
 619grace_exit2:
 620	free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
 621grace_exit1:
 622	free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
 623grace_exit0:
 624	return -EAGAIN;
 625}
 626
 627#if defined(CONFIG_ETRAX_NO_PHY)
 628static void
 629dummy_check_speed(struct net_device* dev)
 630{
 631	current_speed = 100;
 632}
 633#else
 634static void
 635generic_check_speed(struct net_device* dev)
 636{
 637	unsigned long data;
 638	struct net_local *np = netdev_priv(dev);
 639
 640	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
 641	if ((data & ADVERTISE_100FULL) ||
 642	    (data & ADVERTISE_100HALF))
 643		current_speed = 100;
 644	else
 645		current_speed = 10;
 646}
 647
 648static void
 649tdk_check_speed(struct net_device* dev)
 650{
 651	unsigned long data;
 652	struct net_local *np = netdev_priv(dev);
 653
 654	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 655				 MDIO_TDK_DIAGNOSTIC_REG);
 656	current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
 657}
 658
 659static void
 660broadcom_check_speed(struct net_device* dev)
 661{
 662	unsigned long data;
 663	struct net_local *np = netdev_priv(dev);
 664
 665	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 666				 MDIO_AUX_CTRL_STATUS_REG);
 667	current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
 668}
 669
 670static void
 671intel_check_speed(struct net_device* dev)
 672{
 673	unsigned long data;
 674	struct net_local *np = netdev_priv(dev);
 675
 676	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 677				 MDIO_INT_STATUS_REG_2);
 678	current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
 679}
 680#endif
 681static void
 682e100_check_speed(unsigned long priv)
 683{
 684	struct net_device* dev = (struct net_device*)priv;
 685	struct net_local *np = netdev_priv(dev);
 686	static int led_initiated = 0;
 687	unsigned long data;
 688	int old_speed = current_speed;
 689
 690	spin_lock(&np->transceiver_lock);
 691
 692	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
 693	if (!(data & BMSR_LSTATUS)) {
 694		current_speed = 0;
 695	} else {
 696		transceiver->check_speed(dev);
 697	}
 698
 699	spin_lock(&np->led_lock);
 700	if ((old_speed != current_speed) || !led_initiated) {
 701		led_initiated = 1;
 702		e100_set_network_leds(NO_NETWORK_ACTIVITY);
 703		if (current_speed)
 704			netif_carrier_on(dev);
 705		else
 706			netif_carrier_off(dev);
 707	}
 708	spin_unlock(&np->led_lock);
 709
 710	/* Reinitialize the timer. */
 711	speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
 712	add_timer(&speed_timer);
 713
 714	spin_unlock(&np->transceiver_lock);
 715}
 716
 717static void
 718e100_negotiate(struct net_device* dev)
 719{
 720	struct net_local *np = netdev_priv(dev);
 721	unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 722						MII_ADVERTISE);
 723
 724	/* Discard old speed and duplex settings */
 725	data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
 726	          ADVERTISE_10HALF | ADVERTISE_10FULL);
 727
 728	switch (current_speed_selection) {
 729		case 10:
 730			if (current_duplex == full)
 731				data |= ADVERTISE_10FULL;
 732			else if (current_duplex == half)
 733				data |= ADVERTISE_10HALF;
 734			else
 735				data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
 736			break;
 737
 738		case 100:
 739			 if (current_duplex == full)
 740				data |= ADVERTISE_100FULL;
 741			else if (current_duplex == half)
 742				data |= ADVERTISE_100HALF;
 743			else
 744				data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
 745			break;
 746
 747		case 0: /* Auto */
 748			 if (current_duplex == full)
 749				data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
 750			else if (current_duplex == half)
 751				data |= ADVERTISE_100HALF | ADVERTISE_10HALF;
 752			else
 753				data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
 754				  ADVERTISE_100HALF | ADVERTISE_100FULL;
 755			break;
 756
 757		default: /* assume autoneg speed and duplex */
 758			data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
 759				  ADVERTISE_100HALF | ADVERTISE_100FULL;
 760			break;
 761	}
 762
 763	e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
 764
 765	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
 766	if (autoneg_normal) {
 767		/* Renegotiate with link partner */
 768		data |= BMCR_ANENABLE | BMCR_ANRESTART;
 769	} else {
 770		/* Don't negotiate speed or duplex */
 771		data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
 772
 773		/* Set speed and duplex static */
 774		if (current_speed_selection == 10)
 775			data &= ~BMCR_SPEED100;
 776		else
 777			data |= BMCR_SPEED100;
 778
 779		if (current_duplex != full)
 780			data &= ~BMCR_FULLDPLX;
 781		else
 782			data |= BMCR_FULLDPLX;
 783	}
 784	e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
 785}
 786
 787static void
 788e100_set_speed(struct net_device* dev, unsigned long speed)
 789{
 790	struct net_local *np = netdev_priv(dev);
 791
 792	spin_lock(&np->transceiver_lock);
 793	if (speed != current_speed_selection) {
 794		current_speed_selection = speed;
 795		e100_negotiate(dev);
 796	}
 797	spin_unlock(&np->transceiver_lock);
 798}
 799
 800static void
 801e100_check_duplex(unsigned long priv)
 802{
 803	struct net_device *dev = (struct net_device *)priv;
 804	struct net_local *np = netdev_priv(dev);
 805	int old_duplex;
 806
 807	spin_lock(&np->transceiver_lock);
 808	old_duplex = full_duplex;
 809	transceiver->check_duplex(dev);
 810	if (old_duplex != full_duplex) {
 811		/* Duplex changed */
 812		SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
 813		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
 814	}
 815
 816	/* Reinitialize the timer. */
 817	duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
 818	add_timer(&duplex_timer);
 819	np->mii_if.full_duplex = full_duplex;
 820	spin_unlock(&np->transceiver_lock);
 821}
 822#if defined(CONFIG_ETRAX_NO_PHY)
 823static void
 824dummy_check_duplex(struct net_device* dev)
 825{
 826	full_duplex = 1;
 827}
 828#else
 829static void
 830generic_check_duplex(struct net_device* dev)
 831{
 832	unsigned long data;
 833	struct net_local *np = netdev_priv(dev);
 834
 835	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
 836	if ((data & ADVERTISE_10FULL) ||
 837	    (data & ADVERTISE_100FULL))
 838		full_duplex = 1;
 839	else
 840		full_duplex = 0;
 841}
 842
 843static void
 844tdk_check_duplex(struct net_device* dev)
 845{
 846	unsigned long data;
 847	struct net_local *np = netdev_priv(dev);
 848
 849	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 850				 MDIO_TDK_DIAGNOSTIC_REG);
 851	full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
 852}
 853
 854static void
 855broadcom_check_duplex(struct net_device* dev)
 856{
 857	unsigned long data;
 858	struct net_local *np = netdev_priv(dev);
 859
 860	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 861				 MDIO_AUX_CTRL_STATUS_REG);
 862	full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
 863}
 864
 865static void
 866intel_check_duplex(struct net_device* dev)
 867{
 868	unsigned long data;
 869	struct net_local *np = netdev_priv(dev);
 870
 871	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 872				 MDIO_INT_STATUS_REG_2);
 873	full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
 874}
 875#endif
 876static void
 877e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
 878{
 879	struct net_local *np = netdev_priv(dev);
 880
 881	spin_lock(&np->transceiver_lock);
 882	if (new_duplex != current_duplex) {
 883		current_duplex = new_duplex;
 884		e100_negotiate(dev);
 885	}
 886	spin_unlock(&np->transceiver_lock);
 887}
 888
 889static int
 890e100_probe_transceiver(struct net_device* dev)
 891{
 892	int ret = 0;
 893
 894#if !defined(CONFIG_ETRAX_NO_PHY)
 895	unsigned int phyid_high;
 896	unsigned int phyid_low;
 897	unsigned int oui;
 898	struct transceiver_ops* ops = NULL;
 899	struct net_local *np = netdev_priv(dev);
 900
 901	spin_lock(&np->transceiver_lock);
 902
 903	/* Probe MDIO physical address */
 904	for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31;
 905	     np->mii_if.phy_id++) {
 906		if (e100_get_mdio_reg(dev,
 907				      np->mii_if.phy_id, MII_BMSR) != 0xffff)
 908			break;
 909	}
 910	if (np->mii_if.phy_id == 32) {
 911		ret = -ENODEV;
 912		goto out;
 913	}
 914
 915	/* Get manufacturer */
 916	phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1);
 917	phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2);
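	/* The PHY OUI is split across the ID registers: PHYSID1 carries OUI
	 * bits 3..18 and the top six bits of PHYSID2 carry bits 19..24.
	 * Combine them into the value matched against transceivers[]. */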
 918	oui = (phyid_high << 6) | (phyid_low >> 10);
 919
 920	for (ops = &transceivers[0]; ops->oui; ops++) {
 921		if (ops->oui == oui)
 922			break;
 923	}
 924	transceiver = ops;
 925out:
 926	spin_unlock(&np->transceiver_lock);
 927#endif
 928	return ret;
 929}
 930
 931static int
 932e100_get_mdio_reg(struct net_device *dev, int phy_id, int location)
 933{
 934	unsigned short cmd;    /* Data to be sent on MDIO port */
 935	int data;   /* Data read from MDIO */
 936	int bitCounter;
 937
 938	/* Start of frame, OP Code, Physical Address, Register Address */
 939	cmd = (MDIO_START << 14) | (MDIO_READ << 12) | (phy_id << 7) |
 940		(location << 2);
 941
 942	e100_send_mdio_cmd(cmd, 0);
 943
 944	data = 0;
 945
 946	/* Data... */
 947	for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
 948		data |= (e100_receive_mdio_bit() << bitCounter);
 949	}
 950
 951	return data;
 952}
 953
 954static void
 955e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value)
 956{
 957	int bitCounter;
 958	unsigned short cmd;
 959
 960	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (phy_id << 7) |
 961	      (location << 2);
 962
 963	e100_send_mdio_cmd(cmd, 1);
 964
 965	/* Data... */
 966	for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
 967		e100_send_mdio_bit(GET_BIT(bitCounter, value));
 968	}
 969
 970}
 971
 972static void
 973e100_send_mdio_cmd(unsigned short cmd, int write_cmd)
 974{
 975	int bitCounter;
 976	unsigned char data = 0x2;
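	/* 0x2 holds the two turnaround bits ("10") driven on the bus for
	 * write frames; for read frames the turnaround is clocked without
	 * driving so that the PHY can take over the data line. */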
 977
 978	/* Preamble */
 979	for (bitCounter = 31; bitCounter>= 0; bitCounter--)
 980		e100_send_mdio_bit(GET_BIT(bitCounter, MDIO_PREAMBLE));
 981
 982	for (bitCounter = 15; bitCounter >= 2; bitCounter--)
 983		e100_send_mdio_bit(GET_BIT(bitCounter, cmd));
 984
 985	/* Turnaround */
 986	for (bitCounter = 1; bitCounter >= 0 ; bitCounter--)
 987		if (write_cmd)
 988			e100_send_mdio_bit(GET_BIT(bitCounter, data));
 989		else
 990			e100_receive_mdio_bit();
 991}
 992
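/* Bit-bang a single MDIO bit: put the data bit on MDIO with MDC low,
 * wait about a microsecond, then raise MDC with the data bit still
 * asserted and wait again. */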
 993static void
 994e100_send_mdio_bit(unsigned char bit)
 995{
 996	*R_NETWORK_MGM_CTRL =
 997		IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
 998		IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
 999	udelay(1);
1000	*R_NETWORK_MGM_CTRL =
1001		IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
1002		IO_MASK(R_NETWORK_MGM_CTRL, mdck) |
1003		IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
1004	udelay(1);
1005}
1006
1007static unsigned char
1008e100_receive_mdio_bit(void)
1009{
1010	unsigned char bit;
1011	*R_NETWORK_MGM_CTRL = 0;
1012	bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT);
1013	udelay(1);
1014	*R_NETWORK_MGM_CTRL = IO_MASK(R_NETWORK_MGM_CTRL, mdck);
1015	udelay(1);
1016	return bit;
1017}
1018
1019static void
1020e100_reset_transceiver(struct net_device* dev)
1021{
1022	struct net_local *np = netdev_priv(dev);
1023	unsigned short cmd;
1024	unsigned short data;
1025	int bitCounter;
1026
1027	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
1028
1029	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2);
1030
1031	e100_send_mdio_cmd(cmd, 1);
1032
1033	data |= 0x8000;
1034
1035	for (bitCounter = 15; bitCounter >= 0 ; bitCounter--) {
1036		e100_send_mdio_bit(GET_BIT(bitCounter, data));
1037	}
1038}
1039
 1040/* Called by upper layers if they decide it took too long to complete
 1041 * sending a packet, so we need to reset the hardware and clean up.
1042 */
1043
1044static void
1045e100_tx_timeout(struct net_device *dev)
1046{
1047	struct net_local *np = netdev_priv(dev);
1048	unsigned long flags;
1049
1050	spin_lock_irqsave(&np->lock, flags);
1051
1052	printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
1053	       tx_done(dev) ? "IRQ problem" : "network cable problem");
1054
1055	/* remember we got an error */
1056
1057	dev->stats.tx_errors++;
1058
1059	/* reset the TX DMA in case it has hung on something */
1060
1061	RESET_DMA(NETWORK_TX_DMA_NBR);
1062	WAIT_DMA(NETWORK_TX_DMA_NBR);
1063
1064	/* Reset the transceiver. */
1065
1066	e100_reset_transceiver(dev);
1067
1068	/* and get rid of the packets that never got an interrupt */
1069	while (myFirstTxDesc != myNextTxDesc) {
1070		dev_kfree_skb(myFirstTxDesc->skb);
1071		myFirstTxDesc->skb = 0;
1072		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
1073	}
1074
1075	/* Set up transmit DMA channel so it can be restarted later */
1076	*R_DMA_CH0_FIRST = 0;
1077	*R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
1078
1079	/* tell the upper layers we're ok again */
1080
1081	netif_wake_queue(dev);
1082	spin_unlock_irqrestore(&np->lock, flags);
1083}
1084
1085
1086/* This will only be invoked if the driver is _not_ in XOFF state.
1087 * What this means is that we need not check it, and that this
1088 * invariant will hold if we make sure that the netif_*_queue()
1089 * calls are done at the proper times.
1090 */
1091
1092static int
1093e100_send_packet(struct sk_buff *skb, struct net_device *dev)
1094{
1095	struct net_local *np = netdev_priv(dev);
1096	unsigned char *buf = skb->data;
1097	unsigned long flags;
1098
1099#ifdef ETHDEBUG
 1100	printk("send packet len %d\n", skb->len);
1101#endif
1102	spin_lock_irqsave(&np->lock, flags);  /* protect from tx_interrupt and ourself */
1103
1104	myNextTxDesc->skb = skb;
1105
1106	netif_trans_update(dev); /* NETIF_F_LLTX driver :( */
1107
1108	e100_hardware_send_packet(np, buf, skb->len);
1109
1110	myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
1111
1112	/* Stop queue if full */
1113	if (myNextTxDesc == myFirstTxDesc) {
1114		netif_stop_queue(dev);
1115	}
1116
1117	spin_unlock_irqrestore(&np->lock, flags);
1118
1119	return NETDEV_TX_OK;
1120}
1121
1122/*
1123 * The typical workload of the driver:
1124 *   Handle the network interface interrupts.
1125 */
1126
1127static irqreturn_t
1128e100rxtx_interrupt(int irq, void *dev_id)
1129{
1130	struct net_device *dev = (struct net_device *)dev_id;
1131	unsigned long irqbits;
1132
1133	/*
1134	 * Note that both rx and tx interrupts are blocked at this point,
1135	 * regardless of which got us here.
1136	 */
1137
1138	irqbits = *R_IRQ_MASK2_RD;
1139
1140	/* Handle received packets */
1141	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
1142		/* acknowledge the eop interrupt */
1143
1144		*R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);
1145
1146		/* check if one or more complete packets were indeed received */
1147
1148		while ((*R_DMA_CH1_FIRST != virt_to_phys(myNextRxDesc)) &&
1149		       (myNextRxDesc != myLastRxDesc)) {
1150			/* Take out the buffer and give it to the OS, then
1151			 * allocate a new buffer to put a packet in.
1152			 */
1153			e100_rx(dev);
1154			dev->stats.rx_packets++;
1155			/* restart/continue on the channel, for safety */
1156			*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
1157			/* clear dma channel 1 eop/descr irq bits */
1158			*R_DMA_CH1_CLR_INTR =
1159				IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do) |
1160				IO_STATE(R_DMA_CH1_CLR_INTR, clr_descr, do);
1161
1162			/* now, we might have gotten another packet
1163			   so we have to loop back and check if so */
1164		}
1165	}
1166
1167	/* Report any packets that have been sent */
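	/* When the ring is completely full myFirstTxDesc == myNextTxDesc even
	 * though packets are still outstanding, so the stopped-queue state is
	 * used to tell a full ring apart from an empty one. */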
1168	while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
1169	       (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
1170		dev->stats.tx_bytes += myFirstTxDesc->skb->len;
1171		dev->stats.tx_packets++;
1172
 1173		/* The DMA has finished transmitting the data in this skb, so now
 1174		   we can release the skb memory */
1175		dev_kfree_skb_irq(myFirstTxDesc->skb);
1176		myFirstTxDesc->skb = 0;
1177		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
 1178		/* Wake up queue. */
1179		netif_wake_queue(dev);
1180	}
1181
1182	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
1183		/* acknowledge the eop interrupt. */
1184		*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
1185	}
1186
1187	return IRQ_HANDLED;
1188}
1189
1190static irqreturn_t
1191e100nw_interrupt(int irq, void *dev_id)
1192{
1193	struct net_device *dev = (struct net_device *)dev_id;
1194	unsigned long irqbits = *R_IRQ_MASK0_RD;
1195
1196	/* check for underrun irq */
1197	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, underrun, active)) {
1198		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
1199		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
1200		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
1201		dev->stats.tx_errors++;
 1202		D(printk("ethernet transmitter underrun!\n"));
1203	}
1204
1205	/* check for overrun irq */
1206	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
1207		update_rx_stats(&dev->stats); /* this will ack the irq */
1208		D(printk("ethernet receiver overrun!\n"));
1209	}
1210	/* check for excessive collision irq */
1211	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, excessive_col, active)) {
1212		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
1213		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
1214		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
1215		dev->stats.tx_errors++;
1216		D(printk("ethernet excessive collisions!\n"));
1217	}
1218	return IRQ_HANDLED;
1219}
1220
1221/* We have a good packet(s), get it/them out of the buffers. */
1222static void
1223e100_rx(struct net_device *dev)
1224{
1225	struct sk_buff *skb;
1226	int length = 0;
1227	struct net_local *np = netdev_priv(dev);
1228	unsigned char *skb_data_ptr;
1229#ifdef ETHDEBUG
1230	int i;
1231#endif
1232	etrax_eth_descr *prevRxDesc;  /* The descriptor right before myNextRxDesc */
1233	spin_lock(&np->led_lock);
1234	if (!led_active && time_after(jiffies, led_next_time)) {
1235		/* light the network leds depending on the current speed. */
1236		e100_set_network_leds(NETWORK_ACTIVITY);
1237
1238		/* Set the earliest time we may clear the LED */
1239		led_next_time = jiffies + NET_FLASH_TIME;
1240		led_active = 1;
1241		mod_timer(&clear_led_timer, jiffies + HZ/10);
1242	}
1243	spin_unlock(&np->led_lock);
1244
1245	length = myNextRxDesc->descr.hw_len - 4;
1246	dev->stats.rx_bytes += length;
1247
1248#ifdef ETHDEBUG
1249	printk("Got a packet of length %d:\n", length);
1250	/* dump the first bytes in the packet */
1251	skb_data_ptr = (unsigned char *)phys_to_virt(myNextRxDesc->descr.buf);
1252	for (i = 0; i < 8; i++) {
1253		printk("%d: %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", i * 8,
1254		       skb_data_ptr[0],skb_data_ptr[1],skb_data_ptr[2],skb_data_ptr[3],
1255		       skb_data_ptr[4],skb_data_ptr[5],skb_data_ptr[6],skb_data_ptr[7]);
1256		skb_data_ptr += 8;
1257	}
1258#endif
1259
1260	if (length < RX_COPYBREAK) {
1261		/* Small packet, copy data */
1262		skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
1263		if (!skb) {
1264			dev->stats.rx_errors++;
1265			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
1266			goto update_nextrxdesc;
1267		}
1268
1269		skb_put(skb, length - ETHER_HEAD_LEN);        /* allocate room for the packet body */
1270		skb_data_ptr = skb_push(skb, ETHER_HEAD_LEN); /* allocate room for the header */
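		/* Only the payload size was requested from dev_alloc_skb();
		 * the 14-byte Ethernet header fits in the small headroom that
		 * dev_alloc_skb() reserves, which is where skb_push() put it. */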
1271
1272#ifdef ETHDEBUG
1273		printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n",
1274		       skb->head, skb->data, skb_tail_pointer(skb),
1275		       skb_end_pointer(skb));
1276		printk("copying packet to 0x%x.\n", skb_data_ptr);
1277#endif
1278
1279		memcpy(skb_data_ptr, phys_to_virt(myNextRxDesc->descr.buf), length);
1280	}
1281	else {
1282		/* Large packet, send directly to upper layers and allocate new
1283		 * memory (aligned to cache line boundary to avoid bug).
1284		 * Before sending the skb to upper layers we must make sure
1285		 * that skb->data points to the aligned start of the packet.
1286		 */
1287		int align;
1288		struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
1289		if (!new_skb) {
1290			dev->stats.rx_errors++;
1291			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
1292			goto update_nextrxdesc;
1293		}
1294		skb = myNextRxDesc->skb;
1295		align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
1296		skb_put(skb, length + align);
1297		skb_pull(skb, align); /* Remove alignment bytes */
1298		myNextRxDesc->skb = new_skb;
1299		myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data));
1300	}
1301
1302	skb->protocol = eth_type_trans(skb, dev);
1303
1304	/* Send the packet to the upper layers */
1305	netif_rx(skb);
1306
1307  update_nextrxdesc:
1308	/* Prepare for next packet */
1309	myNextRxDesc->descr.status = 0;
1310	prevRxDesc = myNextRxDesc;
1311	myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);
1312
1313	rx_queue_len++;
1314
1315	/* Check if descriptors should be returned */
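	/* Hand a whole chunk of descriptors back to the DMA at once: flush
	 * the cache (chip bug workaround, see RX_QUEUE_THRESHOLD), mark the
	 * newest returned descriptor as end-of-list and clear the old
	 * end-of-list bit so the channel can advance into the chunk. */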
1316	if (rx_queue_len == RX_QUEUE_THRESHOLD) {
1317		flush_etrax_cache();
1318		prevRxDesc->descr.ctrl |= d_eol;
1319		myLastRxDesc->descr.ctrl &= ~d_eol;
1320		myLastRxDesc = prevRxDesc;
1321		rx_queue_len = 0;
1322	}
1323}
1324
 1325/* The inverse routine to e100_open(). */
1326static int
1327e100_close(struct net_device *dev)
1328{
1329	printk(KERN_INFO "Closing %s.\n", dev->name);
1330
1331	netif_stop_queue(dev);
1332
1333	*R_IRQ_MASK0_CLR =
1334		IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
1335		IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
1336		IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);
1337
1338	*R_IRQ_MASK2_CLR =
1339		IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
1340		IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
1341		IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
1342		IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
1343
1344	/* Stop the receiver and the transmitter */
1345
1346	RESET_DMA(NETWORK_TX_DMA_NBR);
1347	RESET_DMA(NETWORK_RX_DMA_NBR);
1348
1349	/* Flush the Tx and disable Rx here. */
1350
1351	free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
1352	free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
1353	free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
1354
1355	cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
1356	cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
1357
1358	/* Update the statistics here. */
1359
1360	update_rx_stats(&dev->stats);
1361	update_tx_stats(&dev->stats);
1362
1363	/* Stop speed/duplex timers */
1364	del_timer(&speed_timer);
1365	del_timer(&duplex_timer);
1366
1367	return 0;
1368}
1369
1370static int
1371e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1372{
1373	struct mii_ioctl_data *data = if_mii(ifr);
1374	struct net_local *np = netdev_priv(dev);
1375	int rc = 0;
 1376	int old_autoneg;
1377
1378	spin_lock(&np->lock); /* Preempt protection */
1379	switch (cmd) {
1380		/* The ioctls below should be considered obsolete but are */
1381		/* still present for compatibility with old scripts/apps  */
1382		case SET_ETH_SPEED_10:                  /* 10 Mbps */
1383			e100_set_speed(dev, 10);
1384			break;
1385		case SET_ETH_SPEED_100:                /* 100 Mbps */
1386			e100_set_speed(dev, 100);
1387			break;
1388		case SET_ETH_SPEED_AUTO:        /* Auto-negotiate speed */
1389			e100_set_speed(dev, 0);
1390			break;
1391		case SET_ETH_DUPLEX_HALF:       /* Half duplex */
1392			e100_set_duplex(dev, half);
1393			break;
1394		case SET_ETH_DUPLEX_FULL:       /* Full duplex */
1395			e100_set_duplex(dev, full);
1396			break;
1397		case SET_ETH_DUPLEX_AUTO:       /* Auto-negotiate duplex */
1398			e100_set_duplex(dev, autoneg);
1399			break;
 1400		case SET_ETH_AUTONEG:
 1401			old_autoneg = autoneg_normal;
 1402			autoneg_normal = *(int*)data;
1403			if (autoneg_normal != old_autoneg)
1404				e100_negotiate(dev);
1405			break;
1406		default:
1407			rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr),
1408						cmd, NULL);
1409			break;
1410	}
1411	spin_unlock(&np->lock);
1412	return rc;
1413}
1414
1415static int e100_get_settings(struct net_device *dev,
1416			     struct ethtool_cmd *cmd)
1417{
1418	struct net_local *np = netdev_priv(dev);
1419	int err;
1420
1421	spin_lock_irq(&np->lock);
1422	err = mii_ethtool_gset(&np->mii_if, cmd);
1423	spin_unlock_irq(&np->lock);
1424
1425	/* The PHY may support 1000baseT, but the Etrax100 does not.  */
1426	cmd->supported &= ~(SUPPORTED_1000baseT_Half
1427			    | SUPPORTED_1000baseT_Full);
1428	return err;
1429}
1430
1431static int e100_set_settings(struct net_device *dev,
1432			     struct ethtool_cmd *ecmd)
1433{
1434	if (ecmd->autoneg == AUTONEG_ENABLE) {
1435		e100_set_duplex(dev, autoneg);
1436		e100_set_speed(dev, 0);
1437	} else {
1438		e100_set_duplex(dev, ecmd->duplex == DUPLEX_HALF ? half : full);
1439		e100_set_speed(dev, ecmd->speed == SPEED_10 ? 10: 100);
1440	}
1441
1442	return 0;
1443}
1444
1445static void e100_get_drvinfo(struct net_device *dev,
1446			     struct ethtool_drvinfo *info)
1447{
1448	strlcpy(info->driver, "ETRAX 100LX", sizeof(info->driver));
1449	strlcpy(info->version, "$Revision: 1.31 $", sizeof(info->version));
1450	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
1451	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
1452}
1453
1454static int e100_nway_reset(struct net_device *dev)
1455{
1456	if (current_duplex == autoneg && current_speed_selection == 0)
1457		e100_negotiate(dev);
1458	return 0;
1459}
1460
1461static const struct ethtool_ops e100_ethtool_ops = {
1462	.get_settings	= e100_get_settings,
1463	.set_settings	= e100_set_settings,
1464	.get_drvinfo	= e100_get_drvinfo,
1465	.nway_reset	= e100_nway_reset,
1466	.get_link	= ethtool_op_get_link,
1467};
1468
1469static int
1470e100_set_config(struct net_device *dev, struct ifmap *map)
1471{
1472	struct net_local *np = netdev_priv(dev);
1473
1474	spin_lock(&np->lock); /* Preempt protection */
1475
1476	switch(map->port) {
1477		case IF_PORT_UNKNOWN:
1478			/* Use autoneg */
1479			e100_set_speed(dev, 0);
1480			e100_set_duplex(dev, autoneg);
1481			break;
1482		case IF_PORT_10BASET:
1483			e100_set_speed(dev, 10);
1484			e100_set_duplex(dev, autoneg);
1485			break;
1486		case IF_PORT_100BASET:
1487		case IF_PORT_100BASETX:
1488			e100_set_speed(dev, 100);
1489			e100_set_duplex(dev, autoneg);
1490			break;
1491		case IF_PORT_100BASEFX:
1492		case IF_PORT_10BASE2:
1493		case IF_PORT_AUI:
1494			spin_unlock(&np->lock);
1495			return -EOPNOTSUPP;
1496		default:
 1497			printk(KERN_ERR "%s: Invalid media selected\n", dev->name);
1498			spin_unlock(&np->lock);
1499			return -EINVAL;
1500	}
1501	spin_unlock(&np->lock);
1502	return 0;
1503}
1504
1505static void
1506update_rx_stats(struct net_device_stats *es)
1507{
1508	unsigned long r = *R_REC_COUNTERS;
1509	/* update stats relevant to reception errors */
1510	es->rx_fifo_errors += IO_EXTRACT(R_REC_COUNTERS, congestion, r);
1511	es->rx_crc_errors += IO_EXTRACT(R_REC_COUNTERS, crc_error, r);
1512	es->rx_frame_errors += IO_EXTRACT(R_REC_COUNTERS, alignment_error, r);
1513	es->rx_length_errors += IO_EXTRACT(R_REC_COUNTERS, oversize, r);
1514}
1515
1516static void
1517update_tx_stats(struct net_device_stats *es)
1518{
1519	unsigned long r = *R_TR_COUNTERS;
1520	/* update stats relevant to transmission errors */
1521	es->collisions +=
1522		IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
1523		IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
1524}
1525
1526/*
1527 * Get the current statistics.
1528 * This may be called with the card open or closed.
1529 */
1530static struct net_device_stats *
1531e100_get_stats(struct net_device *dev)
1532{
1533	struct net_local *lp = netdev_priv(dev);
1534	unsigned long flags;
1535
1536	spin_lock_irqsave(&lp->lock, flags);
1537
1538	update_rx_stats(&dev->stats);
1539	update_tx_stats(&dev->stats);
1540
1541	spin_unlock_irqrestore(&lp->lock, flags);
1542	return &dev->stats;
1543}
1544
1545/*
1546 * Set or clear the multicast filter for this adaptor.
1547 * num_addrs == -1	Promiscuous mode, receive all packets
1548 * num_addrs == 0	Normal mode, clear multicast list
1549 * num_addrs > 0	Multicast mode, receive normal and MC packets,
1550 *			and do best-effort filtering.
1551 */
1552static void
1553set_multicast_list(struct net_device *dev)
1554{
1555	struct net_local *lp = netdev_priv(dev);
1556	int num_addr = netdev_mc_count(dev);
1557	unsigned long int lo_bits;
1558	unsigned long int hi_bits;
1559
1560	spin_lock(&lp->lock);
1561	if (dev->flags & IFF_PROMISC) {
1562		/* promiscuous mode */
1563		lo_bits = 0xfffffffful;
1564		hi_bits = 0xfffffffful;
1565
1566		/* Enable individual receive */
1567		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, receive);
1568		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
1569	} else if (dev->flags & IFF_ALLMULTI) {
1570		/* enable all multicasts */
1571		lo_bits = 0xfffffffful;
1572		hi_bits = 0xfffffffful;
1573
1574		/* Disable individual receive */
1575		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
1576		*R_NETWORK_REC_CONFIG =  network_rec_config_shadow;
1577	} else if (num_addr == 0) {
1578		/* Normal, clear the mc list */
1579		lo_bits = 0x00000000ul;
1580		hi_bits = 0x00000000ul;
1581
1582		/* Disable individual receive */
1583		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
1584		*R_NETWORK_REC_CONFIG =  network_rec_config_shadow;
1585	} else {
1586		/* MC mode, receive normal and MC packets */
1587		char hash_ix;
1588		struct netdev_hw_addr *ha;
1589		char *baddr;
1590
1591		lo_bits = 0x00000000ul;
1592		hi_bits = 0x00000000ul;
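		/* Fold each 48-bit multicast address down to a 6-bit hash by
		 * XOR:ing 6-bit slices of the address bytes together; the hash
		 * selects one bit in the 64-bit group-address filter formed by
		 * R_NETWORK_GA_1:R_NETWORK_GA_0. */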
1593		netdev_for_each_mc_addr(ha, dev) {
1594			/* Calculate the hash index for the GA registers */
1595
1596			hash_ix = 0;
1597			baddr = ha->addr;
1598			hash_ix ^= (*baddr) & 0x3f;
1599			hash_ix ^= ((*baddr) >> 6) & 0x03;
1600			++baddr;
1601			hash_ix ^= ((*baddr) << 2) & 0x03c;
1602			hash_ix ^= ((*baddr) >> 4) & 0xf;
1603			++baddr;
1604			hash_ix ^= ((*baddr) << 4) & 0x30;
1605			hash_ix ^= ((*baddr) >> 2) & 0x3f;
1606			++baddr;
1607			hash_ix ^= (*baddr) & 0x3f;
1608			hash_ix ^= ((*baddr) >> 6) & 0x03;
1609			++baddr;
1610			hash_ix ^= ((*baddr) << 2) & 0x03c;
1611			hash_ix ^= ((*baddr) >> 4) & 0xf;
1612			++baddr;
1613			hash_ix ^= ((*baddr) << 4) & 0x30;
1614			hash_ix ^= ((*baddr) >> 2) & 0x3f;
1615
1616			hash_ix &= 0x3f;
1617
1618			if (hash_ix >= 32) {
1619				hi_bits |= (1 << (hash_ix-32));
1620			} else {
1621				lo_bits |= (1 << hash_ix);
1622			}
1623		}
1624		/* Disable individual receive */
1625		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
1626		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
1627	}
1628	*R_NETWORK_GA_0 = lo_bits;
1629	*R_NETWORK_GA_1 = hi_bits;
1630	spin_unlock(&lp->lock);
1631}
1632
1633void
1634e100_hardware_send_packet(struct net_local *np, char *buf, int length)
1635{
1636	D(printk("e100 send pack, buf 0x%x len %d\n", buf, length));
1637
1638	spin_lock(&np->led_lock);
1639	if (!led_active && time_after(jiffies, led_next_time)) {
1640		/* light the network leds depending on the current speed. */
1641		e100_set_network_leds(NETWORK_ACTIVITY);
1642
1643		/* Set the earliest time we may clear the LED */
1644		led_next_time = jiffies + NET_FLASH_TIME;
1645		led_active = 1;
1646		mod_timer(&clear_led_timer, jiffies + HZ/10);
1647	}
1648	spin_unlock(&np->led_lock);
1649
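	/* Queue the buffer by making this descriptor the new end of the TX
	 * list: the previous end-of-list bit is cleared so the DMA can
	 * advance into it, and the channel is then (re)started. */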
1650	/* configure the tx dma descriptor */
1651	myNextTxDesc->descr.sw_len = length;
1652	myNextTxDesc->descr.ctrl = d_eop | d_eol | d_wait;
1653	myNextTxDesc->descr.buf = virt_to_phys(buf);
1654
 1655	/* Move end of list */
 1656	myLastTxDesc->descr.ctrl &= ~d_eol;
 1657	myLastTxDesc = myNextTxDesc;
1658
1659	/* Restart DMA channel */
1660	*R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, restart);
1661}
1662
1663static void
1664e100_clear_network_leds(unsigned long dummy)
1665{
1666	struct net_device *dev = (struct net_device *)dummy;
1667	struct net_local *np = netdev_priv(dev);
1668
1669	spin_lock(&np->led_lock);
1670
1671	if (led_active && time_after(jiffies, led_next_time)) {
1672		e100_set_network_leds(NO_NETWORK_ACTIVITY);
1673
1674		/* Set the earliest time we may set the LED */
1675		led_next_time = jiffies + NET_FLASH_PAUSE;
1676		led_active = 0;
1677	}
1678
1679	spin_unlock(&np->led_lock);
1680}
1681
1682static void
1683e100_set_network_leds(int active)
1684{
1685#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK)
1686	int light_leds = (active == NO_NETWORK_ACTIVITY);
1687#elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY)
1688	int light_leds = (active == NETWORK_ACTIVITY);
1689#else
1690#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY"
1691#endif
1692
1693	if (!current_speed) {
 1694		/* Turn the LED off, link is down */
1695		CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
1696	} else if (light_leds) {
1697		if (current_speed == 10) {
1698			CRIS_LED_NETWORK_SET(CRIS_LED_ORANGE);
1699		} else {
1700			CRIS_LED_NETWORK_SET(CRIS_LED_GREEN);
1701		}
1702	} else {
1703		CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
1704	}
1705}
1706
1707#ifdef CONFIG_NET_POLL_CONTROLLER
1708static void
1709e100_netpoll(struct net_device* netdev)
1710{
1711	e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
1712}
1713#endif
1714
1715
1716static int __init
1717e100_boot_setup(char* str)
1718{
1719	struct sockaddr sa = {0};
1720	int i;
1721
1722	/* Parse the colon separated Ethernet station address */
1723	for (i = 0; i <  ETH_ALEN; i++) {
1724		unsigned int tmp;
1725		if (sscanf(str + 3*i, "%2x", &tmp) != 1) {
 1726			printk(KERN_WARNING "Malformed station address\n");
1727			return 0;
1728		}
1729		sa.sa_data[i] = (char)tmp;
1730	}
1731
1732	default_mac = sa;
1733	return 1;
1734}
1735
1736__setup("etrax100_eth=", e100_boot_setup);