   1/*
   2 * e100net.c: A network driver for the ETRAX 100LX network controller.
   3 *
   4 * Copyright (c) 1998-2002 Axis Communications AB.
   5 *
   6 * The outline of this driver comes from skeleton.c.
   7 *
   8 */
   9
  10
  11#include <linux/module.h>
  12
  13#include <linux/kernel.h>
  14#include <linux/delay.h>
  15#include <linux/types.h>
  16#include <linux/fcntl.h>
  17#include <linux/interrupt.h>
  18#include <linux/ptrace.h>
  19#include <linux/ioport.h>
  20#include <linux/in.h>
  21#include <linux/string.h>
  22#include <linux/spinlock.h>
  23#include <linux/errno.h>
  24#include <linux/init.h>
  25#include <linux/bitops.h>
  26
  27#include <linux/if.h>
  28#include <linux/mii.h>
  29#include <linux/netdevice.h>
  30#include <linux/etherdevice.h>
  31#include <linux/skbuff.h>
  32#include <linux/ethtool.h>
  33
   34#include <arch/svinto.h>     /* DMA and register descriptions */
  35#include <asm/io.h>         /* CRIS_LED_* I/O functions */
  36#include <asm/irq.h>
  37#include <asm/dma.h>
  38#include <asm/system.h>
  39#include <asm/ethernet.h>
  40#include <asm/cache.h>
  41#include <arch/io_interface_mux.h>
  42
  43//#define ETHDEBUG
  44#define D(x)
  45
   46/*
   47 * The name of the card. It is used for messages and in the requests
   48 * for I/O regions, IRQs and DMA channels.
   49 */
  50
  51static const char* cardname = "ETRAX 100LX built-in ethernet controller";
  52
   53/* A default Ethernet address. High-level software will set the real one later */
  54
  55static struct sockaddr default_mac = {
  56	0,
  57	{ 0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00 }
  58};
  59
   60/* Information that needs to be kept for each board. */
  61struct net_local {
  62	struct mii_if_info mii_if;
  63
  64	/* Tx control lock.  This protects the transmit buffer ring
  65	 * state along with the "tx full" state of the driver.  This
  66	 * means all netif_queue flow control actions are protected
  67	 * by this lock as well.
  68	 */
  69	spinlock_t lock;
  70
  71	spinlock_t led_lock; /* Protect LED state */
  72	spinlock_t transceiver_lock; /* Protect transceiver state. */
  73};
  74
  75typedef struct etrax_eth_descr
  76{
  77	etrax_dma_descr descr;
  78	struct sk_buff* skb;
  79} etrax_eth_descr;
  80
   81/* Some transceivers require special handling */
  82struct transceiver_ops
  83{
  84	unsigned int oui;
  85	void (*check_speed)(struct net_device* dev);
  86	void (*check_duplex)(struct net_device* dev);
  87};
  88
  89/* Duplex settings */
  90enum duplex
  91{
  92	half,
  93	full,
  94	autoneg
  95};
  96
  97/* Dma descriptors etc. */
  98
  99#define MAX_MEDIA_DATA_SIZE 1522
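     /* 1522 bytes = 1500-byte payload + 14-byte Ethernet header + 4-byte
      * VLAN tag + 4-byte FCS, i.e. the largest VLAN-tagged frame; this
      * matches the "size1522" receiver setting written in e100_open(). */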
 100
 101#define MIN_PACKET_LEN      46
 102#define ETHER_HEAD_LEN      14
 103
 104/*
 105** MDIO constants.
 106*/
 107#define MDIO_START                          0x1
 108#define MDIO_READ                           0x2
 109#define MDIO_WRITE                          0x1
 110#define MDIO_PREAMBLE              0xfffffffful
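     /*
      * These values are assembled into the 16-bit MDIO management frame
      * header in e100_get_mdio_reg()/e100_set_mdio_reg(): start bits in
      * [15:14], opcode (read/write) in [13:12], PHY address in [11:7] and
      * register address in [6:2]; the two remaining bits are the turnaround
      * slot handled in e100_send_mdio_cmd(). Every frame is preceded by 32
      * preamble bits of ones (MDIO_PREAMBLE).
      */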
 111
 112/* Broadcom specific */
 113#define MDIO_AUX_CTRL_STATUS_REG           0x18
 114#define MDIO_BC_FULL_DUPLEX_IND             0x1
 115#define MDIO_BC_SPEED                       0x2
 116
 117/* TDK specific */
 118#define MDIO_TDK_DIAGNOSTIC_REG              18
 119#define MDIO_TDK_DIAGNOSTIC_RATE          0x400
 120#define MDIO_TDK_DIAGNOSTIC_DPLX          0x800
 121
  122/* Intel LXT972A specific */
 123#define MDIO_INT_STATUS_REG_2			0x0011
 124#define MDIO_INT_FULL_DUPLEX_IND       (1 << 9)
 125#define MDIO_INT_SPEED                (1 << 14)
 126
 127/* Network flash constants */
 128#define NET_FLASH_TIME                  (HZ/50) /* 20 ms */
 129#define NET_FLASH_PAUSE                (HZ/100) /* 10 ms */
 130#define NET_LINK_UP_CHECK_INTERVAL       (2*HZ) /* 2 s   */
 131#define NET_DUPLEX_CHECK_INTERVAL        (2*HZ) /* 2 s   */
 132
 133#define NO_NETWORK_ACTIVITY 0
 134#define NETWORK_ACTIVITY    1
 135
 136#define NBR_OF_RX_DESC     32
 137#define NBR_OF_TX_DESC     16
 138
 139/* Large packets are sent directly to upper layers while small packets are */
 140/* copied (to reduce memory waste). The following constant decides the breakpoint */
 141#define RX_COPYBREAK 256
 142
 143/* Due to a chip bug we need to flush the cache when descriptors are returned */
 144/* to the DMA. To decrease performance impact we return descriptors in chunks. */
 145/* The following constant determines the number of descriptors to return. */
  146#define RX_QUEUE_THRESHOLD  (NBR_OF_RX_DESC / 2)
 147
 148#define GET_BIT(bit,val)   (((val) >> (bit)) & 0x01)
 149
 150/* Define some macros to access ETRAX 100 registers */
 151#define SETF(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
 152					  IO_FIELD_(reg##_, field##_, val)
 153#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
 154					  IO_STATE_(reg##_, field##_, _##val)
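     /*
      * For illustration: SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG,
      * broadcast, receive) expands to
      *
      *   network_rec_config_shadow =
      *       (network_rec_config_shadow &
      *        ~IO_MASK_(R_NETWORK_REC_CONFIG_, broadcast_)) |
      *       IO_STATE_(R_NETWORK_REC_CONFIG_, broadcast_, _receive);
      *
      * i.e. only the named field of the shadow copy changes; the shadow is
      * then written to the real register in one go (see e100_open()).
      */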
 155
  156static etrax_eth_descr *myNextRxDesc;  /* Points to the next descriptor
  157                                          to be processed */
 158static etrax_eth_descr *myLastRxDesc;  /* The last processed descriptor */
 159
 160static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
 161
 162static etrax_eth_descr* myFirstTxDesc; /* First packet not yet sent */
 163static etrax_eth_descr* myLastTxDesc;  /* End of send queue */
 164static etrax_eth_descr* myNextTxDesc;  /* Next descriptor to use */
 165static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
 166
 167static unsigned int network_rec_config_shadow = 0;
 168
 169static unsigned int network_tr_ctrl_shadow = 0;
 170
 171/* Network speed indication. */
 172static DEFINE_TIMER(speed_timer, NULL, 0, 0);
 173static DEFINE_TIMER(clear_led_timer, NULL, 0, 0);
 174static int current_speed; /* Speed read from transceiver */
 175static int current_speed_selection; /* Speed selected by user */
 176static unsigned long led_next_time;
 177static int led_active;
 178static int rx_queue_len;
 179
 180/* Duplex */
 181static DEFINE_TIMER(duplex_timer, NULL, 0, 0);
 182static int full_duplex;
 183static enum duplex current_duplex;
 184
 185/* Index to functions, as function prototypes. */
 186
 187static int etrax_ethernet_init(void);
 188
 189static int e100_open(struct net_device *dev);
 190static int e100_set_mac_address(struct net_device *dev, void *addr);
 191static int e100_send_packet(struct sk_buff *skb, struct net_device *dev);
 192static irqreturn_t e100rxtx_interrupt(int irq, void *dev_id);
 193static irqreturn_t e100nw_interrupt(int irq, void *dev_id);
 194static void e100_rx(struct net_device *dev);
 195static int e100_close(struct net_device *dev);
 196static int e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 197static int e100_set_config(struct net_device* dev, struct ifmap* map);
 198static void e100_tx_timeout(struct net_device *dev);
 199static struct net_device_stats *e100_get_stats(struct net_device *dev);
 200static void set_multicast_list(struct net_device *dev);
 201static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
 202static void update_rx_stats(struct net_device_stats *);
 203static void update_tx_stats(struct net_device_stats *);
 204static int e100_probe_transceiver(struct net_device* dev);
 205
 206static void e100_check_speed(unsigned long priv);
 207static void e100_set_speed(struct net_device* dev, unsigned long speed);
 208static void e100_check_duplex(unsigned long priv);
 209static void e100_set_duplex(struct net_device* dev, enum duplex);
 210static void e100_negotiate(struct net_device* dev);
 211
 212static int e100_get_mdio_reg(struct net_device *dev, int phy_id, int location);
 213static void e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value);
 214
 215static void e100_send_mdio_cmd(unsigned short cmd, int write_cmd);
 216static void e100_send_mdio_bit(unsigned char bit);
 217static unsigned char e100_receive_mdio_bit(void);
 218static void e100_reset_transceiver(struct net_device* net);
 219
 220static void e100_clear_network_leds(unsigned long dummy);
 221static void e100_set_network_leds(int active);
 222
 223static const struct ethtool_ops e100_ethtool_ops;
 224#if defined(CONFIG_ETRAX_NO_PHY)
 225static void dummy_check_speed(struct net_device* dev);
 226static void dummy_check_duplex(struct net_device* dev);
 227#else
 228static void broadcom_check_speed(struct net_device* dev);
 229static void broadcom_check_duplex(struct net_device* dev);
 230static void tdk_check_speed(struct net_device* dev);
 231static void tdk_check_duplex(struct net_device* dev);
 232static void intel_check_speed(struct net_device* dev);
 233static void intel_check_duplex(struct net_device* dev);
 234static void generic_check_speed(struct net_device* dev);
 235static void generic_check_duplex(struct net_device* dev);
 236#endif
 237#ifdef CONFIG_NET_POLL_CONTROLLER
 238static void e100_netpoll(struct net_device* dev);
 239#endif
 240
 241static int autoneg_normal = 1;
 242
 243struct transceiver_ops transceivers[] =
 244{
 245#if defined(CONFIG_ETRAX_NO_PHY)
 246	{0x0000, dummy_check_speed, dummy_check_duplex}        /* Dummy */
 247#else
 248	{0x1018, broadcom_check_speed, broadcom_check_duplex},  /* Broadcom */
 249	{0xC039, tdk_check_speed, tdk_check_duplex},            /* TDK 2120 */
 250	{0x039C, tdk_check_speed, tdk_check_duplex},            /* TDK 2120C */
 251        {0x04de, intel_check_speed, intel_check_duplex},     	/* Intel LXT972A*/
 252	{0x0000, generic_check_speed, generic_check_duplex}     /* Generic, must be last */
 253#endif
 254};
 255
 256struct transceiver_ops* transceiver = &transceivers[0];
 257
 258static const struct net_device_ops e100_netdev_ops = {
 259	.ndo_open		= e100_open,
 260	.ndo_stop		= e100_close,
 261	.ndo_start_xmit		= e100_send_packet,
 262	.ndo_tx_timeout		= e100_tx_timeout,
 263	.ndo_get_stats		= e100_get_stats,
 264	.ndo_set_multicast_list	= set_multicast_list,
 265	.ndo_do_ioctl		= e100_ioctl,
 266	.ndo_set_mac_address	= e100_set_mac_address,
 267	.ndo_validate_addr	= eth_validate_addr,
 268	.ndo_change_mtu		= eth_change_mtu,
 269	.ndo_set_config		= e100_set_config,
 270#ifdef CONFIG_NET_POLL_CONTROLLER
 271	.ndo_poll_controller	= e100_netpoll,
 272#endif
 273};
 274
 275#define tx_done(dev) (*R_DMA_CH0_CMD == 0)
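     /* Presumably R_DMA_CH0_CMD reads back as zero once the transmit DMA
      * channel has gone idle; tx_done() is only used by the timeout handler
      * to tell a lost interrupt apart from a stalled transmission. */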
 276
  277/*
  278 * Probe for the built-in network controller and set it up:
  279 * allocate the net_device, initialise the receive and transmit
  280 * DMA descriptor rings, register the interface and install the
  281 * default MAC address and the link check timers.
  282 * Returns 0 on success or a negative errno on failure.
  283 */
 284
 285static int __init
 286etrax_ethernet_init(void)
 287{
 288	struct net_device *dev;
 289        struct net_local* np;
 290	int i, err;
 291
 292	printk(KERN_INFO
 293	       "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n");
 294
 295	if (cris_request_io_interface(if_eth, cardname)) {
 296		printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
 297		return -EBUSY;
 298	}
 299
 300	dev = alloc_etherdev(sizeof(struct net_local));
 301	if (!dev)
 302		return -ENOMEM;
 303
 304	np = netdev_priv(dev);
 305
 306	/* we do our own locking */
 307	dev->features |= NETIF_F_LLTX;
 308
 309	dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
 310
 311	/* now setup our etrax specific stuff */
 312
 313	dev->irq = NETWORK_DMA_RX_IRQ_NBR; /* we really use DMATX as well... */
 314	dev->dma = NETWORK_RX_DMA_NBR;
 315
 316	/* fill in our handlers so the network layer can talk to us in the future */
 317
 318	dev->ethtool_ops	= &e100_ethtool_ops;
 319	dev->netdev_ops		= &e100_netdev_ops;
 320
 321	spin_lock_init(&np->lock);
 322	spin_lock_init(&np->led_lock);
 323	spin_lock_init(&np->transceiver_lock);
 324
 325	/* Initialise the list of Etrax DMA-descriptors */
 326
 327	/* Initialise receive descriptors */
 328
 329	for (i = 0; i < NBR_OF_RX_DESC; i++) {
  330		/* Allocate two extra cachelines to make sure that the buffer
  331		 * used by DMA does not share a cacheline with any other data
  332		 * (to avoid the cache bug).
  333		 */
 334		RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
 335		if (!RxDescList[i].skb)
 336			return -ENOMEM;
 337		RxDescList[i].descr.ctrl   = 0;
 338		RxDescList[i].descr.sw_len = MAX_MEDIA_DATA_SIZE;
 339		RxDescList[i].descr.next   = virt_to_phys(&RxDescList[i + 1]);
 340		RxDescList[i].descr.buf    = L1_CACHE_ALIGN(virt_to_phys(RxDescList[i].skb->data));
 341		RxDescList[i].descr.status = 0;
 342		RxDescList[i].descr.hw_len = 0;
 343		prepare_rx_descriptor(&RxDescList[i].descr);
 344	}
 345
 346	RxDescList[NBR_OF_RX_DESC - 1].descr.ctrl   = d_eol;
 347	RxDescList[NBR_OF_RX_DESC - 1].descr.next   = virt_to_phys(&RxDescList[0]);
 348	rx_queue_len = 0;
 349
 350	/* Initialize transmit descriptors */
 351	for (i = 0; i < NBR_OF_TX_DESC; i++) {
 352		TxDescList[i].descr.ctrl   = 0;
 353		TxDescList[i].descr.sw_len = 0;
 354		TxDescList[i].descr.next   = virt_to_phys(&TxDescList[i + 1].descr);
 355		TxDescList[i].descr.buf    = 0;
 356		TxDescList[i].descr.status = 0;
 357		TxDescList[i].descr.hw_len = 0;
  358		TxDescList[i].skb = NULL;
 359	}
 360
 361	TxDescList[NBR_OF_TX_DESC - 1].descr.ctrl   = d_eol;
 362	TxDescList[NBR_OF_TX_DESC - 1].descr.next   = virt_to_phys(&TxDescList[0].descr);
 363
 364	/* Initialise initial pointers */
 365
 366	myNextRxDesc  = &RxDescList[0];
 367	myLastRxDesc  = &RxDescList[NBR_OF_RX_DESC - 1];
 368	myFirstTxDesc = &TxDescList[0];
 369	myNextTxDesc  = &TxDescList[0];
 370	myLastTxDesc  = &TxDescList[NBR_OF_TX_DESC - 1];
 371
 372	/* Register device */
 373	err = register_netdev(dev);
 374	if (err) {
 375		free_netdev(dev);
 376		return err;
 377	}
 378
 379	/* set the default MAC address */
 380
 381	e100_set_mac_address(dev, &default_mac);
 382
 383	/* Initialize speed indicator stuff. */
 384
 385	current_speed = 10;
 386	current_speed_selection = 0; /* Auto */
 387	speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
 388	speed_timer.data = (unsigned long)dev;
 389	speed_timer.function = e100_check_speed;
 390
 391	clear_led_timer.function = e100_clear_network_leds;
 392	clear_led_timer.data = (unsigned long)dev;
 393
 394	full_duplex = 0;
 395	current_duplex = autoneg;
 396	duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
 397        duplex_timer.data = (unsigned long)dev;
 398	duplex_timer.function = e100_check_duplex;
 399
 400        /* Initialize mii interface */
 401	np->mii_if.phy_id_mask = 0x1f;
 402	np->mii_if.reg_num_mask = 0x1f;
 403	np->mii_if.dev = dev;
 404	np->mii_if.mdio_read = e100_get_mdio_reg;
 405	np->mii_if.mdio_write = e100_set_mdio_reg;
 406
 407	/* Initialize group address registers to make sure that no */
 408	/* unwanted addresses are matched */
 409	*R_NETWORK_GA_0 = 0x00000000;
 410	*R_NETWORK_GA_1 = 0x00000000;
 411
 412	/* Initialize next time the led can flash */
 413	led_next_time = jiffies;
 414	return 0;
 415}
 416
  417/* Set the MAC address of the interface. Called from the core after a
  418 * SIOCSIFADDR ioctl, and from the initialisation code above.
 419 */
 420
 421static int
 422e100_set_mac_address(struct net_device *dev, void *p)
 423{
 424	struct net_local *np = netdev_priv(dev);
 425	struct sockaddr *addr = p;
 426
 427	spin_lock(&np->lock); /* preemption protection */
 428
 429	/* remember it */
 430
 431	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 432
 433	/* Write it to the hardware.
 434	 * Note the way the address is wrapped:
 435	 * *R_NETWORK_SA_0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
 436	 * *R_NETWORK_SA_1 = a0_4 | (a0_5 << 8);
 437	 */
 438
 439	*R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
 440		(dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
 441	*R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
 442	*R_NETWORK_SA_2 = 0;
 443
 444	/* show it in the log as well */
 445
 446	printk(KERN_INFO "%s: changed MAC to %pM\n", dev->name, dev->dev_addr);
 447
 448	spin_unlock(&np->lock);
 449
 450	return 0;
 451}
 452
 453/*
 454 * Open/initialize the board. This is called (in the current kernel)
 455 * sometime after booting when the 'ifconfig' program is run.
 456 *
 457 * This routine should set everything up anew at each open, even
 458 * registers that "should" only need to be set once at boot, so that
  459 * there is a non-reboot way to recover if something goes wrong.
 460 */
 461
 462static int
 463e100_open(struct net_device *dev)
 464{
 465	unsigned long flags;
 466
 467	/* enable the MDIO output pin */
 468
 469	*R_NETWORK_MGM_CTRL = IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable);
 470
 471	*R_IRQ_MASK0_CLR =
 472		IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
 473		IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
 474		IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);
 475
 476	/* clear dma0 and 1 eop and descr irq masks */
 477	*R_IRQ_MASK2_CLR =
 478		IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
 479		IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
 480		IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
 481		IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
 482
 483	/* Reset and wait for the DMA channels */
 484
 485	RESET_DMA(NETWORK_TX_DMA_NBR);
 486	RESET_DMA(NETWORK_RX_DMA_NBR);
 487	WAIT_DMA(NETWORK_TX_DMA_NBR);
 488	WAIT_DMA(NETWORK_RX_DMA_NBR);
 489
 490	/* Initialise the etrax network controller */
 491
 492	/* allocate the irq corresponding to the receiving DMA */
 493
 494	if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt, 0, cardname,
 495			(void *)dev)) {
 496		goto grace_exit0;
 497	}
 498
 499	/* allocate the irq corresponding to the transmitting DMA */
 500
 501	if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100rxtx_interrupt, 0,
 502			cardname, (void *)dev)) {
 503		goto grace_exit1;
 504	}
 505
 506	/* allocate the irq corresponding to the network errors etc */
 507
 508	if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0,
 509			cardname, (void *)dev)) {
 510		goto grace_exit2;
 511	}
 512
 513	/*
 514	 * Always allocate the DMA channels after the IRQ,
 515	 * and clean up on failure.
 516	 */
 517
 518	if (cris_request_dma(NETWORK_TX_DMA_NBR,
 519	                     cardname,
 520	                     DMA_VERBOSE_ON_ERROR,
 521	                     dma_eth)) {
 522		goto grace_exit3;
 523        }
 524
 525	if (cris_request_dma(NETWORK_RX_DMA_NBR,
 526	                     cardname,
 527	                     DMA_VERBOSE_ON_ERROR,
 528	                     dma_eth)) {
 529		goto grace_exit4;
 530        }
 531
 532	/* give the HW an idea of what MAC address we want */
 533
 534	*R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
 535		(dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
 536	*R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
 537	*R_NETWORK_SA_2 = 0;
 538
 539#if 0
 540	/* use promiscuous mode for testing */
 541	*R_NETWORK_GA_0 = 0xffffffff;
 542	*R_NETWORK_GA_1 = 0xffffffff;
 543
 544	*R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
 545#else
 546	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
 547	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
 548	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
 549	SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
 550	*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
 551#endif
 552
 553	*R_NETWORK_GEN_CONFIG =
 554		IO_STATE(R_NETWORK_GEN_CONFIG, phy,    mii_clk) |
 555		IO_STATE(R_NETWORK_GEN_CONFIG, enable, on);
 556
 557	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
 558	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, delay, none);
 559	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cancel, dont);
 560	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cd, enable);
 561	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, retry, enable);
 562	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, pad, enable);
 563	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
 564	*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
 565
 566	local_irq_save(flags);
 567
 568	/* enable the irq's for ethernet DMA */
 569
 570	*R_IRQ_MASK2_SET =
 571		IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
 572		IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
 573
 574	*R_IRQ_MASK0_SET =
 575		IO_STATE(R_IRQ_MASK0_SET, overrun,       set) |
 576		IO_STATE(R_IRQ_MASK0_SET, underrun,      set) |
 577		IO_STATE(R_IRQ_MASK0_SET, excessive_col, set);
 578
 579	/* make sure the irqs are cleared */
 580
 581	*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
 582	*R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);
 583
  584	/* make sure the receive and transmit error counters are cleared */
 585
 586	(void)*R_REC_COUNTERS;  /* dummy read */
 587	(void)*R_TR_COUNTERS;   /* dummy read */
 588
 589	/* start the receiving DMA channel so we can receive packets from now on */
 590
 591	*R_DMA_CH1_FIRST = virt_to_phys(myNextRxDesc);
 592	*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, start);
 593
 594	/* Set up transmit DMA channel so it can be restarted later */
 595
 596	*R_DMA_CH0_FIRST = 0;
 597	*R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
 598	netif_start_queue(dev);
 599
 600	local_irq_restore(flags);
 601
 602	/* Probe for transceiver */
 603	if (e100_probe_transceiver(dev))
 604		goto grace_exit5;
 605
 606	/* Start duplex/speed timers */
 607	add_timer(&speed_timer);
 608	add_timer(&duplex_timer);
 609
  610	/* We are now ready to accept transmit requests from
  611	 * the queueing layer of the networking stack.
 612	 */
 613	netif_carrier_on(dev);
 614
 615	return 0;
 616
 617grace_exit5:
 618	cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
 619grace_exit4:
 620	cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
 621grace_exit3:
 622	free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
 623grace_exit2:
 624	free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
 625grace_exit1:
 626	free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
 627grace_exit0:
 628	return -EAGAIN;
 629}
 630
 631#if defined(CONFIG_ETRAX_NO_PHY)
 632static void
 633dummy_check_speed(struct net_device* dev)
 634{
 635	current_speed = 100;
 636}
 637#else
 638static void
 639generic_check_speed(struct net_device* dev)
 640{
 641	unsigned long data;
 642	struct net_local *np = netdev_priv(dev);
 643
 644	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
 645	if ((data & ADVERTISE_100FULL) ||
 646	    (data & ADVERTISE_100HALF))
 647		current_speed = 100;
 648	else
 649		current_speed = 10;
 650}
 651
 652static void
 653tdk_check_speed(struct net_device* dev)
 654{
 655	unsigned long data;
 656	struct net_local *np = netdev_priv(dev);
 657
 658	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 659				 MDIO_TDK_DIAGNOSTIC_REG);
 660	current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
 661}
 662
 663static void
 664broadcom_check_speed(struct net_device* dev)
 665{
 666	unsigned long data;
 667	struct net_local *np = netdev_priv(dev);
 668
 669	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 670				 MDIO_AUX_CTRL_STATUS_REG);
 671	current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
 672}
 673
 674static void
 675intel_check_speed(struct net_device* dev)
 676{
 677	unsigned long data;
 678	struct net_local *np = netdev_priv(dev);
 679
 680	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 681				 MDIO_INT_STATUS_REG_2);
 682	current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
 683}
 684#endif
 685static void
 686e100_check_speed(unsigned long priv)
 687{
 688	struct net_device* dev = (struct net_device*)priv;
 689	struct net_local *np = netdev_priv(dev);
 690	static int led_initiated = 0;
 691	unsigned long data;
 692	int old_speed = current_speed;
 693
 694	spin_lock(&np->transceiver_lock);
 695
 696	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
 697	if (!(data & BMSR_LSTATUS)) {
 698		current_speed = 0;
 699	} else {
 700		transceiver->check_speed(dev);
 701	}
 702
 703	spin_lock(&np->led_lock);
 704	if ((old_speed != current_speed) || !led_initiated) {
 705		led_initiated = 1;
 706		e100_set_network_leds(NO_NETWORK_ACTIVITY);
 707		if (current_speed)
 708			netif_carrier_on(dev);
 709		else
 710			netif_carrier_off(dev);
 711	}
 712	spin_unlock(&np->led_lock);
 713
 714	/* Reinitialize the timer. */
 715	speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
 716	add_timer(&speed_timer);
 717
 718	spin_unlock(&np->transceiver_lock);
 719}
 720
 721static void
 722e100_negotiate(struct net_device* dev)
 723{
 724	struct net_local *np = netdev_priv(dev);
 725	unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 726						MII_ADVERTISE);
 727
 728	/* Discard old speed and duplex settings */
 729	data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
 730	          ADVERTISE_10HALF | ADVERTISE_10FULL);
 731
 732	switch (current_speed_selection) {
 733		case 10:
 734			if (current_duplex == full)
 735				data |= ADVERTISE_10FULL;
 736			else if (current_duplex == half)
 737				data |= ADVERTISE_10HALF;
 738			else
 739				data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
 740			break;
 741
 742		case 100:
 743			 if (current_duplex == full)
 744				data |= ADVERTISE_100FULL;
 745			else if (current_duplex == half)
 746				data |= ADVERTISE_100HALF;
 747			else
 748				data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
 749			break;
 750
 751		case 0: /* Auto */
 752			 if (current_duplex == full)
 753				data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
 754			else if (current_duplex == half)
 755				data |= ADVERTISE_100HALF | ADVERTISE_10HALF;
 756			else
 757				data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
 758				  ADVERTISE_100HALF | ADVERTISE_100FULL;
 759			break;
 760
 761		default: /* assume autoneg speed and duplex */
 762			data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
 763				  ADVERTISE_100HALF | ADVERTISE_100FULL;
 764			break;
 765	}
 766
 767	e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
 768
 769	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
 770	if (autoneg_normal) {
 771		/* Renegotiate with link partner */
 772		data |= BMCR_ANENABLE | BMCR_ANRESTART;
 773	} else {
 774		/* Don't negotiate speed or duplex */
 775		data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
 776
 777		/* Set speed and duplex static */
 778		if (current_speed_selection == 10)
 779			data &= ~BMCR_SPEED100;
 780		else
 781			data |= BMCR_SPEED100;
 782
 783		if (current_duplex != full)
 784			data &= ~BMCR_FULLDPLX;
 785		else
 786			data |= BMCR_FULLDPLX;
 787	}
 788	e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
 789}
 790
 791static void
 792e100_set_speed(struct net_device* dev, unsigned long speed)
 793{
 794	struct net_local *np = netdev_priv(dev);
 795
 796	spin_lock(&np->transceiver_lock);
 797	if (speed != current_speed_selection) {
 798		current_speed_selection = speed;
 799		e100_negotiate(dev);
 800	}
 801	spin_unlock(&np->transceiver_lock);
 802}
 803
 804static void
 805e100_check_duplex(unsigned long priv)
 806{
 807	struct net_device *dev = (struct net_device *)priv;
 808	struct net_local *np = netdev_priv(dev);
 809	int old_duplex;
 810
 811	spin_lock(&np->transceiver_lock);
 812	old_duplex = full_duplex;
 813	transceiver->check_duplex(dev);
 814	if (old_duplex != full_duplex) {
 815		/* Duplex changed */
 816		SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
 817		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
 818	}
 819
 820	/* Reinitialize the timer. */
 821	duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
 822	add_timer(&duplex_timer);
 823	np->mii_if.full_duplex = full_duplex;
 824	spin_unlock(&np->transceiver_lock);
 825}
 826#if defined(CONFIG_ETRAX_NO_PHY)
 827static void
 828dummy_check_duplex(struct net_device* dev)
 829{
 830	full_duplex = 1;
 831}
 832#else
 833static void
 834generic_check_duplex(struct net_device* dev)
 835{
 836	unsigned long data;
 837	struct net_local *np = netdev_priv(dev);
 838
 839	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
 840	if ((data & ADVERTISE_10FULL) ||
 841	    (data & ADVERTISE_100FULL))
 842		full_duplex = 1;
 843	else
 844		full_duplex = 0;
 845}
 846
 847static void
 848tdk_check_duplex(struct net_device* dev)
 849{
 850	unsigned long data;
 851	struct net_local *np = netdev_priv(dev);
 852
 853	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 854				 MDIO_TDK_DIAGNOSTIC_REG);
 855	full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
 856}
 857
 858static void
 859broadcom_check_duplex(struct net_device* dev)
 860{
 861	unsigned long data;
 862	struct net_local *np = netdev_priv(dev);
 863
 864	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 865				 MDIO_AUX_CTRL_STATUS_REG);
 866	full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
 867}
 868
 869static void
 870intel_check_duplex(struct net_device* dev)
 871{
 872	unsigned long data;
 873	struct net_local *np = netdev_priv(dev);
 874
 875	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
 876				 MDIO_INT_STATUS_REG_2);
 877	full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
 878}
 879#endif
 880static void
 881e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
 882{
 883	struct net_local *np = netdev_priv(dev);
 884
 885	spin_lock(&np->transceiver_lock);
 886	if (new_duplex != current_duplex) {
 887		current_duplex = new_duplex;
 888		e100_negotiate(dev);
 889	}
 890	spin_unlock(&np->transceiver_lock);
 891}
 892
 893static int
 894e100_probe_transceiver(struct net_device* dev)
 895{
 896	int ret = 0;
 897
 898#if !defined(CONFIG_ETRAX_NO_PHY)
 899	unsigned int phyid_high;
 900	unsigned int phyid_low;
 901	unsigned int oui;
 902	struct transceiver_ops* ops = NULL;
 903	struct net_local *np = netdev_priv(dev);
 904
 905	spin_lock(&np->transceiver_lock);
 906
 907	/* Probe MDIO physical address */
 908	for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31;
 909	     np->mii_if.phy_id++) {
 910		if (e100_get_mdio_reg(dev,
 911				      np->mii_if.phy_id, MII_BMSR) != 0xffff)
 912			break;
 913	}
 914	if (np->mii_if.phy_id == 32) {
 915		ret = -ENODEV;
 916		goto out;
 917	}
 918
 919	/* Get manufacturer */
 920	phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1);
 921	phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2);
 922	oui = (phyid_high << 6) | (phyid_low >> 10);
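     	/* MII_PHYSID1/MII_PHYSID2 together carry the vendor OUI plus model
     	 * and revision bits; the shifts above drop the model/revision part
     	 * and fold the OUI into a single value that is matched against the
     	 * transceivers[] table below. */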
 923
 924	for (ops = &transceivers[0]; ops->oui; ops++) {
 925		if (ops->oui == oui)
 926			break;
 927	}
 928	transceiver = ops;
 929out:
 930	spin_unlock(&np->transceiver_lock);
 931#endif
 932	return ret;
 933}
 934
 935static int
 936e100_get_mdio_reg(struct net_device *dev, int phy_id, int location)
 937{
 938	unsigned short cmd;    /* Data to be sent on MDIO port */
 939	int data;   /* Data read from MDIO */
 940	int bitCounter;
 941
 942	/* Start of frame, OP Code, Physical Address, Register Address */
 943	cmd = (MDIO_START << 14) | (MDIO_READ << 12) | (phy_id << 7) |
 944		(location << 2);
 945
 946	e100_send_mdio_cmd(cmd, 0);
 947
 948	data = 0;
 949
 950	/* Data... */
 951	for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
 952		data |= (e100_receive_mdio_bit() << bitCounter);
 953	}
 954
 955	return data;
 956}
 957
 958static void
 959e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value)
 960{
 961	int bitCounter;
 962	unsigned short cmd;
 963
 964	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (phy_id << 7) |
 965	      (location << 2);
 966
 967	e100_send_mdio_cmd(cmd, 1);
 968
 969	/* Data... */
 970	for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
 971		e100_send_mdio_bit(GET_BIT(bitCounter, value));
 972	}
 973
 974}
 975
 976static void
 977e100_send_mdio_cmd(unsigned short cmd, int write_cmd)
 978{
 979	int bitCounter;
 980	unsigned char data = 0x2;
 981
 982	/* Preamble */
 983	for (bitCounter = 31; bitCounter>= 0; bitCounter--)
 984		e100_send_mdio_bit(GET_BIT(bitCounter, MDIO_PREAMBLE));
 985
 986	for (bitCounter = 15; bitCounter >= 2; bitCounter--)
 987		e100_send_mdio_bit(GET_BIT(bitCounter, cmd));
 988
 989	/* Turnaround */
 990	for (bitCounter = 1; bitCounter >= 0 ; bitCounter--)
 991		if (write_cmd)
 992			e100_send_mdio_bit(GET_BIT(bitCounter, data));
 993		else
 994			e100_receive_mdio_bit();
 995}
 996
 997static void
 998e100_send_mdio_bit(unsigned char bit)
 999{
1000	*R_NETWORK_MGM_CTRL =
1001		IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
1002		IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
1003	udelay(1);
1004	*R_NETWORK_MGM_CTRL =
1005		IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
1006		IO_MASK(R_NETWORK_MGM_CTRL, mdck) |
1007		IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
1008	udelay(1);
1009}
1010
1011static unsigned char
 1012e100_receive_mdio_bit(void)
1013{
1014	unsigned char bit;
1015	*R_NETWORK_MGM_CTRL = 0;
1016	bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT);
1017	udelay(1);
1018	*R_NETWORK_MGM_CTRL = IO_MASK(R_NETWORK_MGM_CTRL, mdck);
1019	udelay(1);
1020	return bit;
1021}
1022
1023static void
1024e100_reset_transceiver(struct net_device* dev)
1025{
1026	struct net_local *np = netdev_priv(dev);
1027	unsigned short cmd;
1028	unsigned short data;
1029	int bitCounter;
1030
1031	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
1032
1033	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2);
1034
1035	e100_send_mdio_cmd(cmd, 1);
1036
1037	data |= 0x8000;
1038
1039	for (bitCounter = 15; bitCounter >= 0 ; bitCounter--) {
1040		e100_send_mdio_bit(GET_BIT(bitCounter, data));
1041	}
1042}
1043
 1044/* Called by upper layers if they decide it took too long to complete
 1045 * sending a packet - we need to reset the transmitter and clean up.
1046 */
1047
1048static void
1049e100_tx_timeout(struct net_device *dev)
1050{
1051	struct net_local *np = netdev_priv(dev);
1052	unsigned long flags;
1053
1054	spin_lock_irqsave(&np->lock, flags);
1055
1056	printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
1057	       tx_done(dev) ? "IRQ problem" : "network cable problem");
1058
1059	/* remember we got an error */
1060
1061	dev->stats.tx_errors++;
1062
1063	/* reset the TX DMA in case it has hung on something */
1064
1065	RESET_DMA(NETWORK_TX_DMA_NBR);
1066	WAIT_DMA(NETWORK_TX_DMA_NBR);
1067
1068	/* Reset the transceiver. */
1069
1070	e100_reset_transceiver(dev);
1071
1072	/* and get rid of the packets that never got an interrupt */
1073	while (myFirstTxDesc != myNextTxDesc) {
1074		dev_kfree_skb(myFirstTxDesc->skb);
 1075		myFirstTxDesc->skb = NULL;
1076		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
1077	}
1078
1079	/* Set up transmit DMA channel so it can be restarted later */
1080	*R_DMA_CH0_FIRST = 0;
1081	*R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
1082
1083	/* tell the upper layers we're ok again */
1084
1085	netif_wake_queue(dev);
1086	spin_unlock_irqrestore(&np->lock, flags);
1087}
1088
1089
1090/* This will only be invoked if the driver is _not_ in XOFF state.
1091 * What this means is that we need not check it, and that this
1092 * invariant will hold if we make sure that the netif_*_queue()
1093 * calls are done at the proper times.
1094 */
1095
1096static int
1097e100_send_packet(struct sk_buff *skb, struct net_device *dev)
1098{
1099	struct net_local *np = netdev_priv(dev);
1100	unsigned char *buf = skb->data;
1101	unsigned long flags;
1102
1103#ifdef ETHDEBUG
 1104	printk("send packet len %d\n", skb->len);
1105#endif
1106	spin_lock_irqsave(&np->lock, flags);  /* protect from tx_interrupt and ourself */
1107
1108	myNextTxDesc->skb = skb;
1109
1110	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
1111
1112	e100_hardware_send_packet(np, buf, skb->len);
1113
1114	myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
1115
1116	/* Stop queue if full */
1117	if (myNextTxDesc == myFirstTxDesc) {
1118		netif_stop_queue(dev);
1119	}
1120
1121	spin_unlock_irqrestore(&np->lock, flags);
1122
1123	return NETDEV_TX_OK;
1124}
1125
1126/*
1127 * The typical workload of the driver:
1128 *   Handle the network interface interrupts.
1129 */
1130
1131static irqreturn_t
1132e100rxtx_interrupt(int irq, void *dev_id)
1133{
1134	struct net_device *dev = (struct net_device *)dev_id;
1135	struct net_local *np = netdev_priv(dev);
1136	unsigned long irqbits;
1137
1138	/*
1139	 * Note that both rx and tx interrupts are blocked at this point,
1140	 * regardless of which got us here.
1141	 */
1142
1143	irqbits = *R_IRQ_MASK2_RD;
1144
1145	/* Handle received packets */
1146	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
1147		/* acknowledge the eop interrupt */
1148
1149		*R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);
1150
1151		/* check if one or more complete packets were indeed received */
1152
1153		while ((*R_DMA_CH1_FIRST != virt_to_phys(myNextRxDesc)) &&
1154		       (myNextRxDesc != myLastRxDesc)) {
1155			/* Take out the buffer and give it to the OS, then
1156			 * allocate a new buffer to put a packet in.
1157			 */
1158			e100_rx(dev);
1159			dev->stats.rx_packets++;
1160			/* restart/continue on the channel, for safety */
1161			*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
1162			/* clear dma channel 1 eop/descr irq bits */
1163			*R_DMA_CH1_CLR_INTR =
1164				IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do) |
1165				IO_STATE(R_DMA_CH1_CLR_INTR, clr_descr, do);
1166
1167			/* now, we might have gotten another packet
1168			   so we have to loop back and check if so */
1169		}
1170	}
1171
1172	/* Report any packets that have been sent */
1173	while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
1174	       (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
1175		dev->stats.tx_bytes += myFirstTxDesc->skb->len;
1176		dev->stats.tx_packets++;
1177
 1178		/* The DMA has finished transmitting the data in this skb, so
 1179		   we can now release the skb memory */
1180		dev_kfree_skb_irq(myFirstTxDesc->skb);
 1181		myFirstTxDesc->skb = NULL;
1182		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
1183                /* Wake up queue. */
1184		netif_wake_queue(dev);
1185	}
1186
1187	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
1188		/* acknowledge the eop interrupt. */
1189		*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
1190	}
1191
1192	return IRQ_HANDLED;
1193}
1194
1195static irqreturn_t
1196e100nw_interrupt(int irq, void *dev_id)
1197{
1198	struct net_device *dev = (struct net_device *)dev_id;
1199	unsigned long irqbits = *R_IRQ_MASK0_RD;
1200
1201	/* check for underrun irq */
1202	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, underrun, active)) {
1203		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
1204		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
1205		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
1206		dev->stats.tx_errors++;
 1207		D(printk("ethernet transmitter underrun!\n"));
1208	}
1209
1210	/* check for overrun irq */
1211	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
1212		update_rx_stats(&dev->stats); /* this will ack the irq */
1213		D(printk("ethernet receiver overrun!\n"));
1214	}
1215	/* check for excessive collision irq */
1216	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, excessive_col, active)) {
1217		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
1218		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
1219		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
1220		dev->stats.tx_errors++;
1221		D(printk("ethernet excessive collisions!\n"));
1222	}
1223	return IRQ_HANDLED;
1224}
1225
1226/* We have a good packet(s), get it/them out of the buffers. */
1227static void
1228e100_rx(struct net_device *dev)
1229{
1230	struct sk_buff *skb;
1231	int length = 0;
1232	struct net_local *np = netdev_priv(dev);
1233	unsigned char *skb_data_ptr;
1234#ifdef ETHDEBUG
1235	int i;
1236#endif
1237	etrax_eth_descr *prevRxDesc;  /* The descriptor right before myNextRxDesc */
1238	spin_lock(&np->led_lock);
1239	if (!led_active && time_after(jiffies, led_next_time)) {
1240		/* light the network leds depending on the current speed. */
1241		e100_set_network_leds(NETWORK_ACTIVITY);
1242
1243		/* Set the earliest time we may clear the LED */
1244		led_next_time = jiffies + NET_FLASH_TIME;
1245		led_active = 1;
1246		mod_timer(&clear_led_timer, jiffies + HZ/10);
1247	}
1248	spin_unlock(&np->led_lock);
1249
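     	/* hw_len is the frame length as seen on the wire; the 4 bytes
     	 * subtracted below are presumably the Ethernet FCS, which is not
     	 * passed up the stack. */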
1250	length = myNextRxDesc->descr.hw_len - 4;
1251	dev->stats.rx_bytes += length;
1252
1253#ifdef ETHDEBUG
1254	printk("Got a packet of length %d:\n", length);
1255	/* dump the first bytes in the packet */
1256	skb_data_ptr = (unsigned char *)phys_to_virt(myNextRxDesc->descr.buf);
1257	for (i = 0; i < 8; i++) {
1258		printk("%d: %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", i * 8,
1259		       skb_data_ptr[0],skb_data_ptr[1],skb_data_ptr[2],skb_data_ptr[3],
1260		       skb_data_ptr[4],skb_data_ptr[5],skb_data_ptr[6],skb_data_ptr[7]);
1261		skb_data_ptr += 8;
1262	}
1263#endif
1264
1265	if (length < RX_COPYBREAK) {
1266		/* Small packet, copy data */
1267		skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
1268		if (!skb) {
1269			dev->stats.rx_errors++;
1270			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
1271			goto update_nextrxdesc;
1272		}
1273
1274		skb_put(skb, length - ETHER_HEAD_LEN);        /* allocate room for the packet body */
1275		skb_data_ptr = skb_push(skb, ETHER_HEAD_LEN); /* allocate room for the header */
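     		/* Note: only length - ETHER_HEAD_LEN bytes were requested from
     		 * dev_alloc_skb() above, but it reserves extra headroom (at
     		 * least 16 bytes), which is what makes the 14-byte skb_push()
     		 * safe here. */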
1276
1277#ifdef ETHDEBUG
1278		printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n",
1279		       skb->head, skb->data, skb_tail_pointer(skb),
1280		       skb_end_pointer(skb));
1281		printk("copying packet to 0x%x.\n", skb_data_ptr);
1282#endif
1283
1284		memcpy(skb_data_ptr, phys_to_virt(myNextRxDesc->descr.buf), length);
1285	}
1286	else {
1287		/* Large packet, send directly to upper layers and allocate new
1288		 * memory (aligned to cache line boundary to avoid bug).
1289		 * Before sending the skb to upper layers we must make sure
1290		 * that skb->data points to the aligned start of the packet.
1291		 */
1292		int align;
1293		struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
1294		if (!new_skb) {
1295			dev->stats.rx_errors++;
1296			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
1297			goto update_nextrxdesc;
1298		}
1299		skb = myNextRxDesc->skb;
1300		align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
1301		skb_put(skb, length + align);
1302		skb_pull(skb, align); /* Remove alignment bytes */
1303		myNextRxDesc->skb = new_skb;
1304		myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data));
1305	}
1306
1307	skb->protocol = eth_type_trans(skb, dev);
1308
1309	/* Send the packet to the upper layers */
1310	netif_rx(skb);
1311
1312  update_nextrxdesc:
1313	/* Prepare for next packet */
1314	myNextRxDesc->descr.status = 0;
1315	prevRxDesc = myNextRxDesc;
1316	myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);
1317
1318	rx_queue_len++;
1319
1320	/* Check if descriptors should be returned */
1321	if (rx_queue_len == RX_QUEUE_THRESHOLD) {
1322		flush_etrax_cache();
1323		prevRxDesc->descr.ctrl |= d_eol;
1324		myLastRxDesc->descr.ctrl &= ~d_eol;
1325		myLastRxDesc = prevRxDesc;
1326		rx_queue_len = 0;
1327	}
1328}
1329
 1330/* The inverse routine to e100_open(). */
1331static int
1332e100_close(struct net_device *dev)
1333{
1334	printk(KERN_INFO "Closing %s.\n", dev->name);
1335
1336	netif_stop_queue(dev);
1337
1338	*R_IRQ_MASK0_CLR =
1339		IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
1340		IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
1341		IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);
1342
1343	*R_IRQ_MASK2_CLR =
1344		IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
1345		IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
1346		IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
1347		IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
1348
1349	/* Stop the receiver and the transmitter */
1350
1351	RESET_DMA(NETWORK_TX_DMA_NBR);
1352	RESET_DMA(NETWORK_RX_DMA_NBR);
1353
1354	/* Flush the Tx and disable Rx here. */
1355
1356	free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
1357	free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
1358	free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
1359
1360	cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
1361	cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
1362
1363	/* Update the statistics here. */
1364
1365	update_rx_stats(&dev->stats);
1366	update_tx_stats(&dev->stats);
1367
1368	/* Stop speed/duplex timers */
1369	del_timer(&speed_timer);
1370	del_timer(&duplex_timer);
1371
1372	return 0;
1373}
1374
1375static int
1376e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1377{
1378	struct mii_ioctl_data *data = if_mii(ifr);
1379	struct net_local *np = netdev_priv(dev);
1380	int rc = 0;
1381        int old_autoneg;
1382
1383	spin_lock(&np->lock); /* Preempt protection */
1384	switch (cmd) {
1385		/* The ioctls below should be considered obsolete but are */
1386		/* still present for compatibility with old scripts/apps  */
1387		case SET_ETH_SPEED_10:                  /* 10 Mbps */
1388			e100_set_speed(dev, 10);
1389			break;
1390		case SET_ETH_SPEED_100:                /* 100 Mbps */
1391			e100_set_speed(dev, 100);
1392			break;
1393		case SET_ETH_SPEED_AUTO:        /* Auto-negotiate speed */
1394			e100_set_speed(dev, 0);
1395			break;
1396		case SET_ETH_DUPLEX_HALF:       /* Half duplex */
1397			e100_set_duplex(dev, half);
1398			break;
1399		case SET_ETH_DUPLEX_FULL:       /* Full duplex */
1400			e100_set_duplex(dev, full);
1401			break;
1402		case SET_ETH_DUPLEX_AUTO:       /* Auto-negotiate duplex */
1403			e100_set_duplex(dev, autoneg);
1404			break;
1405	        case SET_ETH_AUTONEG:
1406			old_autoneg = autoneg_normal;
1407		        autoneg_normal = *(int*)data;
1408			if (autoneg_normal != old_autoneg)
1409				e100_negotiate(dev);
1410			break;
1411		default:
1412			rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr),
1413						cmd, NULL);
1414			break;
1415	}
1416	spin_unlock(&np->lock);
1417	return rc;
1418}
1419
1420static int e100_get_settings(struct net_device *dev,
1421			     struct ethtool_cmd *cmd)
1422{
1423	struct net_local *np = netdev_priv(dev);
1424	int err;
1425
1426	spin_lock_irq(&np->lock);
1427	err = mii_ethtool_gset(&np->mii_if, cmd);
1428	spin_unlock_irq(&np->lock);
1429
1430	/* The PHY may support 1000baseT, but the Etrax100 does not.  */
1431	cmd->supported &= ~(SUPPORTED_1000baseT_Half
1432			    | SUPPORTED_1000baseT_Full);
1433	return err;
1434}
1435
1436static int e100_set_settings(struct net_device *dev,
1437			     struct ethtool_cmd *ecmd)
1438{
1439	if (ecmd->autoneg == AUTONEG_ENABLE) {
1440		e100_set_duplex(dev, autoneg);
1441		e100_set_speed(dev, 0);
1442	} else {
1443		e100_set_duplex(dev, ecmd->duplex == DUPLEX_HALF ? half : full);
1444		e100_set_speed(dev, ecmd->speed == SPEED_10 ? 10: 100);
1445	}
1446
1447	return 0;
1448}
1449
1450static void e100_get_drvinfo(struct net_device *dev,
1451			     struct ethtool_drvinfo *info)
1452{
1453	strncpy(info->driver, "ETRAX 100LX", sizeof(info->driver) - 1);
1454	strncpy(info->version, "$Revision: 1.31 $", sizeof(info->version) - 1);
1455	strncpy(info->fw_version, "N/A", sizeof(info->fw_version) - 1);
1456	strncpy(info->bus_info, "N/A", sizeof(info->bus_info) - 1);
1457}
1458
1459static int e100_nway_reset(struct net_device *dev)
1460{
1461	if (current_duplex == autoneg && current_speed_selection == 0)
1462		e100_negotiate(dev);
1463	return 0;
1464}
1465
1466static const struct ethtool_ops e100_ethtool_ops = {
1467	.get_settings	= e100_get_settings,
1468	.set_settings	= e100_set_settings,
1469	.get_drvinfo	= e100_get_drvinfo,
1470	.nway_reset	= e100_nway_reset,
1471	.get_link	= ethtool_op_get_link,
1472};
1473
1474static int
1475e100_set_config(struct net_device *dev, struct ifmap *map)
1476{
1477	struct net_local *np = netdev_priv(dev);
1478
1479	spin_lock(&np->lock); /* Preempt protection */
1480
1481	switch(map->port) {
1482		case IF_PORT_UNKNOWN:
1483			/* Use autoneg */
1484			e100_set_speed(dev, 0);
1485			e100_set_duplex(dev, autoneg);
1486			break;
1487		case IF_PORT_10BASET:
1488			e100_set_speed(dev, 10);
1489			e100_set_duplex(dev, autoneg);
1490			break;
1491		case IF_PORT_100BASET:
1492		case IF_PORT_100BASETX:
1493			e100_set_speed(dev, 100);
1494			e100_set_duplex(dev, autoneg);
1495			break;
1496		case IF_PORT_100BASEFX:
1497		case IF_PORT_10BASE2:
1498		case IF_PORT_AUI:
1499			spin_unlock(&np->lock);
1500			return -EOPNOTSUPP;
1501			break;
1502		default:
 1503			printk(KERN_ERR "%s: Invalid media selected\n", dev->name);
1504			spin_unlock(&np->lock);
1505			return -EINVAL;
1506	}
1507	spin_unlock(&np->lock);
1508	return 0;
1509}
1510
1511static void
1512update_rx_stats(struct net_device_stats *es)
1513{
1514	unsigned long r = *R_REC_COUNTERS;
1515	/* update stats relevant to reception errors */
1516	es->rx_fifo_errors += IO_EXTRACT(R_REC_COUNTERS, congestion, r);
1517	es->rx_crc_errors += IO_EXTRACT(R_REC_COUNTERS, crc_error, r);
1518	es->rx_frame_errors += IO_EXTRACT(R_REC_COUNTERS, alignment_error, r);
1519	es->rx_length_errors += IO_EXTRACT(R_REC_COUNTERS, oversize, r);
1520}
1521
1522static void
1523update_tx_stats(struct net_device_stats *es)
1524{
1525	unsigned long r = *R_TR_COUNTERS;
1526	/* update stats relevant to transmission errors */
1527	es->collisions +=
1528		IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
1529		IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
1530}
1531
1532/*
1533 * Get the current statistics.
1534 * This may be called with the card open or closed.
1535 */
1536static struct net_device_stats *
1537e100_get_stats(struct net_device *dev)
1538{
1539	struct net_local *lp = netdev_priv(dev);
1540	unsigned long flags;
1541
1542	spin_lock_irqsave(&lp->lock, flags);
1543
1544	update_rx_stats(&dev->stats);
1545	update_tx_stats(&dev->stats);
1546
1547	spin_unlock_irqrestore(&lp->lock, flags);
1548	return &dev->stats;
1549}
1550
 1551/*
 1552 * Set or clear the multicast filter for this adaptor.
 1553 * IFF_PROMISC		Promiscuous mode, receive all packets
 1554 * IFF_ALLMULTI		Receive all multicast packets
 1555 * empty mc list	Normal mode, clear the group address filter
 1556 * otherwise		Best-effort hash filtering via R_NETWORK_GA_0/1
 1557 */
1558static void
1559set_multicast_list(struct net_device *dev)
1560{
1561	struct net_local *lp = netdev_priv(dev);
1562	int num_addr = netdev_mc_count(dev);
1563	unsigned long int lo_bits;
1564	unsigned long int hi_bits;
1565
1566	spin_lock(&lp->lock);
1567	if (dev->flags & IFF_PROMISC) {
1568		/* promiscuous mode */
1569		lo_bits = 0xfffffffful;
1570		hi_bits = 0xfffffffful;
1571
1572		/* Enable individual receive */
1573		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, receive);
1574		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
1575	} else if (dev->flags & IFF_ALLMULTI) {
1576		/* enable all multicasts */
1577		lo_bits = 0xfffffffful;
1578		hi_bits = 0xfffffffful;
1579
1580		/* Disable individual receive */
1581		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
1582		*R_NETWORK_REC_CONFIG =  network_rec_config_shadow;
1583	} else if (num_addr == 0) {
1584		/* Normal, clear the mc list */
1585		lo_bits = 0x00000000ul;
1586		hi_bits = 0x00000000ul;
1587
1588		/* Disable individual receive */
1589		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
1590		*R_NETWORK_REC_CONFIG =  network_rec_config_shadow;
1591	} else {
1592		/* MC mode, receive normal and MC packets */
1593		char hash_ix;
1594		struct netdev_hw_addr *ha;
1595		char *baddr;
1596
1597		lo_bits = 0x00000000ul;
1598		hi_bits = 0x00000000ul;
1599		netdev_for_each_mc_addr(ha, dev) {
1600			/* Calculate the hash index for the GA registers */
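     			/* The 48-bit station address is XOR-folded, six bits at a
     			 * time, into a 6-bit index; bit <index> of the 64-bit filter
     			 * formed by R_NETWORK_GA_1:R_NETWORK_GA_0 is then set. */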
1601
1602			hash_ix = 0;
1603			baddr = ha->addr;
1604			hash_ix ^= (*baddr) & 0x3f;
1605			hash_ix ^= ((*baddr) >> 6) & 0x03;
1606			++baddr;
1607			hash_ix ^= ((*baddr) << 2) & 0x03c;
1608			hash_ix ^= ((*baddr) >> 4) & 0xf;
1609			++baddr;
1610			hash_ix ^= ((*baddr) << 4) & 0x30;
1611			hash_ix ^= ((*baddr) >> 2) & 0x3f;
1612			++baddr;
1613			hash_ix ^= (*baddr) & 0x3f;
1614			hash_ix ^= ((*baddr) >> 6) & 0x03;
1615			++baddr;
1616			hash_ix ^= ((*baddr) << 2) & 0x03c;
1617			hash_ix ^= ((*baddr) >> 4) & 0xf;
1618			++baddr;
1619			hash_ix ^= ((*baddr) << 4) & 0x30;
1620			hash_ix ^= ((*baddr) >> 2) & 0x3f;
1621
1622			hash_ix &= 0x3f;
1623
1624			if (hash_ix >= 32) {
1625				hi_bits |= (1 << (hash_ix-32));
1626			} else {
1627				lo_bits |= (1 << hash_ix);
1628			}
1629		}
1630		/* Disable individual receive */
1631		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
1632		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
1633	}
1634	*R_NETWORK_GA_0 = lo_bits;
1635	*R_NETWORK_GA_1 = hi_bits;
1636	spin_unlock(&lp->lock);
1637}
1638
 1639static void
1640e100_hardware_send_packet(struct net_local *np, char *buf, int length)
1641{
1642	D(printk("e100 send pack, buf 0x%x len %d\n", buf, length));
1643
1644	spin_lock(&np->led_lock);
1645	if (!led_active && time_after(jiffies, led_next_time)) {
1646		/* light the network leds depending on the current speed. */
1647		e100_set_network_leds(NETWORK_ACTIVITY);
1648
1649		/* Set the earliest time we may clear the LED */
1650		led_next_time = jiffies + NET_FLASH_TIME;
1651		led_active = 1;
1652		mod_timer(&clear_led_timer, jiffies + HZ/10);
1653	}
1654	spin_unlock(&np->led_lock);
1655
1656	/* configure the tx dma descriptor */
1657	myNextTxDesc->descr.sw_len = length;
1658	myNextTxDesc->descr.ctrl = d_eop | d_eol | d_wait;
1659	myNextTxDesc->descr.buf = virt_to_phys(buf);
1660
1661        /* Move end of list */
1662        myLastTxDesc->descr.ctrl &= ~d_eol;
1663        myLastTxDesc = myNextTxDesc;
1664
1665	/* Restart DMA channel */
1666	*R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, restart);
1667}
1668
1669static void
1670e100_clear_network_leds(unsigned long dummy)
1671{
1672	struct net_device *dev = (struct net_device *)dummy;
1673	struct net_local *np = netdev_priv(dev);
1674
1675	spin_lock(&np->led_lock);
1676
1677	if (led_active && time_after(jiffies, led_next_time)) {
1678		e100_set_network_leds(NO_NETWORK_ACTIVITY);
1679
1680		/* Set the earliest time we may set the LED */
1681		led_next_time = jiffies + NET_FLASH_PAUSE;
1682		led_active = 0;
1683	}
1684
1685	spin_unlock(&np->led_lock);
1686}
1687
1688static void
1689e100_set_network_leds(int active)
1690{
1691#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK)
1692	int light_leds = (active == NO_NETWORK_ACTIVITY);
1693#elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY)
1694	int light_leds = (active == NETWORK_ACTIVITY);
1695#else
1696#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY"
1697#endif
1698
1699	if (!current_speed) {
1700		/* Make LED red, link is down */
1701		CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
1702	} else if (light_leds) {
1703		if (current_speed == 10) {
1704			CRIS_LED_NETWORK_SET(CRIS_LED_ORANGE);
1705		} else {
1706			CRIS_LED_NETWORK_SET(CRIS_LED_GREEN);
1707		}
1708	} else {
1709		CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
1710	}
1711}
1712
1713#ifdef CONFIG_NET_POLL_CONTROLLER
1714static void
1715e100_netpoll(struct net_device* netdev)
1716{
 1717	e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
1718}
1719#endif
1720
 1721static int __init
1722etrax_init_module(void)
1723{
1724	return etrax_ethernet_init();
1725}
1726
1727static int __init
1728e100_boot_setup(char* str)
1729{
1730	struct sockaddr sa = {0};
1731	int i;
1732
1733	/* Parse the colon separated Ethernet station address */
1734	for (i = 0; i <  ETH_ALEN; i++) {
1735		unsigned int tmp;
1736		if (sscanf(str + 3*i, "%2x", &tmp) != 1) {
 1737			printk(KERN_WARNING "Malformed station address\n");
1738			return 0;
1739		}
1740		sa.sa_data[i] = (char)tmp;
1741	}
1742
1743	default_mac = sa;
1744	return 1;
1745}
1746
1747__setup("etrax100_eth=", e100_boot_setup);
1748
1749module_init(etrax_init_module);