Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.8.
   1/*
   2 *  Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
   3 *
   4 *  Copyright 2004 IDT Inc. (rischelp@idt.com)
   5 *  Copyright 2006 Felix Fietkau <nbd@openwrt.org>
   6 *  Copyright 2008 Florian Fainelli <florian@openwrt.org>
   7 *
   8 *  This program is free software; you can redistribute  it and/or modify it
   9 *  under  the terms of  the GNU General  Public License as published by the
  10 *  Free Software Foundation;  either version 2 of the  License, or (at your
  11 *  option) any later version.
  12 *
  13 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
  14 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
  15 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
  16 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
  17 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  18 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
  19 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  20 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
  21 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  22 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  23 *
  24 *  You should have received a copy of the  GNU General Public License along
  25 *  with this program; if not, write  to the Free Software Foundation, Inc.,
  26 *  675 Mass Ave, Cambridge, MA 02139, USA.
  27 *
  28 *  Writing to a DMA status register:
  29 *
  30 *  When writing to the status register, you should mask the bit you have
  31 *  been testing the status register with. Both Tx and Rx DMA registers
  32 *  should stick to this procedure.
  33 */
  34
  35#include <linux/module.h>
  36#include <linux/kernel.h>
  37#include <linux/moduleparam.h>
  38#include <linux/sched.h>
  39#include <linux/ctype.h>
  40#include <linux/types.h>
  41#include <linux/interrupt.h>
  42#include <linux/init.h>
  43#include <linux/ioport.h>
  44#include <linux/in.h>
  45#include <linux/slab.h>
  46#include <linux/string.h>
  47#include <linux/delay.h>
  48#include <linux/netdevice.h>
  49#include <linux/etherdevice.h>
  50#include <linux/skbuff.h>
  51#include <linux/errno.h>
  52#include <linux/platform_device.h>
  53#include <linux/mii.h>
  54#include <linux/ethtool.h>
  55#include <linux/crc32.h>
  56
  57#include <asm/bootinfo.h>
  58#include <asm/system.h>
  59#include <asm/bitops.h>
  60#include <asm/pgtable.h>
  61#include <asm/segment.h>
  62#include <asm/io.h>
  63#include <asm/dma.h>
  64
  65#include <asm/mach-rc32434/rb.h>
  66#include <asm/mach-rc32434/rc32434.h>
  67#include <asm/mach-rc32434/eth.h>
  68#include <asm/mach-rc32434/dma_v.h>
  69
#define DRV_NAME        "korina"
#define DRV_VERSION     "0.10"
#define DRV_RELDATE     "04Mar2008"

/* Compose the station (MAC) address register values from dev_addr:
 * bytes 0-1 form the "high" half, bytes 2-5 the "low" word. */
#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
				   ((dev)->dev_addr[1]))
#define STATION_ADDRESS_LOW(dev)  (((dev)->dev_addr[2] << 24) | \
				   ((dev)->dev_addr[3] << 16) | \
				   ((dev)->dev_addr[4] << 8)  | \
				   ((dev)->dev_addr[5]))

#define MII_CLOCK 1250000 	/* no more than 2.5MHz */

/* the following must be powers of two */
#define KORINA_NUM_RDS	64  /* number of receive descriptors */
#define KORINA_NUM_TDS	64  /* number of transmit descriptors */

/* KORINA_RBSIZE is the hardware's default maximum receive
 * frame size in bytes. Having this hardcoded means that there
 * is no support for MTU sizes greater than 1500. */
#define KORINA_RBSIZE	1536 /* size of one resource buffer = Ether MTU */
#define KORINA_RDS_MASK	(KORINA_NUM_RDS - 1)
#define KORINA_TDS_MASK	(KORINA_NUM_TDS - 1)
#define RD_RING_SIZE 	(KORINA_NUM_RDS * sizeof(struct dma_desc))
#define TD_RING_SIZE	(KORINA_NUM_TDS * sizeof(struct dma_desc))

/* Tx watchdog timeout: 6 seconds expressed in jiffies */
#define TX_TIMEOUT 	(6000 * HZ / 1000)

/* Driver-side state of a descriptor chain */
enum chain_status { desc_filled, desc_empty };
#define IS_DMA_FINISHED(X)   (((X) & (DMA_DESC_FINI)) != 0)
#define IS_DMA_DONE(X)   (((X) & (DMA_DESC_DONE)) != 0)
/* Extract the received frame length field from a devcs word */
#define RCVPKT_LENGTH(X)     (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
 102
/* Information that need to be kept for each board. */
struct korina_private {
	struct eth_regs *eth_regs;	/* MAC register block */
	struct dma_reg *rx_dma_regs;	/* Rx DMA channel registers */
	struct dma_reg *tx_dma_regs;	/* Tx DMA channel registers */
	struct dma_desc *td_ring; /* transmit descriptor ring */
	struct dma_desc *rd_ring; /* receive descriptor ring  */

	/* skbs owned by the descriptors, indexed like the rings */
	struct sk_buff *tx_skb[KORINA_NUM_TDS];
	struct sk_buff *rx_skb[KORINA_NUM_RDS];

	int rx_next_done;		/* next Rx descriptor to reap */
	int rx_chain_head;
	int rx_chain_tail;
	enum chain_status rx_chain_status;

	int tx_next_done;		/* next Tx descriptor to reap */
	int tx_chain_head;
	int tx_chain_tail;
	enum chain_status tx_chain_status;
	int tx_count;			/* descriptors currently in flight */
	int tx_full;			/* set when the queue was stopped */

	int rx_irq;
	int tx_irq;
	int ovr_irq;			/* Rx overflow interrupt */
	int und_irq;			/* Tx underflow interrupt */

	spinlock_t lock;        /* NIC xmit lock */

	int dma_halt_cnt;		/* diagnostic counters */
	int dma_run_cnt;
	struct napi_struct napi;
	struct timer_list media_check_timer;	/* 1 Hz link poll */
	struct mii_if_info mii_if;
	struct work_struct restart_task;	/* deferred full restart */
	struct net_device *dev;
	int phy_addr;
};
 142
 143extern unsigned int idt_cpu_freq;
 144
/* Start a DMA channel: clear the "next descriptor" pointer, then point
 * the channel at the first descriptor of the chain.  The write order
 * matters: dmandptr must be zeroed before dmadptr is loaded. */
static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
{
	writel(0, &ch->dmandptr);
	writel(dma_addr, &ch->dmadptr);
}
 150
 151static inline void korina_abort_dma(struct net_device *dev,
 152					struct dma_reg *ch)
 153{
 154       if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
 155	       writel(0x10, &ch->dmac);
 156
 157	       while (!(readl(&ch->dmas) & DMA_STAT_HALT))
 158		       dev->trans_start = jiffies;
 159
 160	       writel(0, &ch->dmas);
 161       }
 162
 163       writel(0, &ch->dmadptr);
 164       writel(0, &ch->dmandptr);
 165}
 166
/* Append a descriptor chain to a running channel by loading its physical
 * address into the "next descriptor" register. */
static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
{
	writel(dma_addr, &ch->dmandptr);
}
 171
/* Abort the transmit DMA channel. */
static void korina_abort_tx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	korina_abort_dma(dev, lp->tx_dma_regs);
}
 178
/* Abort the receive DMA channel. */
static void korina_abort_rx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	korina_abort_dma(dev, lp->rx_dma_regs);
}
 185
/* Start Rx DMA at the given receive descriptor. */
static void korina_start_rx(struct korina_private *lp,
					struct dma_desc *rd)
{
	korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}
 191
/* Chain the given receive descriptor onto the running Rx DMA channel. */
static void korina_chain_rx(struct korina_private *lp,
					struct dma_desc *rd)
{
	korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}
 197
 198/* transmit packet */
 199static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 200{
 201	struct korina_private *lp = netdev_priv(dev);
 202	unsigned long flags;
 203	u32 length;
 204	u32 chain_prev, chain_next;
 205	struct dma_desc *td;
 206
 207	spin_lock_irqsave(&lp->lock, flags);
 208
 209	td = &lp->td_ring[lp->tx_chain_tail];
 210
 211	/* stop queue when full, drop pkts if queue already full */
 212	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
 213		lp->tx_full = 1;
 214
 215		if (lp->tx_count == (KORINA_NUM_TDS - 2))
 216			netif_stop_queue(dev);
 217		else {
 218			dev->stats.tx_dropped++;
 219			dev_kfree_skb_any(skb);
 220			spin_unlock_irqrestore(&lp->lock, flags);
 221
 222			return NETDEV_TX_BUSY;
 223		}
 224	}
 225
 226	lp->tx_count++;
 227
 228	lp->tx_skb[lp->tx_chain_tail] = skb;
 229
 230	length = skb->len;
 231	dma_cache_wback((u32)skb->data, skb->len);
 232
 233	/* Setup the transmit descriptor. */
 234	dma_cache_inv((u32) td, sizeof(*td));
 235	td->ca = CPHYSADDR(skb->data);
 236	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
 237	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;
 238
 239	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
 240		if (lp->tx_chain_status == desc_empty) {
 241			/* Update tail */
 242			td->control = DMA_COUNT(length) |
 243					DMA_DESC_COF | DMA_DESC_IOF;
 244			/* Move tail */
 245			lp->tx_chain_tail = chain_next;
 246			/* Write to NDPTR */
 247			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 248					&lp->tx_dma_regs->dmandptr);
 249			/* Move head to tail */
 250			lp->tx_chain_head = lp->tx_chain_tail;
 251		} else {
 252			/* Update tail */
 253			td->control = DMA_COUNT(length) |
 254					DMA_DESC_COF | DMA_DESC_IOF;
 255			/* Link to prev */
 256			lp->td_ring[chain_prev].control &=
 257					~DMA_DESC_COF;
 258			/* Link to prev */
 259			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
 260			/* Move tail */
 261			lp->tx_chain_tail = chain_next;
 262			/* Write to NDPTR */
 263			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 264					&(lp->tx_dma_regs->dmandptr));
 265			/* Move head to tail */
 266			lp->tx_chain_head = lp->tx_chain_tail;
 267			lp->tx_chain_status = desc_empty;
 268		}
 269	} else {
 270		if (lp->tx_chain_status == desc_empty) {
 271			/* Update tail */
 272			td->control = DMA_COUNT(length) |
 273					DMA_DESC_COF | DMA_DESC_IOF;
 274			/* Move tail */
 275			lp->tx_chain_tail = chain_next;
 276			lp->tx_chain_status = desc_filled;
 277		} else {
 278			/* Update tail */
 279			td->control = DMA_COUNT(length) |
 280					DMA_DESC_COF | DMA_DESC_IOF;
 281			lp->td_ring[chain_prev].control &=
 282					~DMA_DESC_COF;
 283			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
 284			lp->tx_chain_tail = chain_next;
 285		}
 286	}
 287	dma_cache_wback((u32) td, sizeof(*td));
 288
 289	dev->trans_start = jiffies;
 290	spin_unlock_irqrestore(&lp->lock, flags);
 291
 292	return NETDEV_TX_OK;
 293}
 294
/* Read an MII management register.
 *
 * NOTE(review): the mii_id argument is discarded and recomputed from the
 * Rx IRQ number (0x2c apparently selects PHY 1, anything else PHY 0) --
 * confirm this IRQ-to-PHY mapping against the board setup code. */
static int mdio_read(struct net_device *dev, int mii_id, int reg)
{
	struct korina_private *lp = netdev_priv(dev);
	int ret;

	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);

	writel(0, &lp->eth_regs->miimcfg);
	writel(0, &lp->eth_regs->miimcmd);
	writel(mii_id | reg, &lp->eth_regs->miimaddr);
	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);

	ret = (int)(readl(&lp->eth_regs->miimrdd));
	return ret;
}
 310
/* Write an MII management register.
 *
 * NOTE(review): as in mdio_read(), the mii_id argument is overwritten
 * from the Rx IRQ number; and the write data register is loaded after
 * the scan command is issued -- verify this sequence against the
 * RC32434 MII management documentation. */
static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct korina_private *lp = netdev_priv(dev);

	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);

	writel(0, &lp->eth_regs->miimcfg);
	writel(1, &lp->eth_regs->miimcmd);
	writel(mii_id | reg, &lp->eth_regs->miimaddr);
	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
	writel(val, &lp->eth_regs->miimwtd);
}
 323
 324/* Ethernet Rx DMA interrupt */
 325static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
 326{
 327	struct net_device *dev = dev_id;
 328	struct korina_private *lp = netdev_priv(dev);
 329	u32 dmas, dmasm;
 330	irqreturn_t retval;
 331
 332	dmas = readl(&lp->rx_dma_regs->dmas);
 333	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
 334		dmasm = readl(&lp->rx_dma_regs->dmasm);
 335		writel(dmasm | (DMA_STAT_DONE |
 336				DMA_STAT_HALT | DMA_STAT_ERR),
 337				&lp->rx_dma_regs->dmasm);
 338
 339		napi_schedule(&lp->napi);
 340
 341		if (dmas & DMA_STAT_ERR)
 342			printk(KERN_ERR "%s: DMA error\n", dev->name);
 343
 344		retval = IRQ_HANDLED;
 345	} else
 346		retval = IRQ_NONE;
 347
 348	return retval;
 349}
 350
/* Reap up to @limit completed receive descriptors.
 *
 * Called from NAPI context.  For each completed descriptor: update the
 * error statistics, pass good frames up the stack, attach a fresh
 * buffer, and recycle the descriptor back onto the ring.  Returns the
 * number of descriptors processed. */
static int korina_rx(struct net_device *dev, int limit)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8 *pkt_buf;
	u32 devcs, pkt_len, dmas;
	int count;

	/* The DMA engine writes the descriptor behind the cache's back */
	dma_cache_inv((u32)rd, sizeof(*rd));

	for (count = 0; count < limit; count++) {
		skb = lp->rx_skb[lp->rx_next_done];
		skb_new = NULL;

		devcs = rd->devcs;

		/* Nothing received into this descriptor yet: stop here */
		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
			break;

		/* Update statistics counters */
		if (devcs & ETH_RX_CRC)
			dev->stats.rx_crc_errors++;
		if (devcs & ETH_RX_LOR)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_LE)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_OVR)
			dev->stats.rx_fifo_errors++;
		if (devcs & ETH_RX_CV)
			dev->stats.rx_frame_errors++;
		if (devcs & ETH_RX_CES)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_MP)
			dev->stats.multicast++;

		if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
			/* check that this is a whole packet
			 * WARNING: DMA_FD bit incorrectly set
			 * in Rc32434 (errata ref #077) */
			dev->stats.rx_errors++;
			dev->stats.rx_dropped++;
		} else if ((devcs & ETH_RX_ROK)) {
			/* Receive OK: hand the frame to the stack */
			pkt_len = RCVPKT_LENGTH(devcs);

			/* must be the (first and) last
			 * descriptor then */
			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

			/* invalidate the cache */
			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);

			/* Malloc up new buffer. */
			skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);

			/* On allocation failure the old skb stays on the
			 * ring and the frame is retried later */
			if (!skb_new)
				break;
			/* Do not count the CRC */
			skb_put(skb, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);

			/* Pass the packet to upper layers */
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;

			/* Update the mcast stats */
			if (devcs & ETH_RX_MP)
				dev->stats.multicast++;

			lp->rx_skb[lp->rx_next_done] = skb_new;
		}

		/* Recycle the descriptor with a (possibly new) buffer */
		rd->devcs = 0;

		/* Restore descriptor's curr_addr */
		if (skb_new)
			rd->ca = CPHYSADDR(skb_new->data);
		else
			rd->ca = CPHYSADDR(skb->data);

		rd->control = DMA_COUNT(KORINA_RBSIZE) |
			DMA_DESC_COD | DMA_DESC_IOD;
		/* The previous descriptor no longer ends the chain */
		lp->rd_ring[(lp->rx_next_done - 1) &
			KORINA_RDS_MASK].control &=
			~DMA_DESC_COD;

		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		/* Acknowledge only the DONE bit (see file header note) */
		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
	}

	dmas = readl(&lp->rx_dma_regs->dmas);

	if (dmas & DMA_STAT_HALT) {
		/* The channel halted (ran out of descriptors): restart it */
		writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmas);

		lp->dma_halt_cnt++;
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		korina_chain_rx(lp, rd);
	}

	return count;
}
 460
 461static int korina_poll(struct napi_struct *napi, int budget)
 462{
 463	struct korina_private *lp =
 464		container_of(napi, struct korina_private, napi);
 465	struct net_device *dev = lp->dev;
 466	int work_done;
 467
 468	work_done = korina_rx(dev, budget);
 469	if (work_done < budget) {
 470		napi_complete(napi);
 471
 472		writel(readl(&lp->rx_dma_regs->dmasm) &
 473			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
 474			&lp->rx_dma_regs->dmasm);
 475	}
 476	return work_done;
 477}
 478
 479/*
 480 * Set or clear the multicast filter for this adaptor.
 481 */
 482static void korina_multicast_list(struct net_device *dev)
 483{
 484	struct korina_private *lp = netdev_priv(dev);
 485	unsigned long flags;
 486	struct netdev_hw_addr *ha;
 487	u32 recognise = ETH_ARC_AB;	/* always accept broadcasts */
 488	int i;
 489
 490	/* Set promiscuous mode */
 491	if (dev->flags & IFF_PROMISC)
 492		recognise |= ETH_ARC_PRO;
 493
 494	else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
 495		/* All multicast and broadcast */
 496		recognise |= ETH_ARC_AM;
 497
 498	/* Build the hash table */
 499	if (netdev_mc_count(dev) > 4) {
 500		u16 hash_table[4];
 501		u32 crc;
 502
 503		for (i = 0; i < 4; i++)
 504			hash_table[i] = 0;
 505
 506		netdev_for_each_mc_addr(ha, dev) {
 507			crc = ether_crc_le(6, ha->addr);
 508			crc >>= 26;
 509			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
 510		}
 511		/* Accept filtered multicast */
 512		recognise |= ETH_ARC_AFM;
 513
 514		/* Fill the MAC hash tables with their values */
 515		writel((u32)(hash_table[1] << 16 | hash_table[0]),
 516					&lp->eth_regs->ethhash0);
 517		writel((u32)(hash_table[3] << 16 | hash_table[2]),
 518					&lp->eth_regs->ethhash1);
 519	}
 520
 521	spin_lock_irqsave(&lp->lock, flags);
 522	writel(recognise, &lp->eth_regs->etharc);
 523	spin_unlock_irqrestore(&lp->lock, flags);
 524}
 525
/* Reap finished transmit descriptors.
 *
 * Called from the Tx DMA interrupt with interrupts already handled by
 * the caller.  Frees the transmitted skbs, updates statistics, resets
 * each reaped descriptor, and finally acknowledges and re-enables the
 * Tx DMA interrupt sources. */
static void korina_tx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
	u32 devcs;
	u32 dmas;

	spin_lock(&lp->lock);

	/* Process all desc that are done */
	while (IS_DMA_FINISHED(td->control)) {
		/* Room has been freed: let the stack transmit again */
		if (lp->tx_full == 1) {
			netif_wake_queue(dev);
			lp->tx_full = 0;
		}

		devcs = lp->td_ring[lp->tx_next_done].devcs;
		if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
				(ETH_TX_FD | ETH_TX_LD)) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;

			/* Should never happen */
			printk(KERN_ERR "%s: split tx ignored\n",
							dev->name);
		} else if (devcs & ETH_TX_TOK) {
			/* Transmit OK */
			dev->stats.tx_packets++;
			dev->stats.tx_bytes +=
					lp->tx_skb[lp->tx_next_done]->len;
		} else {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;

			/* Underflow */
			if (devcs & ETH_TX_UND)
				dev->stats.tx_fifo_errors++;

			/* Oversized frame */
			if (devcs & ETH_TX_OF)
				dev->stats.tx_aborted_errors++;

			/* Excessive deferrals */
			if (devcs & ETH_TX_ED)
				dev->stats.tx_carrier_errors++;

			/* Collisions: medium busy */
			if (devcs & ETH_TX_EC)
				dev->stats.collisions++;

			/* Late collision */
			if (devcs & ETH_TX_LC)
				dev->stats.tx_window_errors++;
		}

		/* We must always free the original skb */
		if (lp->tx_skb[lp->tx_next_done]) {
			dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
			lp->tx_skb[lp->tx_next_done] = NULL;
		}

		/* Reset the descriptor to its pristine state */
		lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
		lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
		lp->td_ring[lp->tx_next_done].link = 0;
		lp->td_ring[lp->tx_next_done].ca = 0;
		lp->tx_count--;

		/* Go on to next transmission */
		lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
		td = &lp->td_ring[lp->tx_next_done];

	}

	/* Clear the DMA status register */
	dmas = readl(&lp->tx_dma_regs->dmas);
	writel(~dmas, &lp->tx_dma_regs->dmas);

	/* Re-enable the Tx DMA interrupt sources */
	writel(readl(&lp->tx_dma_regs->dmasm) &
			~(DMA_STAT_FINI | DMA_STAT_ERR),
			&lp->tx_dma_regs->dmasm);

	spin_unlock(&lp->lock);
}
 608
/* Ethernet Tx DMA interrupt: mask Tx DMA interrupts, reap finished
 * descriptors via korina_tx(), and restart the chain if descriptors
 * were queued while the engine was idle. */
static irqreturn_t
korina_tx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	u32 dmas, dmasm;
	irqreturn_t retval;

	dmas = readl(&lp->tx_dma_regs->dmas);

	if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
		/* Mask Tx DMA interrupts; korina_tx() re-enables them */
		dmasm = readl(&lp->tx_dma_regs->dmasm);
		writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
				&lp->tx_dma_regs->dmasm);

		korina_tx(dev);

		/* Pending descriptors but an idle engine: restart it */
		if (lp->tx_chain_status == desc_filled &&
			(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
				&(lp->tx_dma_regs->dmandptr));
			lp->tx_chain_status = desc_empty;
			lp->tx_chain_head = lp->tx_chain_tail;
			dev->trans_start = jiffies;
		}
		if (dmas & DMA_STAT_ERR)
			printk(KERN_ERR "%s: DMA error\n", dev->name);

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	return retval;
}
 643
 644
 645static void korina_check_media(struct net_device *dev, unsigned int init_media)
 646{
 647	struct korina_private *lp = netdev_priv(dev);
 648
 649	mii_check_media(&lp->mii_if, 0, init_media);
 650
 651	if (lp->mii_if.full_duplex)
 652		writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
 653						&lp->eth_regs->ethmac2);
 654	else
 655		writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
 656						&lp->eth_regs->ethmac2);
 657}
 658
/* Timer callback: re-check the link state once per second. */
static void korina_poll_media(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct korina_private *lp = netdev_priv(dev);

	korina_check_media(dev, 0);
	mod_timer(&lp->media_check_timer, jiffies + HZ);
}
 667
 668static void korina_set_carrier(struct mii_if_info *mii)
 669{
 670	if (mii->force_media) {
 671		/* autoneg is off: Link is always assumed to be up */
 672		if (!netif_carrier_ok(mii->dev))
 673			netif_carrier_on(mii->dev);
 674	} else  /* Let MMI library update carrier status */
 675		korina_check_media(mii->dev, 0);
 676}
 677
/* ndo_do_ioctl: forward MII ioctls to the generic MII library and
 * refresh the carrier state afterwards.  Returns -EINVAL when the
 * interface is down. */
static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;
	spin_lock_irq(&lp->lock);
	rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&lp->lock);
	korina_set_carrier(&lp->mii_if);

	return rc;
}
 693
 694/* ethtool helpers */
 695static void netdev_get_drvinfo(struct net_device *dev,
 696			struct ethtool_drvinfo *info)
 697{
 698	struct korina_private *lp = netdev_priv(dev);
 699
 700	strcpy(info->driver, DRV_NAME);
 701	strcpy(info->version, DRV_VERSION);
 702	strcpy(info->bus_info, lp->dev->name);
 703}
 704
/* ethtool get_settings: read link settings via the MII library. */
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&lp->lock);
	rc = mii_ethtool_gset(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);

	return rc;
}
 716
/* ethtool set_settings: apply link settings via the MII library and
 * refresh the carrier state. */
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&lp->lock);
	rc = mii_ethtool_sset(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);
	korina_set_carrier(&lp->mii_if);

	return rc;
}
 729
/* ethtool get_link: report link-up status from the PHY. */
static u32 netdev_get_link(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	return mii_link_ok(&lp->mii_if);
}
 736
/* ethtool operations exported by this driver */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo            = netdev_get_drvinfo,
	.get_settings           = netdev_get_settings,
	.set_settings           = netdev_set_settings,
	.get_link               = netdev_get_link,
};
 743
/* Initialize both descriptor rings and allocate one receive buffer per
 * Rx descriptor.  Returns 0 on success or -ENOMEM if an skb allocation
 * fails (the caller is expected to clean up via korina_free_ring()). */
static int korina_alloc_ring(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	/* Initialize the transmit descriptors */
	for (i = 0; i < KORINA_NUM_TDS; i++) {
		lp->td_ring[i].control = DMA_DESC_IOF;
		lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
		lp->td_ring[i].ca = 0;
		lp->td_ring[i].link = 0;
	}
	lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
			lp->tx_full = lp->tx_count = 0;
	lp->tx_chain_status = desc_empty;

	/* Initialize the receive descriptors */
	for (i = 0; i < KORINA_NUM_RDS; i++) {
		skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
		if (!skb)
			return -ENOMEM;
		lp->rx_skb[i] = skb;
		lp->rd_ring[i].control = DMA_DESC_IOD |
				DMA_COUNT(KORINA_RBSIZE);
		lp->rd_ring[i].devcs = 0;
		lp->rd_ring[i].ca = CPHYSADDR(skb->data);
		/* link of the last element is fixed up below */
		lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
	}

	/* loop back receive descriptors, so the last
	 * descriptor points to the first one */
	lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
	lp->rd_ring[i - 1].control |= DMA_DESC_COD;

	lp->rx_next_done  = 0;
	lp->rx_chain_head = 0;
	lp->rx_chain_tail = 0;
	lp->rx_chain_status = desc_empty;

	return 0;
}
 786
 787static void korina_free_ring(struct net_device *dev)
 788{
 789	struct korina_private *lp = netdev_priv(dev);
 790	int i;
 791
 792	for (i = 0; i < KORINA_NUM_RDS; i++) {
 793		lp->rd_ring[i].control = 0;
 794		if (lp->rx_skb[i])
 795			dev_kfree_skb_any(lp->rx_skb[i]);
 796		lp->rx_skb[i] = NULL;
 797	}
 798
 799	for (i = 0; i < KORINA_NUM_TDS; i++) {
 800		lp->td_ring[i].control = 0;
 801		if (lp->tx_skb[i])
 802			dev_kfree_skb_any(lp->tx_skb[i]);
 803		lp->tx_skb[i] = NULL;
 804	}
 805}
 806
/*
 * Initialize the RC32434 ethernet controller.
 *
 * Aborts any running DMA, resets and re-enables the ethernet block,
 * (re)allocates the descriptor rings, starts Rx DMA, programs the
 * station address and MAC parameters, and finally enables NAPI and the
 * transmit queue.  Returns 0 on success, -ENOMEM on ring allocation
 * failure.
 */
static int korina_init(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	/* Disable DMA */
	korina_abort_tx(dev);
	korina_abort_rx(dev);

	/* reset ethernet logic */
	writel(0, &lp->eth_regs->ethintfc);
	/* Refresh trans_start while waiting so the watchdog stays quiet */
	while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
		dev->trans_start = jiffies;

	/* Enable Ethernet Interface */
	writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);

	/* Allocate rings */
	if (korina_alloc_ring(dev)) {
		printk(KERN_ERR "%s: descriptor allocation failed\n", dev->name);
		korina_free_ring(dev);
		return -ENOMEM;
	}

	writel(0, &lp->rx_dma_regs->dmas);
	/* Start Rx DMA */
	korina_start_rx(lp, &lp->rd_ring[0]);

	/* Unmask the Tx and Rx DMA interrupt sources */
	writel(readl(&lp->tx_dma_regs->dmasm) &
			~(DMA_STAT_FINI | DMA_STAT_ERR),
			&lp->tx_dma_regs->dmasm);
	writel(readl(&lp->rx_dma_regs->dmasm) &
			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
			&lp->rx_dma_regs->dmasm);

	/* Accept only packets destined for this Ethernet device address */
	writel(ETH_ARC_AB, &lp->eth_regs->etharc);

	/* Set all Ether station address registers to their initial values */
	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);


	/* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
	writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
			&lp->eth_regs->ethmac2);

	/* Back to back inter-packet-gap */
	writel(0x15, &lp->eth_regs->ethipgt);
	/* Non - Back to back inter-packet-gap */
	writel(0x12, &lp->eth_regs->ethipgr);

	/* Management Clock Prescaler Divisor
	 * Clock independent setting */
	writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
		       &lp->eth_regs->ethmcp);

	/* don't transmit until fifo contains 48b */
	writel(48, &lp->eth_regs->ethfifott);

	writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);

	napi_enable(&lp->napi);
	netif_start_queue(dev);

	return 0;
}
 885
/*
 * Restart the RC32434 ethernet controller.
 *
 * Workqueue handler scheduled from the overflow/underflow interrupts and
 * the Tx watchdog: quiesces interrupts and DMA, tears down the rings and
 * re-runs the full init sequence.
 */
static void korina_restart_task(struct work_struct *work)
{
	struct korina_private *lp = container_of(work,
			struct korina_private, restart_task);
	struct net_device *dev = lp->dev;

	/*
	 * Disable interrupts
	 */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
	disable_irq(lp->ovr_irq);
	disable_irq(lp->und_irq);

	/* Mask all Tx and Rx DMA interrupt sources */
	writel(readl(&lp->tx_dma_regs->dmasm) |
				DMA_STAT_FINI | DMA_STAT_ERR,
				&lp->tx_dma_regs->dmasm);
	writel(readl(&lp->rx_dma_regs->dmasm) |
				DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
				&lp->rx_dma_regs->dmasm);

	/* NOTE(review): the ring is freed before napi_disable(), so a
	 * concurrently running poll could still touch freed buffers --
	 * verify whether napi_disable() should come first. */
	korina_free_ring(dev);

	napi_disable(&lp->napi);

	if (korina_init(dev) < 0) {
		printk(KERN_ERR "%s: cannot restart device\n", dev->name);
		return;
	}
	korina_multicast_list(dev);

	enable_irq(lp->und_irq);
	enable_irq(lp->ovr_irq);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
 925
/* Stop the queue, write @value to the interface-control register to
 * clear the error condition, and schedule a full restart. */
static void korina_clear_and_restart(struct net_device *dev, u32 value)
{
	struct korina_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	writel(value, &lp->eth_regs->ethintfc);
	schedule_work(&lp->restart_task);
}
 934
/* Ethernet Tx Underflow interrupt */
static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	unsigned int und;

	spin_lock(&lp->lock);

	und = readl(&lp->eth_regs->ethintfc);

	/* On underflow: clear the UND bit and schedule a restart */
	if (und & ETH_INT_FC_UND)
		korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);

	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}
 953
/* ndo_tx_timeout: the netdev watchdog expired -- schedule a restart. */
static void korina_tx_timeout(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	schedule_work(&lp->restart_task);
}
 960
/* Ethernet Rx Overflow interrupt */
static irqreturn_t
korina_ovr_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	unsigned int ovr;

	spin_lock(&lp->lock);
	ovr = readl(&lp->eth_regs->ethintfc);

	/* On overflow: clear the OVR bit and schedule a restart */
	if (ovr & ETH_INT_FC_OVR)
		korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);

	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}
 979
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt path with IRQs disabled.
 * NOTE(review): only the Tx DMA handler is invoked here; Rx servicing
 * appears to rely on NAPI -- confirm this is sufficient for netconsole. */
static void korina_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	korina_tx_dma_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
 988
/* ndo_open: initialize the hardware, request the four interrupt lines
 * (Rx DMA, Tx DMA, overflow, underflow) and start the media poll timer.
 * On any failure, previously acquired resources are released via the
 * goto-cleanup chain. */
static int korina_open(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	int ret;

	/* Initialize */
	ret = korina_init(dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: cannot open device\n", dev->name);
		goto out;
	}

	/* Install the interrupt handler
	 * that handles the Done Finished
	 * Ovr and Und Events */
	ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
			IRQF_DISABLED, "Korina ethernet Rx", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
		    dev->name, lp->rx_irq);
		goto err_release;
	}
	ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
			IRQF_DISABLED, "Korina ethernet Tx", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
		    dev->name, lp->tx_irq);
		goto err_free_rx_irq;
	}

	/* Install handler for overrun error. */
	ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
			IRQF_DISABLED, "Ethernet Overflow", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
		    dev->name, lp->ovr_irq);
		goto err_free_tx_irq;
	}

	/* Install handler for underflow error. */
	ret = request_irq(lp->und_irq, korina_und_interrupt,
			IRQF_DISABLED, "Ethernet Underflow", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
		    dev->name, lp->und_irq);
		goto err_free_ovr_irq;
	}
	/* Kick off the 1 Hz link-state poll almost immediately */
	mod_timer(&lp->media_check_timer, jiffies + 1);
out:
	return ret;

err_free_ovr_irq:
	free_irq(lp->ovr_irq, dev);
err_free_tx_irq:
	free_irq(lp->tx_irq, dev);
err_free_rx_irq:
	free_irq(lp->rx_irq, dev);
err_release:
	korina_free_ring(dev);
	goto out;
}
1050
/* ndo_stop: stop the media timer, quiesce and mask both DMA channels,
 * release the rings, disable NAPI, flush any pending restart work and
 * free all four interrupt lines. */
static int korina_close(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	u32 tmp;

	del_timer(&lp->media_check_timer);

	/* Disable interrupts */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
	disable_irq(lp->ovr_irq);
	disable_irq(lp->und_irq);

	korina_abort_tx(dev);
	tmp = readl(&lp->tx_dma_regs->dmasm);
	tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
	writel(tmp, &lp->tx_dma_regs->dmasm);

	korina_abort_rx(dev);
	tmp = readl(&lp->rx_dma_regs->dmasm);
	tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
	writel(tmp, &lp->rx_dma_regs->dmasm);

	korina_free_ring(dev);

	napi_disable(&lp->napi);

	/* Make sure a queued restart cannot run after we return */
	cancel_work_sync(&lp->restart_task);

	free_irq(lp->rx_irq, dev);
	free_irq(lp->tx_irq, dev);
	free_irq(lp->ovr_irq, dev);
	free_irq(lp->und_irq, dev);

	return 0;
}
1087
/* net_device callbacks: open/stop manage the DMA rings and IRQs;
 * MAC-address and address-validation handling use the generic
 * ethernet helpers. */
static const struct net_device_ops korina_netdev_ops = {
	.ndo_open		= korina_open,
	.ndo_stop		= korina_close,
	.ndo_start_xmit		= korina_send_packet,
	.ndo_set_multicast_list	= korina_multicast_list,
	.ndo_tx_timeout		= korina_tx_timeout,
	.ndo_do_ioctl		= korina_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= korina_poll_controller,
#endif
};
1102
1103static int korina_probe(struct platform_device *pdev)
1104{
1105	struct korina_device *bif = platform_get_drvdata(pdev);
1106	struct korina_private *lp;
1107	struct net_device *dev;
1108	struct resource *r;
1109	int rc;
1110
1111	dev = alloc_etherdev(sizeof(struct korina_private));
1112	if (!dev) {
1113		printk(KERN_ERR DRV_NAME ": alloc_etherdev failed\n");
1114		return -ENOMEM;
1115	}
1116	SET_NETDEV_DEV(dev, &pdev->dev);
1117	lp = netdev_priv(dev);
1118
1119	bif->dev = dev;
1120	memcpy(dev->dev_addr, bif->mac, 6);
1121
1122	lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
1123	lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
1124	lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
1125	lp->und_irq = platform_get_irq_byname(pdev, "korina_und");
1126
1127	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
1128	dev->base_addr = r->start;
1129	lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
1130	if (!lp->eth_regs) {
1131		printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
1132		rc = -ENXIO;
1133		goto probe_err_out;
1134	}
1135
1136	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
1137	lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
1138	if (!lp->rx_dma_regs) {
1139		printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
1140		rc = -ENXIO;
1141		goto probe_err_dma_rx;
1142	}
1143
1144	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
1145	lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
1146	if (!lp->tx_dma_regs) {
1147		printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
1148		rc = -ENXIO;
1149		goto probe_err_dma_tx;
1150	}
1151
1152	lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
1153	if (!lp->td_ring) {
1154		printk(KERN_ERR DRV_NAME ": cannot allocate descriptors\n");
1155		rc = -ENXIO;
1156		goto probe_err_td_ring;
1157	}
1158
1159	dma_cache_inv((unsigned long)(lp->td_ring),
1160			TD_RING_SIZE + RD_RING_SIZE);
1161
1162	/* now convert TD_RING pointer to KSEG1 */
1163	lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
1164	lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];
1165
1166	spin_lock_init(&lp->lock);
1167	/* just use the rx dma irq */
1168	dev->irq = lp->rx_irq;
1169	lp->dev = dev;
1170
1171	dev->netdev_ops = &korina_netdev_ops;
1172	dev->ethtool_ops = &netdev_ethtool_ops;
1173	dev->watchdog_timeo = TX_TIMEOUT;
1174	netif_napi_add(dev, &lp->napi, korina_poll, 64);
1175
1176	lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
1177	lp->mii_if.dev = dev;
1178	lp->mii_if.mdio_read = mdio_read;
1179	lp->mii_if.mdio_write = mdio_write;
1180	lp->mii_if.phy_id = lp->phy_addr;
1181	lp->mii_if.phy_id_mask = 0x1f;
1182	lp->mii_if.reg_num_mask = 0x1f;
1183
1184	rc = register_netdev(dev);
1185	if (rc < 0) {
1186		printk(KERN_ERR DRV_NAME
1187			": cannot register net device: %d\n", rc);
1188		goto probe_err_register;
1189	}
1190	setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);
1191
1192	INIT_WORK(&lp->restart_task, korina_restart_task);
1193
1194	printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
1195			dev->name);
1196out:
1197	return rc;
1198
1199probe_err_register:
1200	kfree(lp->td_ring);
1201probe_err_td_ring:
1202	iounmap(lp->tx_dma_regs);
1203probe_err_dma_tx:
1204	iounmap(lp->rx_dma_regs);
1205probe_err_dma_rx:
1206	iounmap(lp->eth_regs);
1207probe_err_out:
1208	free_netdev(dev);
1209	goto out;
1210}
1211
1212static int korina_remove(struct platform_device *pdev)
1213{
1214	struct korina_device *bif = platform_get_drvdata(pdev);
1215	struct korina_private *lp = netdev_priv(bif->dev);
1216
1217	iounmap(lp->eth_regs);
1218	iounmap(lp->rx_dma_regs);
1219	iounmap(lp->tx_dma_regs);
1220
1221	platform_set_drvdata(pdev, NULL);
1222	unregister_netdev(bif->dev);
1223	free_netdev(bif->dev);
1224
1225	return 0;
1226}
1227
1228static struct platform_driver korina_driver = {
1229	.driver.name = "korina",
1230	.probe = korina_probe,
1231	.remove = korina_remove,
1232};
1233
/* Module load: hook the driver into the platform bus. */
static int __init korina_init_module(void)
{
	return platform_driver_register(&korina_driver);
}
1238
1239static void korina_cleanup_module(void)
1240{
1241	return platform_driver_unregister(&korina_driver);
1242}
1243
/* Register the module entry/exit points and metadata. */
module_init(korina_init_module);
module_exit(korina_cleanup_module);

MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
MODULE_LICENSE("GPL");