/*
 *  Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
 *
 *  Copyright 2004 IDT Inc. (rischelp@idt.com)
 *  Copyright 2006 Felix Fietkau <nbd@openwrt.org>
 *  Copyright 2008 Florian Fainelli <florian@openwrt.org>
 *
 *  This program is free software; you can redistribute  it and/or modify it
 *  under  the terms of  the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the  License, or (at your
 *  option) any later version.
 *
 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the  GNU General Public License along
 *  with this program; if not, write  to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  Writing to a DMA status register:
 *
 *  When writing to the status register, you should mask the bit you have
 *  been testing the status register with. Both Tx and Rx DMA registers
 *  should stick to this procedure.
 */

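/*
 * For example, the receive path acknowledges a completed descriptor by
 * writing back the complement of the DONE bit to the Rx DMA status
 * register:
 *
 *	writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
 */
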
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>

#include <asm/bootinfo.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/mach-rc32434/rb.h>
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/eth.h>
#include <asm/mach-rc32434/dma_v.h>

#define DRV_NAME        "korina"
#define DRV_VERSION     "0.10"
#define DRV_RELDATE     "04Mar2008"

#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
				   ((dev)->dev_addr[1]))
#define STATION_ADDRESS_LOW(dev)  (((dev)->dev_addr[2] << 24) | \
				   ((dev)->dev_addr[3] << 16) | \
				   ((dev)->dev_addr[4] << 8)  | \
				   ((dev)->dev_addr[5]))

#define MII_CLOCK 1250000	/* no more than 2.5MHz */

/* the following must be powers of two */
#define KORINA_NUM_RDS	64  /* number of receive descriptors */
#define KORINA_NUM_TDS	64  /* number of transmit descriptors */

/* KORINA_RBSIZE is the hardware's default maximum receive
 * frame size in bytes. Having this hardcoded means that there
 * is no support for MTU sizes greater than 1500. */
#define KORINA_RBSIZE	1536 /* size of one resource buffer = Ether MTU */
#define KORINA_RDS_MASK	(KORINA_NUM_RDS - 1)
#define KORINA_TDS_MASK	(KORINA_NUM_TDS - 1)
#define RD_RING_SIZE	(KORINA_NUM_RDS * sizeof(struct dma_desc))
#define TD_RING_SIZE	(KORINA_NUM_TDS * sizeof(struct dma_desc))

#define TX_TIMEOUT	(6000 * HZ / 1000)

enum chain_status { desc_filled, desc_empty };
#define IS_DMA_FINISHED(X)   (((X) & (DMA_DESC_FINI)) != 0)
#define IS_DMA_DONE(X)   (((X) & (DMA_DESC_DONE)) != 0)
#define RCVPKT_LENGTH(X)     (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
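
/* Descriptor status helpers: IS_DMA_FINISHED() and IS_DMA_DONE() test the
 * FINI and DONE bits in a descriptor's control word, while RCVPKT_LENGTH()
 * extracts the received frame length from an Rx descriptor's devcs word. */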

/* Information that needs to be kept for each board. */
struct korina_private {
	struct eth_regs *eth_regs;
	struct dma_reg *rx_dma_regs;
	struct dma_reg *tx_dma_regs;
	struct dma_desc *td_ring; /* transmit descriptor ring */
	struct dma_desc *rd_ring; /* receive descriptor ring  */

	struct sk_buff *tx_skb[KORINA_NUM_TDS];
	struct sk_buff *rx_skb[KORINA_NUM_RDS];

	int rx_next_done;
	int rx_chain_head;
	int rx_chain_tail;
	enum chain_status rx_chain_status;

	int tx_next_done;
	int tx_chain_head;
	int tx_chain_tail;
	enum chain_status tx_chain_status;
	int tx_count;
	int tx_full;

	int rx_irq;
	int tx_irq;
	int ovr_irq;
	int und_irq;

	spinlock_t lock;        /* NIC xmit lock */

	int dma_halt_cnt;
	int dma_run_cnt;
	struct napi_struct napi;
	struct timer_list media_check_timer;
	struct mii_if_info mii_if;
	struct work_struct restart_task;
	struct net_device *dev;
	int phy_addr;
};

extern unsigned int idt_cpu_freq;

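/* Low-level DMA helpers: korina_start_dma() points a DMA channel at a new
 * descriptor, korina_abort_dma() halts a running channel and clears its
 * descriptor pointers, and korina_chain_dma() appends a descriptor chain
 * through the channel's next-descriptor pointer register. */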
static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
{
	writel(0, &ch->dmandptr);
	writel(dma_addr, &ch->dmadptr);
}

static inline void korina_abort_dma(struct net_device *dev,
					struct dma_reg *ch)
{
	if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
		writel(0x10, &ch->dmac);

		while (!(readl(&ch->dmas) & DMA_STAT_HALT))
			netif_trans_update(dev);

		writel(0, &ch->dmas);
	}

	writel(0, &ch->dmadptr);
	writel(0, &ch->dmandptr);
}

static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
{
	writel(dma_addr, &ch->dmandptr);
}

static void korina_abort_tx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	korina_abort_dma(dev, lp->tx_dma_regs);
}

static void korina_abort_rx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	korina_abort_dma(dev, lp->rx_dma_regs);
}

static void korina_start_rx(struct korina_private *lp,
					struct dma_desc *rd)
{
	korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}

static void korina_chain_rx(struct korina_private *lp,
					struct dma_desc *rd)
{
	korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}

/* Queue a packet on the Tx descriptor chain and kick the DMA engine
 * if it is currently idle. */
static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	u32 length;
	u32 chain_prev, chain_next;
	struct dma_desc *td;

	spin_lock_irqsave(&lp->lock, flags);

	td = &lp->td_ring[lp->tx_chain_tail];

	/* stop queue when full, drop pkts if queue already full */
	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
		lp->tx_full = 1;

		if (lp->tx_count == (KORINA_NUM_TDS - 2))
			netif_stop_queue(dev);
		else {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);

			return NETDEV_TX_BUSY;
		}
	}

	lp->tx_count++;

	lp->tx_skb[lp->tx_chain_tail] = skb;

	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);

	/* Setup the transmit descriptor. */
	dma_cache_inv((u32) td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);
	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;

	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&lp->tx_dma_regs->dmandptr);
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Link to prev */
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			/* Link to prev */
			lp->td_ring[chain_prev].link = CPHYSADDR(td);
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&(lp->tx_dma_regs->dmandptr));
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
			lp->tx_chain_status = desc_empty;
		}
	} else {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			lp->tx_chain_status = desc_filled;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			lp->td_ring[chain_prev].link = CPHYSADDR(td);
			lp->tx_chain_tail = chain_next;
		}
	}
	dma_cache_wback((u32) td, sizeof(*td));

	netif_trans_update(dev);
	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}

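/* MDIO access helpers. The PHY address is derived from the Rx DMA IRQ
 * number: IRQ 0x2c selects PHY 1, any other IRQ selects PHY 0. */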
static int mdio_read(struct net_device *dev, int mii_id, int reg)
{
	struct korina_private *lp = netdev_priv(dev);
	int ret;

	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);

	writel(0, &lp->eth_regs->miimcfg);
	writel(0, &lp->eth_regs->miimcmd);
	writel(mii_id | reg, &lp->eth_regs->miimaddr);
	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);

	ret = (int)(readl(&lp->eth_regs->miimrdd));
	return ret;
}

static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct korina_private *lp = netdev_priv(dev);

	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);

	writel(0, &lp->eth_regs->miimcfg);
	writel(1, &lp->eth_regs->miimcmd);
	writel(mii_id | reg, &lp->eth_regs->miimaddr);
	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
	writel(val, &lp->eth_regs->miimwtd);
}

/* Ethernet Rx DMA interrupt */
static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	u32 dmas, dmasm;
	irqreturn_t retval;

	dmas = readl(&lp->rx_dma_regs->dmas);
	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
		dmasm = readl(&lp->rx_dma_regs->dmasm);
		writel(dmasm | (DMA_STAT_DONE |
				DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmasm);

		napi_schedule(&lp->napi);

		if (dmas & DMA_STAT_ERR)
			printk(KERN_ERR "%s: DMA error\n", dev->name);

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	return retval;
}

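/* NAPI receive path: walk up to @limit completed Rx descriptors, hand good
 * frames to the network stack, refill the ring with fresh buffers and
 * restart the Rx DMA channel if it has halted. */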
static int korina_rx(struct net_device *dev, int limit)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8 *pkt_buf;
	u32 devcs, pkt_len, dmas;
	int count;

	dma_cache_inv((u32)rd, sizeof(*rd));

	for (count = 0; count < limit; count++) {
		skb = lp->rx_skb[lp->rx_next_done];
		skb_new = NULL;

		devcs = rd->devcs;

		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
			break;

		/* Update statistics counters */
		if (devcs & ETH_RX_CRC)
			dev->stats.rx_crc_errors++;
		if (devcs & ETH_RX_LOR)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_LE)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_OVR)
			dev->stats.rx_fifo_errors++;
		if (devcs & ETH_RX_CV)
			dev->stats.rx_frame_errors++;
		if (devcs & ETH_RX_CES)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_MP)
			dev->stats.multicast++;

		if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
			/* check that this is a whole packet
			 * WARNING: DMA_FD bit incorrectly set
			 * in Rc32434 (errata ref #077) */
			dev->stats.rx_errors++;
			dev->stats.rx_dropped++;
		} else if ((devcs & ETH_RX_ROK)) {
			pkt_len = RCVPKT_LENGTH(devcs);

			/* must be the (first and) last
			 * descriptor then */
			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

			/* invalidate the cache */
			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);

			/* Malloc up new buffer. */
			skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);

			if (!skb_new)
				break;
			/* Do not count the CRC */
			skb_put(skb, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);

			/* Pass the packet to upper layers */
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;

			/* Update the mcast stats */
			if (devcs & ETH_RX_MP)
				dev->stats.multicast++;

			lp->rx_skb[lp->rx_next_done] = skb_new;
		}

		rd->devcs = 0;

		/* Restore descriptor's curr_addr */
		if (skb_new)
			rd->ca = CPHYSADDR(skb_new->data);
		else
			rd->ca = CPHYSADDR(skb->data);

		rd->control = DMA_COUNT(KORINA_RBSIZE) |
			DMA_DESC_COD | DMA_DESC_IOD;
		lp->rd_ring[(lp->rx_next_done - 1) &
			KORINA_RDS_MASK].control &=
			~DMA_DESC_COD;

		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
	}

	dmas = readl(&lp->rx_dma_regs->dmas);

	if (dmas & DMA_STAT_HALT) {
		writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmas);

		lp->dma_halt_cnt++;
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		korina_chain_rx(lp, rd);
	}

	return count;
}

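/* NAPI poll handler: receive up to @budget packets, then re-enable the Rx
 * DMA interrupts once the ring has been drained. */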
static int korina_poll(struct napi_struct *napi, int budget)
{
	struct korina_private *lp =
		container_of(napi, struct korina_private, napi);
	struct net_device *dev = lp->dev;
	int work_done;

	work_done = korina_rx(dev, budget);
	if (work_done < budget) {
		napi_complete(napi);

		writel(readl(&lp->rx_dma_regs->dmasm) &
			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
			&lp->rx_dma_regs->dmasm);
	}
	return work_done;
}

/*
 * Set or clear the multicast filter for this adaptor.
 */
static void korina_multicast_list(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	struct netdev_hw_addr *ha;
	u32 recognise = ETH_ARC_AB;	/* always accept broadcasts */

	/* Set promiscuous mode */
	if (dev->flags & IFF_PROMISC)
		recognise |= ETH_ARC_PRO;

	else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
		/* All multicast and broadcast */
		recognise |= ETH_ARC_AM;

	/* Build the hash table */
	if (netdev_mc_count(dev) > 4) {
		u16 hash_table[4] = { 0 };
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		/* Accept filtered multicast */
		recognise |= ETH_ARC_AFM;

		/* Fill the MAC hash tables with their values */
		writel((u32)(hash_table[1] << 16 | hash_table[0]),
					&lp->eth_regs->ethhash0);
		writel((u32)(hash_table[3] << 16 | hash_table[2]),
					&lp->eth_regs->ethhash1);
	}

	spin_lock_irqsave(&lp->lock, flags);
	writel(recognise, &lp->eth_regs->etharc);
	spin_unlock_irqrestore(&lp->lock, flags);
}

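/* Reclaim completed Tx descriptors: update the statistics from each
 * descriptor's devcs word, free the transmitted skbs, reset the descriptors
 * and unmask the Tx DMA interrupts again. */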
static void korina_tx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
	u32 devcs;
	u32 dmas;

	spin_lock(&lp->lock);

	/* Process all desc that are done */
	while (IS_DMA_FINISHED(td->control)) {
		if (lp->tx_full == 1) {
			netif_wake_queue(dev);
			lp->tx_full = 0;
		}

		devcs = lp->td_ring[lp->tx_next_done].devcs;
		if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
				(ETH_TX_FD | ETH_TX_LD)) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;

			/* Should never happen */
			printk(KERN_ERR "%s: split tx ignored\n",
							dev->name);
		} else if (devcs & ETH_TX_TOK) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes +=
					lp->tx_skb[lp->tx_next_done]->len;
		} else {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;

			/* Underflow */
			if (devcs & ETH_TX_UND)
				dev->stats.tx_fifo_errors++;

			/* Oversized frame */
			if (devcs & ETH_TX_OF)
				dev->stats.tx_aborted_errors++;

			/* Excessive deferrals */
			if (devcs & ETH_TX_ED)
				dev->stats.tx_carrier_errors++;

			/* Collisions: medium busy */
			if (devcs & ETH_TX_EC)
				dev->stats.collisions++;

			/* Late collision */
			if (devcs & ETH_TX_LC)
				dev->stats.tx_window_errors++;
		}

		/* We must always free the original skb */
		if (lp->tx_skb[lp->tx_next_done]) {
			dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
			lp->tx_skb[lp->tx_next_done] = NULL;
		}

		lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
		lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
		lp->td_ring[lp->tx_next_done].link = 0;
		lp->td_ring[lp->tx_next_done].ca = 0;
		lp->tx_count--;

		/* Go on to next transmission */
		lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
		td = &lp->td_ring[lp->tx_next_done];

	}

	/* Clear the DMA status register */
	dmas = readl(&lp->tx_dma_regs->dmas);
	writel(~dmas, &lp->tx_dma_regs->dmas);

	writel(readl(&lp->tx_dma_regs->dmasm) &
			~(DMA_STAT_FINI | DMA_STAT_ERR),
			&lp->tx_dma_regs->dmasm);

	spin_unlock(&lp->lock);
}

static irqreturn_t
korina_tx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	u32 dmas, dmasm;
	irqreturn_t retval;

	dmas = readl(&lp->tx_dma_regs->dmas);

	if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
		dmasm = readl(&lp->tx_dma_regs->dmasm);
		writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
				&lp->tx_dma_regs->dmasm);

		korina_tx(dev);

		if (lp->tx_chain_status == desc_filled &&
			(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
				&(lp->tx_dma_regs->dmandptr));
			lp->tx_chain_status = desc_empty;
			lp->tx_chain_head = lp->tx_chain_tail;
			netif_trans_update(dev);
		}
		if (dmas & DMA_STAT_ERR)
			printk(KERN_ERR "%s: DMA error\n", dev->name);

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	return retval;
}

static void korina_check_media(struct net_device *dev, unsigned int init_media)
{
	struct korina_private *lp = netdev_priv(dev);

	mii_check_media(&lp->mii_if, 0, init_media);

	if (lp->mii_if.full_duplex)
		writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
						&lp->eth_regs->ethmac2);
	else
		writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
						&lp->eth_regs->ethmac2);
}

static void korina_poll_media(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct korina_private *lp = netdev_priv(dev);

	korina_check_media(dev, 0);
	mod_timer(&lp->media_check_timer, jiffies + HZ);
}

static void korina_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	} else /* Let the MII library update carrier status */
		korina_check_media(mii->dev, 0);
}

static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;
	spin_lock_irq(&lp->lock);
	rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&lp->lock);
	korina_set_carrier(&lp->mii_if);

	return rc;
}

/* ethtool helpers */
static void netdev_get_drvinfo(struct net_device *dev,
			struct ethtool_drvinfo *info)
{
	struct korina_private *lp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&lp->lock);
	rc = mii_ethtool_gset(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&lp->lock);
	rc = mii_ethtool_sset(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);
	korina_set_carrier(&lp->mii_if);

	return rc;
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	return mii_link_ok(&lp->mii_if);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo            = netdev_get_drvinfo,
	.get_settings           = netdev_get_settings,
	.set_settings           = netdev_set_settings,
	.get_link               = netdev_get_link,
};

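/* Allocate and initialise the Tx and Rx descriptor rings. Every Rx
 * descriptor is given a freshly allocated socket buffer, and the last Rx
 * descriptor links back to the first so the ring wraps around. */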
static int korina_alloc_ring(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	/* Initialize the transmit descriptors */
	for (i = 0; i < KORINA_NUM_TDS; i++) {
		lp->td_ring[i].control = DMA_DESC_IOF;
		lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
		lp->td_ring[i].ca = 0;
		lp->td_ring[i].link = 0;
	}
	lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
			lp->tx_full = lp->tx_count = 0;
	lp->tx_chain_status = desc_empty;

	/* Initialize the receive descriptors */
	for (i = 0; i < KORINA_NUM_RDS; i++) {
		skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
		if (!skb)
			return -ENOMEM;
		lp->rx_skb[i] = skb;
		lp->rd_ring[i].control = DMA_DESC_IOD |
				DMA_COUNT(KORINA_RBSIZE);
		lp->rd_ring[i].devcs = 0;
		lp->rd_ring[i].ca = CPHYSADDR(skb->data);
		lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
	}

	/* loop back receive descriptors, so the last
	 * descriptor points to the first one */
	lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
	lp->rd_ring[i - 1].control |= DMA_DESC_COD;

	lp->rx_next_done  = 0;
	lp->rx_chain_head = 0;
	lp->rx_chain_tail = 0;
	lp->rx_chain_status = desc_empty;

	return 0;
}

static void korina_free_ring(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	int i;

	for (i = 0; i < KORINA_NUM_RDS; i++) {
		lp->rd_ring[i].control = 0;
		if (lp->rx_skb[i])
			dev_kfree_skb_any(lp->rx_skb[i]);
		lp->rx_skb[i] = NULL;
	}

	for (i = 0; i < KORINA_NUM_TDS; i++) {
		lp->td_ring[i].control = 0;
		if (lp->tx_skb[i])
			dev_kfree_skb_any(lp->tx_skb[i]);
		lp->tx_skb[i] = NULL;
	}
}

/*
 * Initialize the RC32434 ethernet controller.
 */
static int korina_init(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	/* Disable DMA */
	korina_abort_tx(dev);
	korina_abort_rx(dev);

	/* reset ethernet logic */
	writel(0, &lp->eth_regs->ethintfc);
	while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
		netif_trans_update(dev);

	/* Enable Ethernet Interface */
	writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);

	/* Allocate rings */
	if (korina_alloc_ring(dev)) {
		printk(KERN_ERR "%s: descriptor allocation failed\n", dev->name);
		korina_free_ring(dev);
		return -ENOMEM;
	}

	writel(0, &lp->rx_dma_regs->dmas);
	/* Start Rx DMA */
	korina_start_rx(lp, &lp->rd_ring[0]);

	writel(readl(&lp->tx_dma_regs->dmasm) &
			~(DMA_STAT_FINI | DMA_STAT_ERR),
			&lp->tx_dma_regs->dmasm);
	writel(readl(&lp->rx_dma_regs->dmasm) &
			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
			&lp->rx_dma_regs->dmasm);

	/* Accept only packets destined for this Ethernet device address */
	writel(ETH_ARC_AB, &lp->eth_regs->etharc);

	/* Set all Ether station address registers to their initial values */
	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);


	/* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
	writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
			&lp->eth_regs->ethmac2);

	/* Back to back inter-packet-gap */
	writel(0x15, &lp->eth_regs->ethipgt);
	/* Non - Back to back inter-packet-gap */
	writel(0x12, &lp->eth_regs->ethipgr);

	/* Management Clock Prescaler Divisor
	 * Clock independent setting */
	writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
			&lp->eth_regs->ethmcp);

	/* don't transmit until fifo contains 48b */
	writel(48, &lp->eth_regs->ethfifott);

	writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);

	napi_enable(&lp->napi);
	netif_start_queue(dev);

	return 0;
}

/*
 * Restart the RC32434 ethernet controller.
 */
static void korina_restart_task(struct work_struct *work)
{
	struct korina_private *lp = container_of(work,
			struct korina_private, restart_task);
	struct net_device *dev = lp->dev;

	/*
	 * Disable interrupts
	 */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
	disable_irq(lp->ovr_irq);
	disable_irq(lp->und_irq);

	writel(readl(&lp->tx_dma_regs->dmasm) |
				DMA_STAT_FINI | DMA_STAT_ERR,
				&lp->tx_dma_regs->dmasm);
	writel(readl(&lp->rx_dma_regs->dmasm) |
				DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
				&lp->rx_dma_regs->dmasm);

	napi_disable(&lp->napi);

	korina_free_ring(dev);

	if (korina_init(dev) < 0) {
		printk(KERN_ERR "%s: cannot restart device\n", dev->name);
		return;
	}
	korina_multicast_list(dev);

	enable_irq(lp->und_irq);
	enable_irq(lp->ovr_irq);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}

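/* Fatal MAC events (Tx underflow, Rx overflow) and Tx timeouts are handled
 * by scheduling restart_task, which reinitialises the whole controller. */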
static void korina_clear_and_restart(struct net_device *dev, u32 value)
{
	struct korina_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	writel(value, &lp->eth_regs->ethintfc);
	schedule_work(&lp->restart_task);
}

/* Ethernet Tx Underflow interrupt */
static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	unsigned int und;

	spin_lock(&lp->lock);

	und = readl(&lp->eth_regs->ethintfc);

	if (und & ETH_INT_FC_UND)
		korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);

	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}

static void korina_tx_timeout(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	schedule_work(&lp->restart_task);
}

/* Ethernet Rx Overflow interrupt */
static irqreturn_t
korina_ovr_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	unsigned int ovr;

	spin_lock(&lp->lock);
	ovr = readl(&lp->eth_regs->ethintfc);

	if (ovr & ETH_INT_FC_OVR)
		korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);

	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void korina_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	korina_tx_dma_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static int korina_open(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	int ret;

	/* Initialize */
	ret = korina_init(dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: cannot open device\n", dev->name);
		goto out;
	}

	/* Install the interrupt handlers that handle the
	 * Done, Finished, Ovr and Und events */
	ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
			0, "Korina ethernet Rx", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
		    dev->name, lp->rx_irq);
		goto err_release;
	}
	ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
			0, "Korina ethernet Tx", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
		    dev->name, lp->tx_irq);
		goto err_free_rx_irq;
	}

	/* Install handler for overrun error. */
	ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
			0, "Ethernet Overflow", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
		    dev->name, lp->ovr_irq);
		goto err_free_tx_irq;
	}

	/* Install handler for underflow error. */
	ret = request_irq(lp->und_irq, korina_und_interrupt,
			0, "Ethernet Underflow", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
		    dev->name, lp->und_irq);
		goto err_free_ovr_irq;
	}
	mod_timer(&lp->media_check_timer, jiffies + 1);
out:
	return ret;

err_free_ovr_irq:
	free_irq(lp->ovr_irq, dev);
err_free_tx_irq:
	free_irq(lp->tx_irq, dev);
err_free_rx_irq:
	free_irq(lp->rx_irq, dev);
err_release:
	korina_free_ring(dev);
	goto out;
}

static int korina_close(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	u32 tmp;

	del_timer(&lp->media_check_timer);

	/* Disable interrupts */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
	disable_irq(lp->ovr_irq);
	disable_irq(lp->und_irq);

	korina_abort_tx(dev);
	tmp = readl(&lp->tx_dma_regs->dmasm);
	tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
	writel(tmp, &lp->tx_dma_regs->dmasm);

	korina_abort_rx(dev);
	tmp = readl(&lp->rx_dma_regs->dmasm);
	tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
	writel(tmp, &lp->rx_dma_regs->dmasm);

	napi_disable(&lp->napi);

	cancel_work_sync(&lp->restart_task);

	korina_free_ring(dev);

	free_irq(lp->rx_irq, dev);
	free_irq(lp->tx_irq, dev);
	free_irq(lp->ovr_irq, dev);
	free_irq(lp->und_irq, dev);

	return 0;
}

static const struct net_device_ops korina_netdev_ops = {
	.ndo_open		= korina_open,
	.ndo_stop		= korina_close,
	.ndo_start_xmit		= korina_send_packet,
	.ndo_set_rx_mode	= korina_multicast_list,
	.ndo_tx_timeout		= korina_tx_timeout,
	.ndo_do_ioctl		= korina_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= korina_poll_controller,
#endif
};

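/* Platform probe: map the MAC and DMA register windows, allocate the
 * descriptor rings in uncached KSEG1 memory, set up the MII interface and
 * register the network device. */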
static int korina_probe(struct platform_device *pdev)
{
	struct korina_device *bif = platform_get_drvdata(pdev);
	struct korina_private *lp;
	struct net_device *dev;
	struct resource *r;
	int rc;

	dev = alloc_etherdev(sizeof(struct korina_private));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	lp = netdev_priv(dev);

	bif->dev = dev;
	memcpy(dev->dev_addr, bif->mac, ETH_ALEN);

	lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
	lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
	lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
	lp->und_irq = platform_get_irq_byname(pdev, "korina_und");

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
	dev->base_addr = r->start;
	lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
	if (!lp->eth_regs) {
		printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
		rc = -ENXIO;
		goto probe_err_out;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
	lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
	if (!lp->rx_dma_regs) {
		printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
		rc = -ENXIO;
		goto probe_err_dma_rx;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
	lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
	if (!lp->tx_dma_regs) {
		printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
		rc = -ENXIO;
		goto probe_err_dma_tx;
	}

	lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
	if (!lp->td_ring) {
		rc = -ENXIO;
		goto probe_err_td_ring;
	}

	dma_cache_inv((unsigned long)(lp->td_ring),
			TD_RING_SIZE + RD_RING_SIZE);

	/* now convert TD_RING pointer to KSEG1 */
	lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
	lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];

	spin_lock_init(&lp->lock);
	/* just use the rx dma irq */
	dev->irq = lp->rx_irq;
	lp->dev = dev;

	dev->netdev_ops = &korina_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &lp->napi, korina_poll, 64);

	lp->phy_addr = (((lp->rx_irq == 0x2c ? 1 : 0) << 8) | 0x05);
	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = mdio_read;
	lp->mii_if.mdio_write = mdio_write;
	lp->mii_if.phy_id = lp->phy_addr;
	lp->mii_if.phy_id_mask = 0x1f;
	lp->mii_if.reg_num_mask = 0x1f;

	rc = register_netdev(dev);
	if (rc < 0) {
		printk(KERN_ERR DRV_NAME
			": cannot register net device: %d\n", rc);
		goto probe_err_register;
	}
	setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);

	INIT_WORK(&lp->restart_task, korina_restart_task);

	printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
			dev->name);
out:
	return rc;

probe_err_register:
	kfree(lp->td_ring);
probe_err_td_ring:
	iounmap(lp->tx_dma_regs);
probe_err_dma_tx:
	iounmap(lp->rx_dma_regs);
probe_err_dma_rx:
	iounmap(lp->eth_regs);
probe_err_out:
	free_netdev(dev);
	goto out;
}

static int korina_remove(struct platform_device *pdev)
{
	struct korina_device *bif = platform_get_drvdata(pdev);
	struct korina_private *lp = netdev_priv(bif->dev);

	iounmap(lp->eth_regs);
	iounmap(lp->rx_dma_regs);
	iounmap(lp->tx_dma_regs);

	unregister_netdev(bif->dev);
	free_netdev(bif->dev);

	return 0;
}

static struct platform_driver korina_driver = {
	.driver.name = "korina",
	.probe = korina_probe,
	.remove = korina_remove,
};

module_platform_driver(korina_driver);

MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
MODULE_LICENSE("GPL");