v6.8
   1/*
   2 *  Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
   3 *
   4 *  Copyright 2004 IDT Inc. (rischelp@idt.com)
   5 *  Copyright 2006 Felix Fietkau <nbd@openwrt.org>
   6 *  Copyright 2008 Florian Fainelli <florian@openwrt.org>
   7 *  Copyright 2017 Roman Yeryomin <roman@advem.lv>
   8 *
   9 *  This program is free software; you can redistribute  it and/or modify it
  10 *  under  the terms of  the GNU General  Public License as published by the
  11 *  Free Software Foundation;  either version 2 of the  License, or (at your
  12 *  option) any later version.
  13 *
  14 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
  15 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
  16 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
  17 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
  18 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  19 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
  20 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  21 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
  22 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  23 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  24 *
  25 *  You should have received a copy of the  GNU General Public License along
  26 *  with this program; if not, write  to the Free Software Foundation, Inc.,
  27 *  675 Mass Ave, Cambridge, MA 02139, USA.
  28 *
  29 *  Writing to a DMA status register:
  30 *
   31 *  When writing to the status register, mask out only the bit you have
   32 *  been testing it with, i.e. write back that bit's complement. Both the
   33 *  Tx and Rx DMA registers should follow this procedure.
  34 */
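/*
 * A minimal sketch of this pattern (illustrative only, not part of the
 * driver): after testing the Rx "done" bit, acknowledge just that bit by
 * writing its complement back, leaving the other status bits alone:
 *
 *	dmas = readl(&lp->rx_dma_regs->dmas);
 *	if (dmas & DMA_STAT_DONE)
 *		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
 */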
  35
  36#include <linux/module.h>
  37#include <linux/kernel.h>
  38#include <linux/moduleparam.h>
  39#include <linux/sched.h>
  40#include <linux/ctype.h>
  41#include <linux/types.h>
  42#include <linux/interrupt.h>
  43#include <linux/ioport.h>
  44#include <linux/iopoll.h>
  45#include <linux/in.h>
  46#include <linux/of.h>
  47#include <linux/of_net.h>
  48#include <linux/slab.h>
  49#include <linux/string.h>
  50#include <linux/delay.h>
  51#include <linux/netdevice.h>
  52#include <linux/etherdevice.h>
  53#include <linux/skbuff.h>
  54#include <linux/errno.h>
  55#include <linux/platform_device.h>
  56#include <linux/mii.h>
  57#include <linux/ethtool.h>
  58#include <linux/crc32.h>
  59#include <linux/pgtable.h>
  60#include <linux/clk.h>
  61
  62#define DRV_NAME	"korina"
  63#define DRV_VERSION	"0.20"
  64#define DRV_RELDATE	"15Sep2017"
  65
  66struct eth_regs {
  67	u32 ethintfc;
  68	u32 ethfifott;
  69	u32 etharc;
  70	u32 ethhash0;
  71	u32 ethhash1;
  72	u32 ethu0[4];		/* Reserved. */
  73	u32 ethpfs;
  74	u32 ethmcp;
  75	u32 eth_u1[10];		/* Reserved. */
  76	u32 ethspare;
  77	u32 eth_u2[42];		/* Reserved. */
  78	u32 ethsal0;
  79	u32 ethsah0;
  80	u32 ethsal1;
  81	u32 ethsah1;
  82	u32 ethsal2;
  83	u32 ethsah2;
  84	u32 ethsal3;
  85	u32 ethsah3;
  86	u32 ethrbc;
  87	u32 ethrpc;
  88	u32 ethrupc;
  89	u32 ethrfc;
  90	u32 ethtbc;
  91	u32 ethgpf;
  92	u32 eth_u9[50];		/* Reserved. */
  93	u32 ethmac1;
  94	u32 ethmac2;
  95	u32 ethipgt;
  96	u32 ethipgr;
  97	u32 ethclrt;
  98	u32 ethmaxf;
  99	u32 eth_u10;		/* Reserved. */
 100	u32 ethmtest;
 101	u32 miimcfg;
 102	u32 miimcmd;
 103	u32 miimaddr;
 104	u32 miimwtd;
 105	u32 miimrdd;
 106	u32 miimind;
 107	u32 eth_u11;		/* Reserved. */
 108	u32 eth_u12;		/* Reserved. */
 109	u32 ethcfsa0;
 110	u32 ethcfsa1;
 111	u32 ethcfsa2;
 112};
 113
 114/* Ethernet interrupt registers */
 115#define ETH_INT_FC_EN		BIT(0)
 116#define ETH_INT_FC_ITS		BIT(1)
 117#define ETH_INT_FC_RIP		BIT(2)
 118#define ETH_INT_FC_JAM		BIT(3)
 119#define ETH_INT_FC_OVR		BIT(4)
 120#define ETH_INT_FC_UND		BIT(5)
 121#define ETH_INT_FC_IOC		0x000000c0
 122
 123/* Ethernet FIFO registers */
  124#define ETH_FIFO_TT_TTH_BIT	0
 125#define ETH_FIFO_TT_TTH		0x0000007f
 126
 127/* Ethernet ARC/multicast registers */
 128#define ETH_ARC_PRO		BIT(0)
 129#define ETH_ARC_AM		BIT(1)
 130#define ETH_ARC_AFM		BIT(2)
 131#define ETH_ARC_AB		BIT(3)
 132
 133/* Ethernet SAL registers */
 134#define ETH_SAL_BYTE_5		0x000000ff
 135#define ETH_SAL_BYTE_4		0x0000ff00
 136#define ETH_SAL_BYTE_3		0x00ff0000
 137#define ETH_SAL_BYTE_2		0xff000000
 138
 139/* Ethernet SAH registers */
 140#define ETH_SAH_BYTE1		0x000000ff
 141#define ETH_SAH_BYTE0		0x0000ff00
 142
 143/* Ethernet GPF register */
 144#define ETH_GPF_PTV		0x0000ffff
 145
  146/* Ethernet PFS register */
 147#define ETH_PFS_PFD		BIT(0)
 148
 149/* Ethernet CFSA[0-3] registers */
 150#define ETH_CFSA0_CFSA4		0x000000ff
 151#define ETH_CFSA0_CFSA5		0x0000ff00
 152#define ETH_CFSA1_CFSA2		0x000000ff
 153#define ETH_CFSA1_CFSA3		0x0000ff00
 154#define ETH_CFSA1_CFSA0		0x000000ff
 155#define ETH_CFSA1_CFSA1		0x0000ff00
 156
 157/* Ethernet MAC1 registers */
 158#define ETH_MAC1_RE		BIT(0)
 159#define ETH_MAC1_PAF		BIT(1)
 160#define ETH_MAC1_RFC		BIT(2)
 161#define ETH_MAC1_TFC		BIT(3)
 162#define ETH_MAC1_LB		BIT(4)
 163#define ETH_MAC1_MR		BIT(31)
 164
 165/* Ethernet MAC2 registers */
 166#define ETH_MAC2_FD		BIT(0)
 167#define ETH_MAC2_FLC		BIT(1)
 168#define ETH_MAC2_HFE		BIT(2)
 169#define ETH_MAC2_DC		BIT(3)
 170#define ETH_MAC2_CEN		BIT(4)
 171#define ETH_MAC2_PE		BIT(5)
 172#define ETH_MAC2_VPE		BIT(6)
 173#define ETH_MAC2_APE		BIT(7)
 174#define ETH_MAC2_PPE		BIT(8)
 175#define ETH_MAC2_LPE		BIT(9)
 176#define ETH_MAC2_NB		BIT(12)
 177#define ETH_MAC2_BP		BIT(13)
 178#define ETH_MAC2_ED		BIT(14)
 179
 180/* Ethernet IPGT register */
 181#define ETH_IPGT		0x0000007f
 182
 183/* Ethernet IPGR registers */
 184#define ETH_IPGR_IPGR2		0x0000007f
 185#define ETH_IPGR_IPGR1		0x00007f00
 186
 187/* Ethernet CLRT registers */
 188#define ETH_CLRT_MAX_RET	0x0000000f
 189#define ETH_CLRT_COL_WIN	0x00003f00
 190
 191/* Ethernet MAXF register */
 192#define ETH_MAXF		0x0000ffff
 193
 194/* Ethernet test registers */
 195#define ETH_TEST_REG		BIT(2)
 196#define ETH_MCP_DIV		0x000000ff
 197
 198/* MII registers */
 199#define ETH_MII_CFG_RSVD	0x0000000c
 200#define ETH_MII_CMD_RD		BIT(0)
 201#define ETH_MII_CMD_SCN		BIT(1)
 202#define ETH_MII_REG_ADDR	0x0000001f
 203#define ETH_MII_PHY_ADDR	0x00001f00
 204#define ETH_MII_WTD_DATA	0x0000ffff
 205#define ETH_MII_RDD_DATA	0x0000ffff
 206#define ETH_MII_IND_BSY		BIT(0)
 207#define ETH_MII_IND_SCN		BIT(1)
 208#define ETH_MII_IND_NV		BIT(2)
 209
 210/* Values for the DEVCS field of the Ethernet DMA Rx and Tx descriptors. */
 211#define ETH_RX_FD		BIT(0)
 212#define ETH_RX_LD		BIT(1)
 213#define ETH_RX_ROK		BIT(2)
 214#define ETH_RX_FM		BIT(3)
 215#define ETH_RX_MP		BIT(4)
 216#define ETH_RX_BP		BIT(5)
 217#define ETH_RX_VLT		BIT(6)
 218#define ETH_RX_CF		BIT(7)
 219#define ETH_RX_OVR		BIT(8)
 220#define ETH_RX_CRC		BIT(9)
 221#define ETH_RX_CV		BIT(10)
 222#define ETH_RX_DB		BIT(11)
 223#define ETH_RX_LE		BIT(12)
 224#define ETH_RX_LOR		BIT(13)
 225#define ETH_RX_CES		BIT(14)
 226#define ETH_RX_LEN_BIT		16
 227#define ETH_RX_LEN		0xffff0000
 228
 229#define ETH_TX_FD		BIT(0)
 230#define ETH_TX_LD		BIT(1)
 231#define ETH_TX_OEN		BIT(2)
 232#define ETH_TX_PEN		BIT(3)
 233#define ETH_TX_CEN		BIT(4)
 234#define ETH_TX_HEN		BIT(5)
 235#define ETH_TX_TOK		BIT(6)
 236#define ETH_TX_MP		BIT(7)
 237#define ETH_TX_BP		BIT(8)
 238#define ETH_TX_UND		BIT(9)
 239#define ETH_TX_OF		BIT(10)
 240#define ETH_TX_ED		BIT(11)
 241#define ETH_TX_EC		BIT(12)
 242#define ETH_TX_LC		BIT(13)
 243#define ETH_TX_TD		BIT(14)
 244#define ETH_TX_CRC		BIT(15)
 245#define ETH_TX_LE		BIT(16)
 246#define ETH_TX_CC		0x001E0000
 247
 248/* DMA descriptor (in physical memory). */
 249struct dma_desc {
 250	u32 control;			/* Control. use DMAD_* */
 251	u32 ca;				/* Current Address. */
 252	u32 devcs;			/* Device control and status. */
 253	u32 link;			/* Next descriptor in chain. */
 254};
 255
 256#define DMA_DESC_COUNT_BIT		0
 257#define DMA_DESC_COUNT_MSK		0x0003ffff
 258#define DMA_DESC_DS_BIT			20
 259#define DMA_DESC_DS_MSK			0x00300000
 260
 261#define DMA_DESC_DEV_CMD_BIT		22
 262#define DMA_DESC_DEV_CMD_MSK		0x01c00000
 263
 264/* DMA descriptors interrupts */
 265#define DMA_DESC_COF			BIT(25) /* Chain on finished */
 266#define DMA_DESC_COD			BIT(26) /* Chain on done */
 267#define DMA_DESC_IOF			BIT(27) /* Interrupt on finished */
 268#define DMA_DESC_IOD			BIT(28) /* Interrupt on done */
 269#define DMA_DESC_TERM			BIT(29) /* Terminated */
 270#define DMA_DESC_DONE			BIT(30) /* Done */
 271#define DMA_DESC_FINI			BIT(31) /* Finished */
 272
 273/* DMA register (within Internal Register Map).  */
 274struct dma_reg {
 275	u32 dmac;		/* Control. */
 276	u32 dmas;		/* Status. */
 277	u32 dmasm;		/* Mask. */
 278	u32 dmadptr;		/* Descriptor pointer. */
 279	u32 dmandptr;		/* Next descriptor pointer. */
 280};
 281
 282/* DMA channels specific registers */
 283#define DMA_CHAN_RUN_BIT		BIT(0)
 284#define DMA_CHAN_DONE_BIT		BIT(1)
 285#define DMA_CHAN_MODE_BIT		BIT(2)
 286#define DMA_CHAN_MODE_MSK		0x0000000c
 287#define	 DMA_CHAN_MODE_AUTO		0
 288#define	 DMA_CHAN_MODE_BURST		1
 289#define	 DMA_CHAN_MODE_XFRT		2
 290#define	 DMA_CHAN_MODE_RSVD		3
 291#define DMA_CHAN_ACT_BIT		BIT(4)
 292
 293/* DMA status registers */
 294#define DMA_STAT_FINI			BIT(0)
 295#define DMA_STAT_DONE			BIT(1)
 296#define DMA_STAT_CHAIN			BIT(2)
 297#define DMA_STAT_ERR			BIT(3)
 298#define DMA_STAT_HALT			BIT(4)
 299
 300#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
 301				   ((dev)->dev_addr[1]))
 302#define STATION_ADDRESS_LOW(dev)  (((dev)->dev_addr[2] << 24) | \
 303				   ((dev)->dev_addr[3] << 16) | \
 304				   ((dev)->dev_addr[4] << 8)  | \
 305				   ((dev)->dev_addr[5]))
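/*
 * For example (illustrative only): with dev_addr 00:11:22:33:44:55,
 * STATION_ADDRESS_HIGH() evaluates to 0x0011 and STATION_ADDRESS_LOW()
 * to 0x22334455, which korina_init() writes into the ethsahN/ethsalN
 * registers.
 */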
 306
 307#define MII_CLOCK	1250000 /* no more than 2.5MHz */
 308
 309/* the following must be powers of two */
 310#define KORINA_NUM_RDS	64  /* number of receive descriptors */
 311#define KORINA_NUM_TDS	64  /* number of transmit descriptors */
 312
 313/* KORINA_RBSIZE is the hardware's default maximum receive
 314 * frame size in bytes. Having this hardcoded means that there
 315 * is no support for MTU sizes greater than 1500. */
 316#define KORINA_RBSIZE	1536 /* size of one resource buffer = Ether MTU */
 317#define KORINA_RDS_MASK	(KORINA_NUM_RDS - 1)
 318#define KORINA_TDS_MASK	(KORINA_NUM_TDS - 1)
 319#define RD_RING_SIZE	(KORINA_NUM_RDS * sizeof(struct dma_desc))
 320#define TD_RING_SIZE	(KORINA_NUM_TDS * sizeof(struct dma_desc))
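/*
 * Illustrative arithmetic only: with 64 descriptors of 16 bytes each
 * (four u32 fields per struct dma_desc), RD_RING_SIZE and TD_RING_SIZE
 * each come to 1024 bytes of coherent DMA memory.
 */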
 321
 322#define TX_TIMEOUT	(6000 * HZ / 1000)
 323
 324enum chain_status {
 325	desc_filled,
 326	desc_is_empty
 327};
 328
 329#define DMA_COUNT(count)	((count) & DMA_DESC_COUNT_MSK)
 330#define IS_DMA_FINISHED(X)	(((X) & (DMA_DESC_FINI)) != 0)
 331#define IS_DMA_DONE(X)		(((X) & (DMA_DESC_DONE)) != 0)
 332#define RCVPKT_LENGTH(X)	(((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
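/*
 * For example (illustrative only): an Rx devcs value of 0x00400004 has
 * ETH_RX_ROK set and RCVPKT_LENGTH() == 64, i.e. a 64-byte frame
 * (including the 4-byte CRC) was received OK.
 */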
 333
  334/* Information that needs to be kept for each board. */
 335struct korina_private {
 336	struct eth_regs __iomem *eth_regs;
 337	struct dma_reg __iomem *rx_dma_regs;
 338	struct dma_reg __iomem *tx_dma_regs;
 339	struct dma_desc *td_ring; /* transmit descriptor ring */
 340	struct dma_desc *rd_ring; /* receive descriptor ring  */
 341	dma_addr_t td_dma;
 342	dma_addr_t rd_dma;
 343
 344	struct sk_buff *tx_skb[KORINA_NUM_TDS];
 345	struct sk_buff *rx_skb[KORINA_NUM_RDS];
 346
 347	dma_addr_t rx_skb_dma[KORINA_NUM_RDS];
 348	dma_addr_t tx_skb_dma[KORINA_NUM_TDS];
 349
 350	int rx_next_done;
 351	int rx_chain_head;
 352	int rx_chain_tail;
 353	enum chain_status rx_chain_status;
 354
 355	int tx_next_done;
 356	int tx_chain_head;
 357	int tx_chain_tail;
 358	enum chain_status tx_chain_status;
 359	int tx_count;
 360	int tx_full;
 361
 362	int rx_irq;
 363	int tx_irq;
 364
 365	spinlock_t lock;	/* NIC xmit lock */
 366
 367	int dma_halt_cnt;
 368	int dma_run_cnt;
 369	struct napi_struct napi;
 370	struct timer_list media_check_timer;
 371	struct mii_if_info mii_if;
 372	struct work_struct restart_task;
 373	struct net_device *dev;
 374	struct device *dmadev;
 375	int mii_clock_freq;
 376};
 377
 378static dma_addr_t korina_tx_dma(struct korina_private *lp, int idx)
 379{
 380	return lp->td_dma + (idx * sizeof(struct dma_desc));
 381}
 382
 383static dma_addr_t korina_rx_dma(struct korina_private *lp, int idx)
 384{
 385	return lp->rd_dma + (idx * sizeof(struct dma_desc));
 386}
 387
 388static inline void korina_abort_dma(struct net_device *dev,
 389					struct dma_reg *ch)
 390{
 391	if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
 392		writel(0x10, &ch->dmac);
 393
 394		while (!(readl(&ch->dmas) & DMA_STAT_HALT))
 395			netif_trans_update(dev);
 396
 397		writel(0, &ch->dmas);
 398	}
 399
 400	writel(0, &ch->dmadptr);
 401	writel(0, &ch->dmandptr);
 402}
 403
 404static void korina_abort_tx(struct net_device *dev)
 405{
 406	struct korina_private *lp = netdev_priv(dev);
 407
 408	korina_abort_dma(dev, lp->tx_dma_regs);
 409}
 410
 411static void korina_abort_rx(struct net_device *dev)
 412{
 413	struct korina_private *lp = netdev_priv(dev);
 414
 415	korina_abort_dma(dev, lp->rx_dma_regs);
 416}
 417
  417
 418/* transmit packet */
 419static netdev_tx_t korina_send_packet(struct sk_buff *skb,
 420				      struct net_device *dev)
 421{
 422	struct korina_private *lp = netdev_priv(dev);
 423	u32 chain_prev, chain_next;
 424	unsigned long flags;
 425	struct dma_desc *td;
 426	dma_addr_t ca;
 427	u32 length;
 428	int idx;
 429
 430	spin_lock_irqsave(&lp->lock, flags);
 431
 432	idx = lp->tx_chain_tail;
 433	td = &lp->td_ring[idx];
 434
 435	/* stop queue when full, drop pkts if queue already full */
 436	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
 437		lp->tx_full = 1;
 438
 439		if (lp->tx_count == (KORINA_NUM_TDS - 2))
 440			netif_stop_queue(dev);
 441		else
 442			goto drop_packet;
 443	}
 444
 445	lp->tx_count++;
 446
 447	lp->tx_skb[idx] = skb;
 448
 449	length = skb->len;
 450
 451	/* Setup the transmit descriptor. */
 452	ca = dma_map_single(lp->dmadev, skb->data, length, DMA_TO_DEVICE);
 453	if (dma_mapping_error(lp->dmadev, ca))
 454		goto drop_packet;
 455
 456	lp->tx_skb_dma[idx] = ca;
 457	td->ca = ca;
 458
 459	chain_prev = (idx - 1) & KORINA_TDS_MASK;
 460	chain_next = (idx + 1) & KORINA_TDS_MASK;
 461
 462	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
 463		if (lp->tx_chain_status == desc_is_empty) {
 464			/* Update tail */
 465			td->control = DMA_COUNT(length) |
 466					DMA_DESC_COF | DMA_DESC_IOF;
 467			/* Move tail */
 468			lp->tx_chain_tail = chain_next;
 469			/* Write to NDPTR */
 470			writel(korina_tx_dma(lp, lp->tx_chain_head),
 471			       &lp->tx_dma_regs->dmandptr);
 472			/* Move head to tail */
 473			lp->tx_chain_head = lp->tx_chain_tail;
 474		} else {
 475			/* Update tail */
 476			td->control = DMA_COUNT(length) |
 477					DMA_DESC_COF | DMA_DESC_IOF;
 478			/* Link to prev */
 479			lp->td_ring[chain_prev].control &=
 480					~DMA_DESC_COF;
 481			/* Link to prev */
 482			lp->td_ring[chain_prev].link = korina_tx_dma(lp, idx);
 483			/* Move tail */
 484			lp->tx_chain_tail = chain_next;
 485			/* Write to NDPTR */
 486			writel(korina_tx_dma(lp, lp->tx_chain_head),
 487			       &lp->tx_dma_regs->dmandptr);
 488			/* Move head to tail */
 489			lp->tx_chain_head = lp->tx_chain_tail;
 490			lp->tx_chain_status = desc_is_empty;
 491		}
 492	} else {
 493		if (lp->tx_chain_status == desc_is_empty) {
 494			/* Update tail */
 495			td->control = DMA_COUNT(length) |
 496					DMA_DESC_COF | DMA_DESC_IOF;
 497			/* Move tail */
 498			lp->tx_chain_tail = chain_next;
 499			lp->tx_chain_status = desc_filled;
 500		} else {
 501			/* Update tail */
 502			td->control = DMA_COUNT(length) |
 503					DMA_DESC_COF | DMA_DESC_IOF;
 504			lp->td_ring[chain_prev].control &=
 505					~DMA_DESC_COF;
 506			lp->td_ring[chain_prev].link = korina_tx_dma(lp, idx);
 507			lp->tx_chain_tail = chain_next;
 508		}
 509	}
 510
 511	netif_trans_update(dev);
 512	spin_unlock_irqrestore(&lp->lock, flags);
 513
 514	return NETDEV_TX_OK;
 515
 516drop_packet:
 517	dev->stats.tx_dropped++;
 518	dev_kfree_skb_any(skb);
 519	spin_unlock_irqrestore(&lp->lock, flags);
 520
 521	return NETDEV_TX_OK;
 522}
 523
 524static int korina_mdio_wait(struct korina_private *lp)
 525{
 526	u32 value;
 527
 528	return readl_poll_timeout_atomic(&lp->eth_regs->miimind,
 529					 value, value & ETH_MII_IND_BSY,
 530					 1, 1000);
 531}
 532
 533static int korina_mdio_read(struct net_device *dev, int phy, int reg)
 534{
 535	struct korina_private *lp = netdev_priv(dev);
 536	int ret;
 537
 538	ret = korina_mdio_wait(lp);
 539	if (ret < 0)
 540		return ret;
 541
 542	writel(phy << 8 | reg, &lp->eth_regs->miimaddr);
 543	writel(1, &lp->eth_regs->miimcmd);
 544
 545	ret = korina_mdio_wait(lp);
 546	if (ret < 0)
 547		return ret;
 548
 549	if (readl(&lp->eth_regs->miimind) & ETH_MII_IND_NV)
 550		return -EINVAL;
 551
 552	ret = readl(&lp->eth_regs->miimrdd);
 553	writel(0, &lp->eth_regs->miimcmd);
 554	return ret;
 555}
 556
 557static void korina_mdio_write(struct net_device *dev, int phy, int reg, int val)
 558{
 559	struct korina_private *lp = netdev_priv(dev);
 560
 561	if (korina_mdio_wait(lp))
 562		return;
 563
 564	writel(0, &lp->eth_regs->miimcmd);
 565	writel(phy << 8 | reg, &lp->eth_regs->miimaddr);
 566	writel(val, &lp->eth_regs->miimwtd);
 567}
 568
 569/* Ethernet Rx DMA interrupt */
 570static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
 571{
 572	struct net_device *dev = dev_id;
 573	struct korina_private *lp = netdev_priv(dev);
 574	u32 dmas, dmasm;
 575	irqreturn_t retval;
 576
 577	dmas = readl(&lp->rx_dma_regs->dmas);
 578	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
 579		dmasm = readl(&lp->rx_dma_regs->dmasm);
 580		writel(dmasm | (DMA_STAT_DONE |
 581				DMA_STAT_HALT | DMA_STAT_ERR),
 582				&lp->rx_dma_regs->dmasm);
 583
 584		napi_schedule(&lp->napi);
 585
 586		if (dmas & DMA_STAT_ERR)
 587			printk(KERN_ERR "%s: DMA error\n", dev->name);
 588
 589		retval = IRQ_HANDLED;
 590	} else
 591		retval = IRQ_NONE;
 592
 593	return retval;
 594}
 595
 596static int korina_rx(struct net_device *dev, int limit)
 597{
 598	struct korina_private *lp = netdev_priv(dev);
 599	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
 600	struct sk_buff *skb, *skb_new;
 601	u32 devcs, pkt_len, dmas;
 602	dma_addr_t ca;
 603	int count;
 604
 605	for (count = 0; count < limit; count++) {
 606		skb = lp->rx_skb[lp->rx_next_done];
 607		skb_new = NULL;
 608
 609		devcs = rd->devcs;
 610
 611		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
 612			break;
 613
 614		/* check that this is a whole packet
 615		 * WARNING: DMA_FD bit incorrectly set
 616		 * in Rc32434 (errata ref #077) */
 617		if (!(devcs & ETH_RX_LD))
 618			goto next;
 619
 620		if (!(devcs & ETH_RX_ROK)) {
 621			/* Update statistics counters */
 622			dev->stats.rx_errors++;
 623			dev->stats.rx_dropped++;
 624			if (devcs & ETH_RX_CRC)
 625				dev->stats.rx_crc_errors++;
 626			if (devcs & ETH_RX_LE)
 627				dev->stats.rx_length_errors++;
 628			if (devcs & ETH_RX_OVR)
 629				dev->stats.rx_fifo_errors++;
 630			if (devcs & ETH_RX_CV)
 631				dev->stats.rx_frame_errors++;
 632			if (devcs & ETH_RX_CES)
 633				dev->stats.rx_frame_errors++;
 634
 635			goto next;
 636		}
 637
 638		/* Malloc up new buffer. */
 639		skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
 640		if (!skb_new)
 641			break;
 642
 643		ca = dma_map_single(lp->dmadev, skb_new->data, KORINA_RBSIZE,
 644				    DMA_FROM_DEVICE);
 645		if (dma_mapping_error(lp->dmadev, ca)) {
 646			dev_kfree_skb_any(skb_new);
 647			break;
 648		}
 649
 650		pkt_len = RCVPKT_LENGTH(devcs);
 651		dma_unmap_single(lp->dmadev, lp->rx_skb_dma[lp->rx_next_done],
 652				 pkt_len, DMA_FROM_DEVICE);
 653
 654		/* Do not count the CRC */
 655		skb_put(skb, pkt_len - 4);
 656		skb->protocol = eth_type_trans(skb, dev);
 657
 658		/* Pass the packet to upper layers */
 659		napi_gro_receive(&lp->napi, skb);
 660		dev->stats.rx_packets++;
 661		dev->stats.rx_bytes += pkt_len;
 662
 663		/* Update the mcast stats */
 664		if (devcs & ETH_RX_MP)
 665			dev->stats.multicast++;
 666
 667		lp->rx_skb[lp->rx_next_done] = skb_new;
 668		lp->rx_skb_dma[lp->rx_next_done] = ca;
 669
 670next:
 671		rd->devcs = 0;
 672
 673		/* Restore descriptor's curr_addr */
 674		rd->ca = lp->rx_skb_dma[lp->rx_next_done];
 675
 676		rd->control = DMA_COUNT(KORINA_RBSIZE) |
 677			DMA_DESC_COD | DMA_DESC_IOD;
 678		lp->rd_ring[(lp->rx_next_done - 1) &
 679			KORINA_RDS_MASK].control &=
 680			~DMA_DESC_COD;
 681
 682		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
 683		rd = &lp->rd_ring[lp->rx_next_done];
 684		writel((u32)~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
 685	}
 686
 687	dmas = readl(&lp->rx_dma_regs->dmas);
 688
 689	if (dmas & DMA_STAT_HALT) {
 690		writel((u32)~(DMA_STAT_HALT | DMA_STAT_ERR),
 691		       &lp->rx_dma_regs->dmas);
 692
 693		lp->dma_halt_cnt++;
 694		rd->devcs = 0;
 695		rd->ca = lp->rx_skb_dma[lp->rx_next_done];
 696		writel(korina_rx_dma(lp, rd - lp->rd_ring),
 697		       &lp->rx_dma_regs->dmandptr);
 698	}
 699
 700	return count;
 701}
 702
 703static int korina_poll(struct napi_struct *napi, int budget)
 704{
 705	struct korina_private *lp =
 706		container_of(napi, struct korina_private, napi);
 707	struct net_device *dev = lp->dev;
 708	int work_done;
 709
 710	work_done = korina_rx(dev, budget);
 711	if (work_done < budget) {
 712		napi_complete_done(napi, work_done);
 713
 714		writel(readl(&lp->rx_dma_regs->dmasm) &
 715			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
 716			&lp->rx_dma_regs->dmasm);
 717	}
 718	return work_done;
 719}
 720
 721/*
 722 * Set or clear the multicast filter for this adaptor.
 723 */
 724static void korina_multicast_list(struct net_device *dev)
 725{
 726	struct korina_private *lp = netdev_priv(dev);
 727	unsigned long flags;
 728	struct netdev_hw_addr *ha;
 729	u32 recognise = ETH_ARC_AB;	/* always accept broadcasts */
 730
 731	/* Set promiscuous mode */
 732	if (dev->flags & IFF_PROMISC)
 733		recognise |= ETH_ARC_PRO;
 734
 735	else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
 736		/* All multicast and broadcast */
 737		recognise |= ETH_ARC_AM;
 738
 739	/* Build the hash table */
 740	if (netdev_mc_count(dev) > 4) {
 741		u16 hash_table[4] = { 0 };
 742		u32 crc;
 743
 744		netdev_for_each_mc_addr(ha, dev) {
 745			crc = ether_crc_le(6, ha->addr);
 746			crc >>= 26;
 747			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
 748		}
 749		/* Accept filtered multicast */
 750		recognise |= ETH_ARC_AFM;
 751
 752		/* Fill the MAC hash tables with their values */
 753		writel((u32)(hash_table[1] << 16 | hash_table[0]),
 754					&lp->eth_regs->ethhash0);
 755		writel((u32)(hash_table[3] << 16 | hash_table[2]),
 756					&lp->eth_regs->ethhash1);
 757	}
 758
 759	spin_lock_irqsave(&lp->lock, flags);
 760	writel(recognise, &lp->eth_regs->etharc);
 761	spin_unlock_irqrestore(&lp->lock, flags);
 762}
 763
 764static void korina_tx(struct net_device *dev)
 765{
 766	struct korina_private *lp = netdev_priv(dev);
 767	struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
 768	u32 devcs;
 769	u32 dmas;
 770
 771	spin_lock(&lp->lock);
 772
 773	/* Process all desc that are done */
 774	while (IS_DMA_FINISHED(td->control)) {
 775		if (lp->tx_full == 1) {
 776			netif_wake_queue(dev);
 777			lp->tx_full = 0;
 778		}
 779
 780		devcs = lp->td_ring[lp->tx_next_done].devcs;
 781		if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
 782				(ETH_TX_FD | ETH_TX_LD)) {
 783			dev->stats.tx_errors++;
 784			dev->stats.tx_dropped++;
 785
 786			/* Should never happen */
 787			printk(KERN_ERR "%s: split tx ignored\n",
 788							dev->name);
 789		} else if (devcs & ETH_TX_TOK) {
 790			dev->stats.tx_packets++;
 791			dev->stats.tx_bytes +=
 792					lp->tx_skb[lp->tx_next_done]->len;
 793		} else {
 794			dev->stats.tx_errors++;
 795			dev->stats.tx_dropped++;
 796
 797			/* Underflow */
 798			if (devcs & ETH_TX_UND)
 799				dev->stats.tx_fifo_errors++;
 800
 801			/* Oversized frame */
 802			if (devcs & ETH_TX_OF)
 803				dev->stats.tx_aborted_errors++;
 804
 805			/* Excessive deferrals */
 806			if (devcs & ETH_TX_ED)
 807				dev->stats.tx_carrier_errors++;
 808
 809			/* Collisions: medium busy */
 810			if (devcs & ETH_TX_EC)
 811				dev->stats.collisions++;
 812
 813			/* Late collision */
 814			if (devcs & ETH_TX_LC)
 815				dev->stats.tx_window_errors++;
 816		}
 817
 818		/* We must always free the original skb */
 819		if (lp->tx_skb[lp->tx_next_done]) {
 820			dma_unmap_single(lp->dmadev,
 821					 lp->tx_skb_dma[lp->tx_next_done],
 822					 lp->tx_skb[lp->tx_next_done]->len,
 823					 DMA_TO_DEVICE);
 824			dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
 825			lp->tx_skb[lp->tx_next_done] = NULL;
 826		}
 827
 828		lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
 829		lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
 830		lp->td_ring[lp->tx_next_done].link = 0;
 831		lp->td_ring[lp->tx_next_done].ca = 0;
 832		lp->tx_count--;
 833
 834		/* Go on to next transmission */
 835		lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
 836		td = &lp->td_ring[lp->tx_next_done];
 837
 838	}
 839
 840	/* Clear the DMA status register */
 841	dmas = readl(&lp->tx_dma_regs->dmas);
 842	writel(~dmas, &lp->tx_dma_regs->dmas);
 843
 844	writel(readl(&lp->tx_dma_regs->dmasm) &
 845			~(DMA_STAT_FINI | DMA_STAT_ERR),
 846			&lp->tx_dma_regs->dmasm);
 847
 848	spin_unlock(&lp->lock);
 849}
 850
 851static irqreturn_t
 852korina_tx_dma_interrupt(int irq, void *dev_id)
 853{
 854	struct net_device *dev = dev_id;
 855	struct korina_private *lp = netdev_priv(dev);
 856	u32 dmas, dmasm;
 857	irqreturn_t retval;
 858
 859	dmas = readl(&lp->tx_dma_regs->dmas);
 860
 861	if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
 862		dmasm = readl(&lp->tx_dma_regs->dmasm);
 863		writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
 864				&lp->tx_dma_regs->dmasm);
 865
 866		korina_tx(dev);
 867
 868		if (lp->tx_chain_status == desc_filled &&
 869			(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
 870			writel(korina_tx_dma(lp, lp->tx_chain_head),
 871			       &lp->tx_dma_regs->dmandptr);
 872			lp->tx_chain_status = desc_is_empty;
 873			lp->tx_chain_head = lp->tx_chain_tail;
 874			netif_trans_update(dev);
 875		}
 876		if (dmas & DMA_STAT_ERR)
 877			printk(KERN_ERR "%s: DMA error\n", dev->name);
 878
 879		retval = IRQ_HANDLED;
 880	} else
 881		retval = IRQ_NONE;
 882
 883	return retval;
 884}
 885
 886
 887static void korina_check_media(struct net_device *dev, unsigned int init_media)
 888{
 889	struct korina_private *lp = netdev_priv(dev);
 890
 891	mii_check_media(&lp->mii_if, 1, init_media);
 892
 893	if (lp->mii_if.full_duplex)
 894		writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
 895						&lp->eth_regs->ethmac2);
 896	else
 897		writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
 898						&lp->eth_regs->ethmac2);
 899}
 900
 901static void korina_poll_media(struct timer_list *t)
 902{
 903	struct korina_private *lp = from_timer(lp, t, media_check_timer);
 904	struct net_device *dev = lp->dev;
 905
 906	korina_check_media(dev, 0);
 907	mod_timer(&lp->media_check_timer, jiffies + HZ);
 908}
 909
 910static void korina_set_carrier(struct mii_if_info *mii)
 911{
 912	if (mii->force_media) {
 913		/* autoneg is off: Link is always assumed to be up */
 914		if (!netif_carrier_ok(mii->dev))
 915			netif_carrier_on(mii->dev);
  916	} else  /* Let MII library update carrier status */
 917		korina_check_media(mii->dev, 0);
 918}
 919
 920static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 921{
 922	struct korina_private *lp = netdev_priv(dev);
 923	struct mii_ioctl_data *data = if_mii(rq);
 924	int rc;
 925
 926	if (!netif_running(dev))
 927		return -EINVAL;
 928	spin_lock_irq(&lp->lock);
 929	rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
 930	spin_unlock_irq(&lp->lock);
 931	korina_set_carrier(&lp->mii_if);
 932
 933	return rc;
 934}
 935
 936/* ethtool helpers */
 937static void netdev_get_drvinfo(struct net_device *dev,
 938				struct ethtool_drvinfo *info)
 939{
 940	struct korina_private *lp = netdev_priv(dev);
 941
 942	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
 943	strscpy(info->version, DRV_VERSION, sizeof(info->version));
 944	strscpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
 945}
 946
 947static int netdev_get_link_ksettings(struct net_device *dev,
 948				     struct ethtool_link_ksettings *cmd)
 949{
 950	struct korina_private *lp = netdev_priv(dev);
 951
 952	spin_lock_irq(&lp->lock);
 953	mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
 954	spin_unlock_irq(&lp->lock);
 955
 956	return 0;
 957}
 958
 959static int netdev_set_link_ksettings(struct net_device *dev,
 960				     const struct ethtool_link_ksettings *cmd)
 961{
 962	struct korina_private *lp = netdev_priv(dev);
 963	int rc;
 964
 965	spin_lock_irq(&lp->lock);
 966	rc = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
 967	spin_unlock_irq(&lp->lock);
 968	korina_set_carrier(&lp->mii_if);
 969
 970	return rc;
 971}
 972
 973static u32 netdev_get_link(struct net_device *dev)
 974{
 975	struct korina_private *lp = netdev_priv(dev);
 976
 977	return mii_link_ok(&lp->mii_if);
 978}
 979
 980static const struct ethtool_ops netdev_ethtool_ops = {
 981	.get_drvinfo		= netdev_get_drvinfo,
 982	.get_link		= netdev_get_link,
 983	.get_link_ksettings	= netdev_get_link_ksettings,
 984	.set_link_ksettings	= netdev_set_link_ksettings,
 985};
 986
 987static int korina_alloc_ring(struct net_device *dev)
 988{
 989	struct korina_private *lp = netdev_priv(dev);
 990	struct sk_buff *skb;
 991	dma_addr_t ca;
 992	int i;
 993
 994	/* Initialize the transmit descriptors */
 995	for (i = 0; i < KORINA_NUM_TDS; i++) {
 996		lp->td_ring[i].control = DMA_DESC_IOF;
 997		lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
 998		lp->td_ring[i].ca = 0;
 999		lp->td_ring[i].link = 0;
1000	}
1001	lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
1002			lp->tx_full = lp->tx_count = 0;
1003	lp->tx_chain_status = desc_is_empty;
1004
1005	/* Initialize the receive descriptors */
1006	for (i = 0; i < KORINA_NUM_RDS; i++) {
1007		skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
1008		if (!skb)
1009			return -ENOMEM;
1010		lp->rx_skb[i] = skb;
1011		lp->rd_ring[i].control = DMA_DESC_IOD |
1012				DMA_COUNT(KORINA_RBSIZE);
1013		lp->rd_ring[i].devcs = 0;
1014		ca = dma_map_single(lp->dmadev, skb->data, KORINA_RBSIZE,
1015				    DMA_FROM_DEVICE);
1016		if (dma_mapping_error(lp->dmadev, ca))
1017			return -ENOMEM;
1018		lp->rd_ring[i].ca = ca;
1019		lp->rx_skb_dma[i] = ca;
1020		lp->rd_ring[i].link = korina_rx_dma(lp, i + 1);
1021	}
1022
1023	/* loop back receive descriptors, so the last
1024	 * descriptor points to the first one */
1025	lp->rd_ring[i - 1].link = lp->rd_dma;
1026	lp->rd_ring[i - 1].control |= DMA_DESC_COD;
1027
1028	lp->rx_next_done  = 0;
1029	lp->rx_chain_head = 0;
1030	lp->rx_chain_tail = 0;
1031	lp->rx_chain_status = desc_is_empty;
1032
1033	return 0;
1034}
1035
1036static void korina_free_ring(struct net_device *dev)
1037{
1038	struct korina_private *lp = netdev_priv(dev);
1039	int i;
1040
1041	for (i = 0; i < KORINA_NUM_RDS; i++) {
1042		lp->rd_ring[i].control = 0;
1043		if (lp->rx_skb[i]) {
1044			dma_unmap_single(lp->dmadev, lp->rx_skb_dma[i],
1045					 KORINA_RBSIZE, DMA_FROM_DEVICE);
1046			dev_kfree_skb_any(lp->rx_skb[i]);
1047			lp->rx_skb[i] = NULL;
1048		}
1049	}
1050
1051	for (i = 0; i < KORINA_NUM_TDS; i++) {
1052		lp->td_ring[i].control = 0;
1053		if (lp->tx_skb[i]) {
1054			dma_unmap_single(lp->dmadev, lp->tx_skb_dma[i],
1055					 lp->tx_skb[i]->len, DMA_TO_DEVICE);
1056			dev_kfree_skb_any(lp->tx_skb[i]);
1057			lp->tx_skb[i] = NULL;
1058		}
1059	}
1060}
1061
1062/*
1063 * Initialize the RC32434 ethernet controller.
1064 */
1065static int korina_init(struct net_device *dev)
1066{
1067	struct korina_private *lp = netdev_priv(dev);
1068
1069	/* Disable DMA */
1070	korina_abort_tx(dev);
1071	korina_abort_rx(dev);
1072
1073	/* reset ethernet logic */
1074	writel(0, &lp->eth_regs->ethintfc);
1075	while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
1076		netif_trans_update(dev);
1077
1078	/* Enable Ethernet Interface */
1079	writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
1080
1081	/* Allocate rings */
1082	if (korina_alloc_ring(dev)) {
1083		printk(KERN_ERR "%s: descriptor allocation failed\n", dev->name);
1084		korina_free_ring(dev);
1085		return -ENOMEM;
1086	}
1087
1088	writel(0, &lp->rx_dma_regs->dmas);
1089	/* Start Rx DMA */
1090	writel(0, &lp->rx_dma_regs->dmandptr);
1091	writel(korina_rx_dma(lp, 0), &lp->rx_dma_regs->dmadptr);
1092
1093	writel(readl(&lp->tx_dma_regs->dmasm) &
1094			~(DMA_STAT_FINI | DMA_STAT_ERR),
1095			&lp->tx_dma_regs->dmasm);
1096	writel(readl(&lp->rx_dma_regs->dmasm) &
1097			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
1098			&lp->rx_dma_regs->dmasm);
1099
1100	/* Accept only packets destined for this Ethernet device address */
1101	writel(ETH_ARC_AB, &lp->eth_regs->etharc);
1102
1103	/* Set all Ether station address registers to their initial values */
1104	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
1105	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);
1106
1107	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
1108	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);
1109
1110	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
1111	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);
1112
1113	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
1114	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);
1115
1116
1117	/* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
1118	writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
1119			&lp->eth_regs->ethmac2);
1120
 1121	/* Back-to-back inter-packet gap */
 1122	writel(0x15, &lp->eth_regs->ethipgt);
 1123	/* Non-back-to-back inter-packet gap */
1124	writel(0x12, &lp->eth_regs->ethipgr);
1125
1126	/* Management Clock Prescaler Divisor
1127	 * Clock independent setting */
1128	writel(((lp->mii_clock_freq) / MII_CLOCK + 1) & ~1,
1129	       &lp->eth_regs->ethmcp);
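	/* Worked example (illustrative only): with the 200 MHz fallback
	 * input clock from korina_probe(), 200000000 / 1250000 = 160,
	 * + 1 = 161, and the trailing & ~1 rounds back down to the even
	 * divisor 160.
	 */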
1130	writel(0, &lp->eth_regs->miimcfg);
1131
 1132	/* don't transmit until the FIFO contains 48 bytes */
1133	writel(48, &lp->eth_regs->ethfifott);
1134
1135	writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);
1136
1137	korina_check_media(dev, 1);
1138
1139	napi_enable(&lp->napi);
1140	netif_start_queue(dev);
1141
1142	return 0;
1143}
1144
1145/*
1146 * Restart the RC32434 ethernet controller.
1147 */
1148static void korina_restart_task(struct work_struct *work)
1149{
1150	struct korina_private *lp = container_of(work,
1151			struct korina_private, restart_task);
1152	struct net_device *dev = lp->dev;
1153
1154	/*
1155	 * Disable interrupts
1156	 */
1157	disable_irq(lp->rx_irq);
1158	disable_irq(lp->tx_irq);
1159
1160	writel(readl(&lp->tx_dma_regs->dmasm) |
1161				DMA_STAT_FINI | DMA_STAT_ERR,
1162				&lp->tx_dma_regs->dmasm);
1163	writel(readl(&lp->rx_dma_regs->dmasm) |
1164				DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
1165				&lp->rx_dma_regs->dmasm);
1166
1167	napi_disable(&lp->napi);
1168
1169	korina_free_ring(dev);
1170
1171	if (korina_init(dev) < 0) {
1172		printk(KERN_ERR "%s: cannot restart device\n", dev->name);
1173		return;
1174	}
1175	korina_multicast_list(dev);
1176
1177	enable_irq(lp->tx_irq);
1178	enable_irq(lp->rx_irq);
1179}
1180
1181static void korina_tx_timeout(struct net_device *dev, unsigned int txqueue)
1182{
1183	struct korina_private *lp = netdev_priv(dev);
1184
1185	schedule_work(&lp->restart_task);
1186}
1187
1188#ifdef CONFIG_NET_POLL_CONTROLLER
1189static void korina_poll_controller(struct net_device *dev)
1190{
1191	disable_irq(dev->irq);
1192	korina_tx_dma_interrupt(dev->irq, dev);
1193	enable_irq(dev->irq);
1194}
1195#endif
1196
1197static int korina_open(struct net_device *dev)
1198{
1199	struct korina_private *lp = netdev_priv(dev);
1200	int ret;
1201
1202	/* Initialize */
1203	ret = korina_init(dev);
1204	if (ret < 0) {
1205		printk(KERN_ERR "%s: cannot open device\n", dev->name);
1206		goto out;
1207	}
1208
 1209	/* Install the interrupt handlers
 1210	 * that handle the DMA Done and Finished interrupts */
1211	ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
1212			0, "Korina ethernet Rx", dev);
1213	if (ret < 0) {
1214		printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
1215			dev->name, lp->rx_irq);
1216		goto err_release;
1217	}
1218	ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
1219			0, "Korina ethernet Tx", dev);
1220	if (ret < 0) {
1221		printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
1222			dev->name, lp->tx_irq);
1223		goto err_free_rx_irq;
1224	}
1225
1226	mod_timer(&lp->media_check_timer, jiffies + 1);
1227out:
1228	return ret;
1229
1230err_free_rx_irq:
1231	free_irq(lp->rx_irq, dev);
1232err_release:
1233	korina_free_ring(dev);
1234	goto out;
1235}
1236
1237static int korina_close(struct net_device *dev)
1238{
1239	struct korina_private *lp = netdev_priv(dev);
1240	u32 tmp;
1241
1242	del_timer(&lp->media_check_timer);
1243
1244	/* Disable interrupts */
1245	disable_irq(lp->rx_irq);
1246	disable_irq(lp->tx_irq);
1247
1248	korina_abort_tx(dev);
1249	tmp = readl(&lp->tx_dma_regs->dmasm);
1250	tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
1251	writel(tmp, &lp->tx_dma_regs->dmasm);
1252
1253	korina_abort_rx(dev);
1254	tmp = readl(&lp->rx_dma_regs->dmasm);
1255	tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
1256	writel(tmp, &lp->rx_dma_regs->dmasm);
1257
1258	napi_disable(&lp->napi);
1259
1260	cancel_work_sync(&lp->restart_task);
1261
1262	korina_free_ring(dev);
1263
1264	free_irq(lp->rx_irq, dev);
1265	free_irq(lp->tx_irq, dev);
1266
1267	return 0;
1268}
1269
1270static const struct net_device_ops korina_netdev_ops = {
1271	.ndo_open		= korina_open,
1272	.ndo_stop		= korina_close,
1273	.ndo_start_xmit		= korina_send_packet,
1274	.ndo_set_rx_mode	= korina_multicast_list,
1275	.ndo_tx_timeout		= korina_tx_timeout,
1276	.ndo_eth_ioctl		= korina_ioctl,
1277	.ndo_validate_addr	= eth_validate_addr,
1278	.ndo_set_mac_address	= eth_mac_addr,
1279#ifdef CONFIG_NET_POLL_CONTROLLER
1280	.ndo_poll_controller	= korina_poll_controller,
1281#endif
1282};
1283
1284static int korina_probe(struct platform_device *pdev)
1285{
1286	u8 *mac_addr = dev_get_platdata(&pdev->dev);
1287	struct korina_private *lp;
1288	struct net_device *dev;
1289	struct clk *clk;
1290	void __iomem *p;
1291	int rc;
1292
1293	dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct korina_private));
1294	if (!dev)
1295		return -ENOMEM;
1296
1297	SET_NETDEV_DEV(dev, &pdev->dev);
1298	lp = netdev_priv(dev);
1299
1300	if (mac_addr)
1301		eth_hw_addr_set(dev, mac_addr);
1302	else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0)
1303		eth_hw_addr_random(dev);
1304
1305	clk = devm_clk_get_optional_enabled(&pdev->dev, "mdioclk");
1306	if (IS_ERR(clk))
1307		return PTR_ERR(clk);
1308	if (clk) {
1309		lp->mii_clock_freq = clk_get_rate(clk);
1310	} else {
1311		lp->mii_clock_freq = 200000000; /* max possible input clk */
1312	}
1313
1314	lp->rx_irq = platform_get_irq_byname(pdev, "rx");
1315	lp->tx_irq = platform_get_irq_byname(pdev, "tx");
1316
1317	p = devm_platform_ioremap_resource_byname(pdev, "emac");
1318	if (IS_ERR(p)) {
1319		printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
1320		return PTR_ERR(p);
1321	}
1322	lp->eth_regs = p;
1323
1324	p = devm_platform_ioremap_resource_byname(pdev, "dma_rx");
1325	if (IS_ERR(p)) {
1326		printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
1327		return PTR_ERR(p);
1328	}
1329	lp->rx_dma_regs = p;
1330
1331	p = devm_platform_ioremap_resource_byname(pdev, "dma_tx");
1332	if (IS_ERR(p)) {
1333		printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
1334		return PTR_ERR(p);
1335	}
1336	lp->tx_dma_regs = p;
1337
1338	lp->td_ring = dmam_alloc_coherent(&pdev->dev, TD_RING_SIZE,
1339					  &lp->td_dma, GFP_KERNEL);
1340	if (!lp->td_ring)
1341		return -ENOMEM;
1342
1343	lp->rd_ring = dmam_alloc_coherent(&pdev->dev, RD_RING_SIZE,
1344					  &lp->rd_dma, GFP_KERNEL);
1345	if (!lp->rd_ring)
1346		return -ENOMEM;
1347
1348	spin_lock_init(&lp->lock);
1349	/* just use the rx dma irq */
1350	dev->irq = lp->rx_irq;
1351	lp->dev = dev;
1352	lp->dmadev = &pdev->dev;
1353
1354	dev->netdev_ops = &korina_netdev_ops;
1355	dev->ethtool_ops = &netdev_ethtool_ops;
1356	dev->watchdog_timeo = TX_TIMEOUT;
1357	netif_napi_add(dev, &lp->napi, korina_poll);
1358
1359	lp->mii_if.dev = dev;
1360	lp->mii_if.mdio_read = korina_mdio_read;
1361	lp->mii_if.mdio_write = korina_mdio_write;
1362	lp->mii_if.phy_id = 1;
1363	lp->mii_if.phy_id_mask = 0x1f;
1364	lp->mii_if.reg_num_mask = 0x1f;
1365
1366	platform_set_drvdata(pdev, dev);
1367
1368	rc = register_netdev(dev);
1369	if (rc < 0) {
1370		printk(KERN_ERR DRV_NAME
1371			": cannot register net device: %d\n", rc);
1372		return rc;
1373	}
1374	timer_setup(&lp->media_check_timer, korina_poll_media, 0);
1375
1376	INIT_WORK(&lp->restart_task, korina_restart_task);
1377
1378	printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
1379			dev->name);
1380	return rc;
1381}
1382
1383static void korina_remove(struct platform_device *pdev)
1384{
1385	struct net_device *dev = platform_get_drvdata(pdev);
1386
1387	unregister_netdev(dev);
1388}
1389
1390#ifdef CONFIG_OF
1391static const struct of_device_id korina_match[] = {
1392	{
1393		.compatible = "idt,3243x-emac",
1394	},
1395	{ }
1396};
1397MODULE_DEVICE_TABLE(of, korina_match);
1398#endif
1399
1400static struct platform_driver korina_driver = {
1401	.driver = {
1402		.name = "korina",
1403		.of_match_table = of_match_ptr(korina_match),
1404	},
1405	.probe = korina_probe,
1406	.remove_new = korina_remove,
1407};
1408
1409module_platform_driver(korina_driver);
1410
1411MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
1412MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
1413MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
1414MODULE_AUTHOR("Roman Yeryomin <roman@advem.lv>");
1415MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
1416MODULE_LICENSE("GPL");
v4.10.11
   1/*
   2 *  Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
   3 *
   4 *  Copyright 2004 IDT Inc. (rischelp@idt.com)
   5 *  Copyright 2006 Felix Fietkau <nbd@openwrt.org>
   6 *  Copyright 2008 Florian Fainelli <florian@openwrt.org>
 
   7 *
   8 *  This program is free software; you can redistribute  it and/or modify it
   9 *  under  the terms of  the GNU General  Public License as published by the
  10 *  Free Software Foundation;  either version 2 of the  License, or (at your
  11 *  option) any later version.
  12 *
  13 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
  14 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
  15 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
  16 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
  17 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  18 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
  19 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  20 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
  21 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  22 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  23 *
  24 *  You should have received a copy of the  GNU General Public License along
  25 *  with this program; if not, write  to the Free Software Foundation, Inc.,
  26 *  675 Mass Ave, Cambridge, MA 02139, USA.
  27 *
  28 *  Writing to a DMA status register:
  29 *
  30 *  When writing to the status register, you should mask the bit you have
  31 *  been testing the status register with. Both Tx and Rx DMA registers
  32 *  should stick to this procedure.
  33 */
  34
  35#include <linux/module.h>
  36#include <linux/kernel.h>
  37#include <linux/moduleparam.h>
  38#include <linux/sched.h>
  39#include <linux/ctype.h>
  40#include <linux/types.h>
  41#include <linux/interrupt.h>
  42#include <linux/ioport.h>
 
  43#include <linux/in.h>
 
 
  44#include <linux/slab.h>
  45#include <linux/string.h>
  46#include <linux/delay.h>
  47#include <linux/netdevice.h>
  48#include <linux/etherdevice.h>
  49#include <linux/skbuff.h>
  50#include <linux/errno.h>
  51#include <linux/platform_device.h>
  52#include <linux/mii.h>
  53#include <linux/ethtool.h>
  54#include <linux/crc32.h>
 
 
  55
  56#include <asm/bootinfo.h>
  57#include <asm/bitops.h>
  58#include <asm/pgtable.h>
  59#include <asm/io.h>
  60#include <asm/dma.h>
  61
  62#include <asm/mach-rc32434/rb.h>
  63#include <asm/mach-rc32434/rc32434.h>
  64#include <asm/mach-rc32434/eth.h>
  65#include <asm/mach-rc32434/dma_v.h>
  66
  67#define DRV_NAME        "korina"
  68#define DRV_VERSION     "0.10"
  69#define DRV_RELDATE     "04Mar2008"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  70
  71#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
  72				   ((dev)->dev_addr[1]))
  73#define STATION_ADDRESS_LOW(dev)  (((dev)->dev_addr[2] << 24) | \
  74				   ((dev)->dev_addr[3] << 16) | \
  75				   ((dev)->dev_addr[4] << 8)  | \
  76				   ((dev)->dev_addr[5]))
  77
  78#define MII_CLOCK 1250000 	/* no more than 2.5MHz */
  79
  80/* the following must be powers of two */
  81#define KORINA_NUM_RDS	64  /* number of receive descriptors */
  82#define KORINA_NUM_TDS	64  /* number of transmit descriptors */
  83
  84/* KORINA_RBSIZE is the hardware's default maximum receive
  85 * frame size in bytes. Having this hardcoded means that there
  86 * is no support for MTU sizes greater than 1500. */
  87#define KORINA_RBSIZE	1536 /* size of one resource buffer = Ether MTU */
  88#define KORINA_RDS_MASK	(KORINA_NUM_RDS - 1)
  89#define KORINA_TDS_MASK	(KORINA_NUM_TDS - 1)
  90#define RD_RING_SIZE 	(KORINA_NUM_RDS * sizeof(struct dma_desc))
  91#define TD_RING_SIZE	(KORINA_NUM_TDS * sizeof(struct dma_desc))
  92
  93#define TX_TIMEOUT 	(6000 * HZ / 1000)
 
 
 
 
 
  94
  95enum chain_status { desc_filled, desc_empty };
  96#define IS_DMA_FINISHED(X)   (((X) & (DMA_DESC_FINI)) != 0)
  97#define IS_DMA_DONE(X)   (((X) & (DMA_DESC_DONE)) != 0)
  98#define RCVPKT_LENGTH(X)     (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
  99
 100/* Information that need to be kept for each board. */
 101struct korina_private {
 102	struct eth_regs *eth_regs;
 103	struct dma_reg *rx_dma_regs;
 104	struct dma_reg *tx_dma_regs;
 105	struct dma_desc *td_ring; /* transmit descriptor ring */
 106	struct dma_desc *rd_ring; /* receive descriptor ring  */
 
 
 107
 108	struct sk_buff *tx_skb[KORINA_NUM_TDS];
 109	struct sk_buff *rx_skb[KORINA_NUM_RDS];
 110
 
 
 
 111	int rx_next_done;
 112	int rx_chain_head;
 113	int rx_chain_tail;
 114	enum chain_status rx_chain_status;
 115
 116	int tx_next_done;
 117	int tx_chain_head;
 118	int tx_chain_tail;
 119	enum chain_status tx_chain_status;
 120	int tx_count;
 121	int tx_full;
 122
 123	int rx_irq;
 124	int tx_irq;
 125	int ovr_irq;
 126	int und_irq;
 127
 128	spinlock_t lock;        /* NIC xmit lock */
 129
 130	int dma_halt_cnt;
 131	int dma_run_cnt;
 132	struct napi_struct napi;
 133	struct timer_list media_check_timer;
 134	struct mii_if_info mii_if;
 135	struct work_struct restart_task;
 136	struct net_device *dev;
 137	int phy_addr;
 
 138};
 139
 140extern unsigned int idt_cpu_freq;
 
 
 
 141
 142static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
 143{
 144	writel(0, &ch->dmandptr);
 145	writel(dma_addr, &ch->dmadptr);
 146}
 147
 148static inline void korina_abort_dma(struct net_device *dev,
 149					struct dma_reg *ch)
 150{
 151       if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
 152	       writel(0x10, &ch->dmac);
 153
 154	       while (!(readl(&ch->dmas) & DMA_STAT_HALT))
 155		       netif_trans_update(dev);
 156
 157	       writel(0, &ch->dmas);
 158       }
 159
 160       writel(0, &ch->dmadptr);
 161       writel(0, &ch->dmandptr);
 162}
 163
 164static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
 165{
 166	writel(dma_addr, &ch->dmandptr);
 167}
 168
 169static void korina_abort_tx(struct net_device *dev)
 170{
 171	struct korina_private *lp = netdev_priv(dev);
 172
 173	korina_abort_dma(dev, lp->tx_dma_regs);
 174}
 175
 176static void korina_abort_rx(struct net_device *dev)
 177{
 178	struct korina_private *lp = netdev_priv(dev);
 179
 180	korina_abort_dma(dev, lp->rx_dma_regs);
 181}
 182
 183static void korina_start_rx(struct korina_private *lp,
 184					struct dma_desc *rd)
 185{
 186	korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
 187}
 188
 189static void korina_chain_rx(struct korina_private *lp,
 190					struct dma_desc *rd)
 191{
 192	korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
 193}
 194
 195/* transmit packet */
 196static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 
 197{
 198	struct korina_private *lp = netdev_priv(dev);
 
 199	unsigned long flags;
 
 
 200	u32 length;
 201	u32 chain_prev, chain_next;
 202	struct dma_desc *td;
 203
 204	spin_lock_irqsave(&lp->lock, flags);
 205
 206	td = &lp->td_ring[lp->tx_chain_tail];
 
 207
 208	/* stop queue when full, drop pkts if queue already full */
 209	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
 210		lp->tx_full = 1;
 211
 212		if (lp->tx_count == (KORINA_NUM_TDS - 2))
 213			netif_stop_queue(dev);
 214		else {
 215			dev->stats.tx_dropped++;
 216			dev_kfree_skb_any(skb);
 217			spin_unlock_irqrestore(&lp->lock, flags);
 218
 219			return NETDEV_TX_BUSY;
 220		}
 221	}
 222
 223	lp->tx_count++;
 224
 225	lp->tx_skb[lp->tx_chain_tail] = skb;
 226
 227	length = skb->len;
 228	dma_cache_wback((u32)skb->data, skb->len);
 229
 230	/* Setup the transmit descriptor. */
 231	dma_cache_inv((u32) td, sizeof(*td));
 232	td->ca = CPHYSADDR(skb->data);
 233	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
 234	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;
 
 
 
 
 
 235
 236	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
 237		if (lp->tx_chain_status == desc_empty) {
 238			/* Update tail */
 239			td->control = DMA_COUNT(length) |
 240					DMA_DESC_COF | DMA_DESC_IOF;
 241			/* Move tail */
 242			lp->tx_chain_tail = chain_next;
 243			/* Write to NDPTR */
 244			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 245					&lp->tx_dma_regs->dmandptr);
 246			/* Move head to tail */
 247			lp->tx_chain_head = lp->tx_chain_tail;
 248		} else {
 249			/* Update tail */
 250			td->control = DMA_COUNT(length) |
 251					DMA_DESC_COF | DMA_DESC_IOF;
 252			/* Link to prev */
 253			lp->td_ring[chain_prev].control &=
 254					~DMA_DESC_COF;
 255			/* Link to prev */
 256			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
 257			/* Move tail */
 258			lp->tx_chain_tail = chain_next;
 259			/* Write to NDPTR */
 260			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 261					&(lp->tx_dma_regs->dmandptr));
 262			/* Move head to tail */
 263			lp->tx_chain_head = lp->tx_chain_tail;
 264			lp->tx_chain_status = desc_empty;
 265		}
 266	} else {
 267		if (lp->tx_chain_status == desc_empty) {
 268			/* Update tail */
 269			td->control = DMA_COUNT(length) |
 270					DMA_DESC_COF | DMA_DESC_IOF;
 271			/* Move tail */
 272			lp->tx_chain_tail = chain_next;
 273			lp->tx_chain_status = desc_filled;
 274		} else {
 275			/* Update tail */
 276			td->control = DMA_COUNT(length) |
 277					DMA_DESC_COF | DMA_DESC_IOF;
 278			lp->td_ring[chain_prev].control &=
 279					~DMA_DESC_COF;
 280			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
 281			lp->tx_chain_tail = chain_next;
 282		}
 283	}
 284	dma_cache_wback((u32) td, sizeof(*td));
 285
 286	netif_trans_update(dev);
 287	spin_unlock_irqrestore(&lp->lock, flags);
 288
 289	return NETDEV_TX_OK;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 290}
 291
 292static int mdio_read(struct net_device *dev, int mii_id, int reg)
 293{
 294	struct korina_private *lp = netdev_priv(dev);
 295	int ret;
 296
 297	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
 
 
 
 
 
 
 
 
 
 298
 299	writel(0, &lp->eth_regs->miimcfg);
 
 
 
 300	writel(0, &lp->eth_regs->miimcmd);
 301	writel(mii_id | reg, &lp->eth_regs->miimaddr);
 302	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
 303
 304	ret = (int)(readl(&lp->eth_regs->miimrdd));
 305	return ret;
 306}
 307
 308static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
 309{
 310	struct korina_private *lp = netdev_priv(dev);
 311
 312	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
 
 313
 314	writel(0, &lp->eth_regs->miimcfg);
 315	writel(1, &lp->eth_regs->miimcmd);
 316	writel(mii_id | reg, &lp->eth_regs->miimaddr);
 317	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
 318	writel(val, &lp->eth_regs->miimwtd);
 319}
 320
 321/* Ethernet Rx DMA interrupt */
 322static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
 323{
 324	struct net_device *dev = dev_id;
 325	struct korina_private *lp = netdev_priv(dev);
 326	u32 dmas, dmasm;
 327	irqreturn_t retval;
 328
 329	dmas = readl(&lp->rx_dma_regs->dmas);
 330	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
 331		dmasm = readl(&lp->rx_dma_regs->dmasm);
 332		writel(dmasm | (DMA_STAT_DONE |
 333				DMA_STAT_HALT | DMA_STAT_ERR),
 334				&lp->rx_dma_regs->dmasm);
 335
 336		napi_schedule(&lp->napi);
 337
 338		if (dmas & DMA_STAT_ERR)
 339			printk(KERN_ERR "%s: DMA error\n", dev->name);
 340
 341		retval = IRQ_HANDLED;
 342	} else
 343		retval = IRQ_NONE;
 344
 345	return retval;
 346}
 347
 348static int korina_rx(struct net_device *dev, int limit)
 349{
 350	struct korina_private *lp = netdev_priv(dev);
 351	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
 352	struct sk_buff *skb, *skb_new;
 353	u8 *pkt_buf;
 354	u32 devcs, pkt_len, dmas;
 
 355	int count;
 356
 357	dma_cache_inv((u32)rd, sizeof(*rd));
 358
 359	for (count = 0; count < limit; count++) {
 360		skb = lp->rx_skb[lp->rx_next_done];
 361		skb_new = NULL;
 362
 363		devcs = rd->devcs;
 364
 365		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
 366			break;
 367
 368		/* Update statistics counters */
 369		if (devcs & ETH_RX_CRC)
 370			dev->stats.rx_crc_errors++;
 371		if (devcs & ETH_RX_LOR)
 372			dev->stats.rx_length_errors++;
 373		if (devcs & ETH_RX_LE)
 374			dev->stats.rx_length_errors++;
 375		if (devcs & ETH_RX_OVR)
 376			dev->stats.rx_fifo_errors++;
 377		if (devcs & ETH_RX_CV)
 378			dev->stats.rx_frame_errors++;
 379		if (devcs & ETH_RX_CES)
 380			dev->stats.rx_length_errors++;
 381		if (devcs & ETH_RX_MP)
 382			dev->stats.multicast++;
 383
 384		if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
 385			/* check that this is a whole packet
 386			 * WARNING: DMA_FD bit incorrectly set
 387			 * in Rc32434 (errata ref #077) */
 388			dev->stats.rx_errors++;
 389			dev->stats.rx_dropped++;
 390		} else if ((devcs & ETH_RX_ROK)) {
 391			pkt_len = RCVPKT_LENGTH(devcs);
 
 
 
 
 
 
 
 
 
 
 
 392
 393			/* must be the (first and) last
 394			 * descriptor then */
 395			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
 396
 397			/* invalidate the cache */
 398			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
 399
 400			/* Malloc up new buffer. */
 401			skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
 402
 403			if (!skb_new)
 404				break;
 405			/* Do not count the CRC */
 406			skb_put(skb, pkt_len - 4);
 407			skb->protocol = eth_type_trans(skb, dev);
 408
 409			/* Pass the packet to upper layers */
 410			netif_receive_skb(skb);
 411			dev->stats.rx_packets++;
 412			dev->stats.rx_bytes += pkt_len;
 413
 414			/* Update the mcast stats */
 415			if (devcs & ETH_RX_MP)
 416				dev->stats.multicast++;
 417
 418			lp->rx_skb[lp->rx_next_done] = skb_new;
 419		}
 420
 421		rd->devcs = 0;
 422
 423		/* Restore descriptor's curr_addr */
 424		if (skb_new)
 425			rd->ca = CPHYSADDR(skb_new->data);
 426		else
 427			rd->ca = CPHYSADDR(skb->data);
 428
 429		rd->control = DMA_COUNT(KORINA_RBSIZE) |
 430			DMA_DESC_COD | DMA_DESC_IOD;
 431		lp->rd_ring[(lp->rx_next_done - 1) &
 432			KORINA_RDS_MASK].control &=
 433			~DMA_DESC_COD;
 434
 435		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
 436		dma_cache_wback((u32)rd, sizeof(*rd));
 437		rd = &lp->rd_ring[lp->rx_next_done];
 438		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
 439	}
 440
 441	dmas = readl(&lp->rx_dma_regs->dmas);
 442
 443	if (dmas & DMA_STAT_HALT) {
 444		writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
 445				&lp->rx_dma_regs->dmas);
 446
 447		lp->dma_halt_cnt++;
 448		rd->devcs = 0;
 449		skb = lp->rx_skb[lp->rx_next_done];
 450		rd->ca = CPHYSADDR(skb->data);
 451		dma_cache_wback((u32)rd, sizeof(*rd));
 452		korina_chain_rx(lp, rd);
 453	}
 454
 455	return count;
 456}
 457
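/* NAPI poll handler: the Rx DMA interrupt above masks DONE/HALT/ERR in
 * dmasm and schedules this routine; once less than the full budget is
 * consumed, napi_complete() is called and the same bits are unmasked so
 * the next interrupt can fire. */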
 458static int korina_poll(struct napi_struct *napi, int budget)
 459{
 460	struct korina_private *lp =
 461		container_of(napi, struct korina_private, napi);
 462	struct net_device *dev = lp->dev;
 463	int work_done;
 464
 465	work_done = korina_rx(dev, budget);
 466	if (work_done < budget) {
 467		napi_complete(napi);
 468
 469		writel(readl(&lp->rx_dma_regs->dmasm) &
 470			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
 471			&lp->rx_dma_regs->dmasm);
 472	}
 473	return work_done;
 474}
 475
 476/*
 477 * Set or clear the multicast filter for this adaptor.
 478 */
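/* The hash filter takes the top six bits of ether_crc_le() over the
 * multicast address: bits [5:4] select one of the four 16-bit hash_table
 * words and bits [3:0] pick the bit (counted from the MSB) within that
 * word.  The four words are then packed into ethhash0/ethhash1. */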
 479static void korina_multicast_list(struct net_device *dev)
 480{
 481	struct korina_private *lp = netdev_priv(dev);
 482	unsigned long flags;
 483	struct netdev_hw_addr *ha;
 484	u32 recognise = ETH_ARC_AB;	/* always accept broadcasts */
 485
 486	/* Set promiscuous mode */
 487	if (dev->flags & IFF_PROMISC)
 488		recognise |= ETH_ARC_PRO;
 489
 490	else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
 491		/* All multicast and broadcast */
 492		recognise |= ETH_ARC_AM;
 493
 494	/* Build the hash table */
 495	if (netdev_mc_count(dev) > 4) {
 496		u16 hash_table[4] = { 0 };
 497		u32 crc;
 498
 499		netdev_for_each_mc_addr(ha, dev) {
 500			crc = ether_crc_le(6, ha->addr);
 501			crc >>= 26;
 502			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
 503		}
 504		/* Accept filtered multicast */
 505		recognise |= ETH_ARC_AFM;
 506
 507		/* Fill the MAC hash tables with their values */
 508		writel((u32)(hash_table[1] << 16 | hash_table[0]),
 509					&lp->eth_regs->ethhash0);
 510		writel((u32)(hash_table[3] << 16 | hash_table[2]),
 511					&lp->eth_regs->ethhash1);
 512	}
 513
 514	spin_lock_irqsave(&lp->lock, flags);
 515	writel(recognise, &lp->eth_regs->etharc);
 516	spin_unlock_irqrestore(&lp->lock, flags);
 517}
 518
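/* Transmit completion: korina_tx() is called from the Tx DMA interrupt
 * with the FINI/ERR sources already masked.  It reclaims every finished
 * descriptor starting at tx_next_done, accounts errors from the devcs
 * word, frees the original skb, resets the descriptor, wakes the queue
 * if it had been stopped, and finally acknowledges the DMA status and
 * unmasks FINI/ERR again. */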
 519static void korina_tx(struct net_device *dev)
 520{
 521	struct korina_private *lp = netdev_priv(dev);
 522	struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
 523	u32 devcs;
 524	u32 dmas;
 525
 526	spin_lock(&lp->lock);
 527
 528	/* Process all desc that are done */
 529	while (IS_DMA_FINISHED(td->control)) {
 530		if (lp->tx_full == 1) {
 531			netif_wake_queue(dev);
 532			lp->tx_full = 0;
 533		}
 534
 535		devcs = lp->td_ring[lp->tx_next_done].devcs;
 536		if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
 537				(ETH_TX_FD | ETH_TX_LD)) {
 538			dev->stats.tx_errors++;
 539			dev->stats.tx_dropped++;
 540
 541			/* Should never happen */
 542			printk(KERN_ERR "%s: split tx ignored\n",
 543							dev->name);
 544		} else if (devcs & ETH_TX_TOK) {
 545			dev->stats.tx_packets++;
 546			dev->stats.tx_bytes +=
 547					lp->tx_skb[lp->tx_next_done]->len;
 548		} else {
 549			dev->stats.tx_errors++;
 550			dev->stats.tx_dropped++;
 551
 552			/* Underflow */
 553			if (devcs & ETH_TX_UND)
 554				dev->stats.tx_fifo_errors++;
 555
 556			/* Oversized frame */
 557			if (devcs & ETH_TX_OF)
 558				dev->stats.tx_aborted_errors++;
 559
 560			/* Excessive deferrals */
 561			if (devcs & ETH_TX_ED)
 562				dev->stats.tx_carrier_errors++;
 563
 564			/* Collisions: medium busy */
 565			if (devcs & ETH_TX_EC)
 566				dev->stats.collisions++;
 567
 568			/* Late collision */
 569			if (devcs & ETH_TX_LC)
 570				dev->stats.tx_window_errors++;
 571		}
 572
 573		/* We must always free the original skb */
 574		if (lp->tx_skb[lp->tx_next_done]) {
 575			dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
 576			lp->tx_skb[lp->tx_next_done] = NULL;
 577		}
 578
 579		lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
 580		lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
 581		lp->td_ring[lp->tx_next_done].link = 0;
 582		lp->td_ring[lp->tx_next_done].ca = 0;
 583		lp->tx_count--;
 584
 585		/* Go on to next transmission */
 586		lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
 587		td = &lp->td_ring[lp->tx_next_done];
 588
 589	}
 590
 591	/* Clear the DMA status register */
 592	dmas = readl(&lp->tx_dma_regs->dmas);
 593	writel(~dmas, &lp->tx_dma_regs->dmas);
 594
 595	writel(readl(&lp->tx_dma_regs->dmasm) &
 596			~(DMA_STAT_FINI | DMA_STAT_ERR),
 597			&lp->tx_dma_regs->dmasm);
 598
 599	spin_unlock(&lp->lock);
 600}
 601
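/* Tx DMA interrupt: masks FINI/ERR, runs the completion handler above
 * and, if a filled descriptor chain is waiting while the engine's
 * dmandptr has gone to zero, re-queues the chain head so transmission
 * resumes. */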
 602static irqreturn_t
 603korina_tx_dma_interrupt(int irq, void *dev_id)
 604{
 605	struct net_device *dev = dev_id;
 606	struct korina_private *lp = netdev_priv(dev);
 607	u32 dmas, dmasm;
 608	irqreturn_t retval;
 609
 610	dmas = readl(&lp->tx_dma_regs->dmas);
 611
 612	if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
 613		dmasm = readl(&lp->tx_dma_regs->dmasm);
 614		writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
 615				&lp->tx_dma_regs->dmasm);
 616
 617		korina_tx(dev);
 618
 619		if (lp->tx_chain_status == desc_filled &&
 620			(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
 621			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 622				&(lp->tx_dma_regs->dmandptr));
 623			lp->tx_chain_status = desc_empty;
 624			lp->tx_chain_head = lp->tx_chain_tail;
 625			netif_trans_update(dev);
 626		}
 627		if (dmas & DMA_STAT_ERR)
 628			printk(KERN_ERR "%s: DMA error\n", dev->name);
 629
 630		retval = IRQ_HANDLED;
 631	} else
 632		retval = IRQ_NONE;
 633
 634	return retval;
 635}
 636
 637
 638static void korina_check_media(struct net_device *dev, unsigned int init_media)
 639{
 640	struct korina_private *lp = netdev_priv(dev);
 641
 642	mii_check_media(&lp->mii_if, 0, init_media);
 643
 644	if (lp->mii_if.full_duplex)
 645		writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
 646						&lp->eth_regs->ethmac2);
 647	else
 648		writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
 649						&lp->eth_regs->ethmac2);
 650}
 651
 652static void korina_poll_media(unsigned long data)
 653{
 654	struct net_device *dev = (struct net_device *) data;
 655	struct korina_private *lp = netdev_priv(dev);
 656
 657	korina_check_media(dev, 0);
 658	mod_timer(&lp->media_check_timer, jiffies + HZ);
 659}
 660
 661static void korina_set_carrier(struct mii_if_info *mii)
 662{
 663	if (mii->force_media) {
 664		/* autoneg is off: Link is always assumed to be up */
 665		if (!netif_carrier_ok(mii->dev))
 666			netif_carrier_on(mii->dev);
  667	} else  /* Let MII library update carrier status */
 668		korina_check_media(mii->dev, 0);
 669}
 670
 671static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 672{
 673	struct korina_private *lp = netdev_priv(dev);
 674	struct mii_ioctl_data *data = if_mii(rq);
 675	int rc;
 676
 677	if (!netif_running(dev))
 678		return -EINVAL;
 679	spin_lock_irq(&lp->lock);
 680	rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
 681	spin_unlock_irq(&lp->lock);
 682	korina_set_carrier(&lp->mii_if);
 683
 684	return rc;
 685}
 686
 687/* ethtool helpers */
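/* These use the older ethtool_cmd based get_settings/set_settings hooks
 * and simply defer to the generic MII library under the driver lock. */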
 688static void netdev_get_drvinfo(struct net_device *dev,
 689			struct ethtool_drvinfo *info)
 690{
 691	struct korina_private *lp = netdev_priv(dev);
 692
 693	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 694	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 695	strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
 696}
 697
 698static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 699{
 700	struct korina_private *lp = netdev_priv(dev);
 701	int rc;
 702
 703	spin_lock_irq(&lp->lock);
 704	rc = mii_ethtool_gset(&lp->mii_if, cmd);
 705	spin_unlock_irq(&lp->lock);
 706
 707	return rc;
 708}
 709
 710static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 711{
 712	struct korina_private *lp = netdev_priv(dev);
 713	int rc;
 714
 715	spin_lock_irq(&lp->lock);
 716	rc = mii_ethtool_sset(&lp->mii_if, cmd);
 717	spin_unlock_irq(&lp->lock);
 718	korina_set_carrier(&lp->mii_if);
 719
 720	return rc;
 721}
 722
 723static u32 netdev_get_link(struct net_device *dev)
 724{
 725	struct korina_private *lp = netdev_priv(dev);
 726
 727	return mii_link_ok(&lp->mii_if);
 728}
 729
 730static const struct ethtool_ops netdev_ethtool_ops = {
 731	.get_drvinfo            = netdev_get_drvinfo,
 732	.get_settings           = netdev_get_settings,
 733	.set_settings           = netdev_set_settings,
 734	.get_link               = netdev_get_link,
 735};
 736
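/* Descriptor ring setup.  Both rings live in the single uncached
 * (KSEG1-mapped) block allocated in korina_probe(): KORINA_NUM_TDS
 * transmit descriptors followed by KORINA_NUM_RDS receive descriptors.
 * Each Rx descriptor gets its own KORINA_RBSIZE skb, the descriptors
 * are linked into a circle and the last one is marked DMA_DESC_COD. */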
 737static int korina_alloc_ring(struct net_device *dev)
 738{
 739	struct korina_private *lp = netdev_priv(dev);
 740	struct sk_buff *skb;
 741	int i;
 742
 743	/* Initialize the transmit descriptors */
 744	for (i = 0; i < KORINA_NUM_TDS; i++) {
 745		lp->td_ring[i].control = DMA_DESC_IOF;
 746		lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
 747		lp->td_ring[i].ca = 0;
 748		lp->td_ring[i].link = 0;
 749	}
 750	lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
 751			lp->tx_full = lp->tx_count = 0;
 752	lp->tx_chain_status = desc_empty;
 753
 754	/* Initialize the receive descriptors */
 755	for (i = 0; i < KORINA_NUM_RDS; i++) {
 756		skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
 757		if (!skb)
 758			return -ENOMEM;
 759		lp->rx_skb[i] = skb;
 760		lp->rd_ring[i].control = DMA_DESC_IOD |
 761				DMA_COUNT(KORINA_RBSIZE);
 762		lp->rd_ring[i].devcs = 0;
 763		lp->rd_ring[i].ca = CPHYSADDR(skb->data);
 764		lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
 765	}
 766
 767	/* loop back receive descriptors, so the last
 768	 * descriptor points to the first one */
 769	lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
 770	lp->rd_ring[i - 1].control |= DMA_DESC_COD;
 771
 772	lp->rx_next_done  = 0;
 773	lp->rx_chain_head = 0;
 774	lp->rx_chain_tail = 0;
 775	lp->rx_chain_status = desc_empty;
 776
 777	return 0;
 778}
 779
 780static void korina_free_ring(struct net_device *dev)
 781{
 782	struct korina_private *lp = netdev_priv(dev);
 783	int i;
 784
 785	for (i = 0; i < KORINA_NUM_RDS; i++) {
 786		lp->rd_ring[i].control = 0;
 787		if (lp->rx_skb[i])
 788			dev_kfree_skb_any(lp->rx_skb[i]);
 789		lp->rx_skb[i] = NULL;
 790	}
 791
 792	for (i = 0; i < KORINA_NUM_TDS; i++) {
 793		lp->td_ring[i].control = 0;
 794		if (lp->tx_skb[i])
 795			dev_kfree_skb_any(lp->tx_skb[i]);
 796		lp->tx_skb[i] = NULL;
 797	}
 798}
 799
 800/*
 801 * Initialize the RC32434 ethernet controller.
 802 */
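/* Bring-up sequence: abort both DMA channels, reset and re-enable the
 * Ethernet interface, allocate the rings, start Rx DMA and unmask the
 * DMA interrupts, then program the station address, MAC2 framing options,
 * inter-packet gaps, MII management clock divider and Tx FIFO threshold
 * before enabling the MAC receiver and starting NAPI and the queue. */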
 803static int korina_init(struct net_device *dev)
 804{
 805	struct korina_private *lp = netdev_priv(dev);
 806
 807	/* Disable DMA */
 808	korina_abort_tx(dev);
 809	korina_abort_rx(dev);
 810
 811	/* reset ethernet logic */
 812	writel(0, &lp->eth_regs->ethintfc);
 813	while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
 814		netif_trans_update(dev);
 815
 816	/* Enable Ethernet Interface */
 817	writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
 818
 819	/* Allocate rings */
 820	if (korina_alloc_ring(dev)) {
 821		printk(KERN_ERR "%s: descriptor allocation failed\n", dev->name);
 822		korina_free_ring(dev);
 823		return -ENOMEM;
 824	}
 825
 826	writel(0, &lp->rx_dma_regs->dmas);
 827	/* Start Rx DMA */
 828	korina_start_rx(lp, &lp->rd_ring[0]);
 829
 830	writel(readl(&lp->tx_dma_regs->dmasm) &
 831			~(DMA_STAT_FINI | DMA_STAT_ERR),
 832			&lp->tx_dma_regs->dmasm);
 833	writel(readl(&lp->rx_dma_regs->dmasm) &
 834			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
 835			&lp->rx_dma_regs->dmasm);
 836
 837	/* Accept only packets destined for this Ethernet device address */
 838	writel(ETH_ARC_AB, &lp->eth_regs->etharc);
 839
 840	/* Set all Ether station address registers to their initial values */
 841	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
 842	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);
 843
 844	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
 845	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);
 846
 847	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
 848	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);
 849
 850	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
 851	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);
 852
 853
 854	/* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
 855	writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
 856			&lp->eth_regs->ethmac2);
 857
 858	/* Back to back inter-packet-gap */
 859	writel(0x15, &lp->eth_regs->ethipgt);
 860	/* Non - Back to back inter-packet-gap */
 861	writel(0x12, &lp->eth_regs->ethipgr);
 862
 863	/* Management Clock Prescaler Divisor
 864	 * Clock independent setting */
 865	writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
 866		       &lp->eth_regs->ethmcp);
 867
 868	/* don't transmit until fifo contains 48b */
 869	writel(48, &lp->eth_regs->ethfifott);
 870
 871	writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);
 872
 873	napi_enable(&lp->napi);
 874	netif_start_queue(dev);
 875
 876	return 0;
 877}
 878
 879/*
 880 * Restart the RC32434 ethernet controller.
 881 */
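/* Scheduled from the Tx timeout handler and from the overflow/underflow
 * interrupts via korina_clear_and_restart(): with all four IRQs disabled
 * and the DMA interrupts masked, the rings are torn down and the
 * controller is reinitialized from scratch. */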
 882static void korina_restart_task(struct work_struct *work)
 883{
 884	struct korina_private *lp = container_of(work,
 885			struct korina_private, restart_task);
 886	struct net_device *dev = lp->dev;
 887
 888	/*
 889	 * Disable interrupts
 890	 */
 891	disable_irq(lp->rx_irq);
 892	disable_irq(lp->tx_irq);
 893	disable_irq(lp->ovr_irq);
 894	disable_irq(lp->und_irq);
 895
 896	writel(readl(&lp->tx_dma_regs->dmasm) |
 897				DMA_STAT_FINI | DMA_STAT_ERR,
 898				&lp->tx_dma_regs->dmasm);
 899	writel(readl(&lp->rx_dma_regs->dmasm) |
 900				DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
 901				&lp->rx_dma_regs->dmasm);
 902
 903	napi_disable(&lp->napi);
 904
 905	korina_free_ring(dev);
 906
 907	if (korina_init(dev) < 0) {
 908		printk(KERN_ERR "%s: cannot restart device\n", dev->name);
 909		return;
 910	}
 911	korina_multicast_list(dev);
 912
 913	enable_irq(lp->und_irq);
 914	enable_irq(lp->ovr_irq);
 915	enable_irq(lp->tx_irq);
 916	enable_irq(lp->rx_irq);
 917}
 918
 919static void korina_clear_and_restart(struct net_device *dev, u32 value)
 920{
 921	struct korina_private *lp = netdev_priv(dev);
 922
 923	netif_stop_queue(dev);
 924	writel(value, &lp->eth_regs->ethintfc);
 925	schedule_work(&lp->restart_task);
 926}
 927
 928/* Ethernet Tx Underflow interrupt */
 929static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
 930{
 931	struct net_device *dev = dev_id;
 932	struct korina_private *lp = netdev_priv(dev);
 933	unsigned int und;
 934
 935	spin_lock(&lp->lock);
 936
 937	und = readl(&lp->eth_regs->ethintfc);
 938
 939	if (und & ETH_INT_FC_UND)
 940		korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);
 941
 942	spin_unlock(&lp->lock);
 943
 944	return IRQ_HANDLED;
 945}
 946
 947static void korina_tx_timeout(struct net_device *dev)
 948{
 949	struct korina_private *lp = netdev_priv(dev);
 950
 951	schedule_work(&lp->restart_task);
 952}
 953
 954/* Ethernet Rx Overflow interrupt */
 955static irqreturn_t
 956korina_ovr_interrupt(int irq, void *dev_id)
 957{
 958	struct net_device *dev = dev_id;
 959	struct korina_private *lp = netdev_priv(dev);
 960	unsigned int ovr;
 961
 962	spin_lock(&lp->lock);
 963	ovr = readl(&lp->eth_regs->ethintfc);
 964
 965	if (ovr & ETH_INT_FC_OVR)
 966		korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);
 967
 968	spin_unlock(&lp->lock);
 969
 970	return IRQ_HANDLED;
 971}
 972
 973#ifdef CONFIG_NET_POLL_CONTROLLER
 974static void korina_poll_controller(struct net_device *dev)
 975{
 976	disable_irq(dev->irq);
 977	korina_tx_dma_interrupt(dev->irq, dev);
 978	enable_irq(dev->irq);
 979}
 980#endif
 981
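/* Open: initialize the controller, then request the four dedicated
 * interrupt lines (Rx DMA, Tx DMA, receive overflow, transmit underflow)
 * and arm the media check timer.  Failures unwind in reverse order. */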
 982static int korina_open(struct net_device *dev)
 983{
 984	struct korina_private *lp = netdev_priv(dev);
 985	int ret;
 986
 987	/* Initialize */
 988	ret = korina_init(dev);
 989	if (ret < 0) {
 990		printk(KERN_ERR "%s: cannot open device\n", dev->name);
 991		goto out;
 992	}
 993
  994	/* Install the interrupt handlers
  995	 * for the Done, Finished,
  996	 * Ovr and Und events */
 997	ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
 998			0, "Korina ethernet Rx", dev);
 999	if (ret < 0) {
1000		printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
1001		    dev->name, lp->rx_irq);
1002		goto err_release;
1003	}
1004	ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
1005			0, "Korina ethernet Tx", dev);
1006	if (ret < 0) {
1007		printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
1008		    dev->name, lp->tx_irq);
1009		goto err_free_rx_irq;
1010	}
1011
1012	/* Install handler for overrun error. */
1013	ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
1014			0, "Ethernet Overflow", dev);
1015	if (ret < 0) {
1016		printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
1017		    dev->name, lp->ovr_irq);
1018		goto err_free_tx_irq;
1019	}
1020
1021	/* Install handler for underflow error. */
1022	ret = request_irq(lp->und_irq, korina_und_interrupt,
1023			0, "Ethernet Underflow", dev);
1024	if (ret < 0) {
1025		printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
1026		    dev->name, lp->und_irq);
1027		goto err_free_ovr_irq;
1028	}
1029	mod_timer(&lp->media_check_timer, jiffies + 1);
1030out:
1031	return ret;
1032
1033err_free_ovr_irq:
1034	free_irq(lp->ovr_irq, dev);
1035err_free_tx_irq:
1036	free_irq(lp->tx_irq, dev);
1037err_free_rx_irq:
1038	free_irq(lp->rx_irq, dev);
1039err_release:
1040	korina_free_ring(dev);
1041	goto out;
1042}
1043
1044static int korina_close(struct net_device *dev)
1045{
1046	struct korina_private *lp = netdev_priv(dev);
1047	u32 tmp;
1048
1049	del_timer(&lp->media_check_timer);
1050
1051	/* Disable interrupts */
1052	disable_irq(lp->rx_irq);
1053	disable_irq(lp->tx_irq);
1054	disable_irq(lp->ovr_irq);
1055	disable_irq(lp->und_irq);
1056
1057	korina_abort_tx(dev);
1058	tmp = readl(&lp->tx_dma_regs->dmasm);
1059	tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
1060	writel(tmp, &lp->tx_dma_regs->dmasm);
1061
1062	korina_abort_rx(dev);
1063	tmp = readl(&lp->rx_dma_regs->dmasm);
1064	tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
1065	writel(tmp, &lp->rx_dma_regs->dmasm);
1066
1067	napi_disable(&lp->napi);
1068
1069	cancel_work_sync(&lp->restart_task);
1070
1071	korina_free_ring(dev);
1072
1073	free_irq(lp->rx_irq, dev);
1074	free_irq(lp->tx_irq, dev);
1075	free_irq(lp->ovr_irq, dev);
1076	free_irq(lp->und_irq, dev);
1077
1078	return 0;
1079}
1080
1081static const struct net_device_ops korina_netdev_ops = {
1082	.ndo_open		= korina_open,
1083	.ndo_stop		= korina_close,
1084	.ndo_start_xmit		= korina_send_packet,
1085	.ndo_set_rx_mode	= korina_multicast_list,
1086	.ndo_tx_timeout		= korina_tx_timeout,
1087	.ndo_do_ioctl		= korina_ioctl,
1088	.ndo_validate_addr	= eth_validate_addr,
1089	.ndo_set_mac_address	= eth_mac_addr,
1090#ifdef CONFIG_NET_POLL_CONTROLLER
1091	.ndo_poll_controller	= korina_poll_controller,
1092#endif
1093};
1094
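/* Probe: the board code hands over the MAC address through struct
 * korina_device in the platform drvdata.  The three register windows are
 * taken from the named MEM resources "korina_regs", "korina_dma_rx" and
 * "korina_dma_tx", the four interrupts from their "korina_*" names, and
 * the descriptor rings are allocated in one block that is accessed
 * through its uncached KSEG1 alias. */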
1095static int korina_probe(struct platform_device *pdev)
1096{
1097	struct korina_device *bif = platform_get_drvdata(pdev);
1098	struct korina_private *lp;
1099	struct net_device *dev;
1100	struct resource *r;
1101	int rc;
1102
1103	dev = alloc_etherdev(sizeof(struct korina_private));
1104	if (!dev)
1105		return -ENOMEM;
1106
1107	SET_NETDEV_DEV(dev, &pdev->dev);
1108	lp = netdev_priv(dev);
1109
1110	bif->dev = dev;
1111	memcpy(dev->dev_addr, bif->mac, ETH_ALEN);
1112
1113	lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
1114	lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
1115	lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
1116	lp->und_irq = platform_get_irq_byname(pdev, "korina_und");
1117
1118	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
1119	dev->base_addr = r->start;
1120	lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
1121	if (!lp->eth_regs) {
1122		printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
1123		rc = -ENXIO;
1124		goto probe_err_out;
1125	}
1126
1127	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
1128	lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
1129	if (!lp->rx_dma_regs) {
1130		printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
1131		rc = -ENXIO;
1132		goto probe_err_dma_rx;
1133	}
1134
1135	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
1136	lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
1137	if (!lp->tx_dma_regs) {
1138		printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
1139		rc = -ENXIO;
1140		goto probe_err_dma_tx;
1141	}
1142
1143	lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
1144	if (!lp->td_ring) {
1145		rc = -ENXIO;
1146		goto probe_err_td_ring;
1147	}
1148
1149	dma_cache_inv((unsigned long)(lp->td_ring),
1150			TD_RING_SIZE + RD_RING_SIZE);
1151
1152	/* now convert TD_RING pointer to KSEG1 */
1153	lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
1154	lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];
1155
1156	spin_lock_init(&lp->lock);
1157	/* just use the rx dma irq */
1158	dev->irq = lp->rx_irq;
1159	lp->dev = dev;
1160
1161	dev->netdev_ops = &korina_netdev_ops;
1162	dev->ethtool_ops = &netdev_ethtool_ops;
1163	dev->watchdog_timeo = TX_TIMEOUT;
1164	netif_napi_add(dev, &lp->napi, korina_poll, 64);
1165
1166	lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
1167	lp->mii_if.dev = dev;
1168	lp->mii_if.mdio_read = mdio_read;
1169	lp->mii_if.mdio_write = mdio_write;
1170	lp->mii_if.phy_id = lp->phy_addr;
1171	lp->mii_if.phy_id_mask = 0x1f;
1172	lp->mii_if.reg_num_mask = 0x1f;
1173
1174	rc = register_netdev(dev);
1175	if (rc < 0) {
1176		printk(KERN_ERR DRV_NAME
1177			": cannot register net device: %d\n", rc);
1178		goto probe_err_register;
1179	}
1180	setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);
1181
1182	INIT_WORK(&lp->restart_task, korina_restart_task);
1183
1184	printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
1185			dev->name);
1186out:
1187	return rc;
1188
1189probe_err_register:
1190	kfree(lp->td_ring);
1191probe_err_td_ring:
1192	iounmap(lp->tx_dma_regs);
1193probe_err_dma_tx:
1194	iounmap(lp->rx_dma_regs);
1195probe_err_dma_rx:
1196	iounmap(lp->eth_regs);
1197probe_err_out:
1198	free_netdev(dev);
1199	goto out;
1200}
1201
1202static int korina_remove(struct platform_device *pdev)
1203{
1204	struct korina_device *bif = platform_get_drvdata(pdev);
1205	struct korina_private *lp = netdev_priv(bif->dev);
1206
1207	iounmap(lp->eth_regs);
1208	iounmap(lp->rx_dma_regs);
1209	iounmap(lp->tx_dma_regs);
1210
1211	unregister_netdev(bif->dev);
1212	free_netdev(bif->dev);
1213
1214	return 0;
1215}
1216
1217static struct platform_driver korina_driver = {
1218	.driver.name = "korina",
1219	.probe = korina_probe,
1220	.remove = korina_remove,
1221};
1222
1223module_platform_driver(korina_driver);
1224
1225MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
1226MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
1227MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
1228MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
1229MODULE_LICENSE("GPL");