/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME	"sunqe"
#define DRV_VERSION	"4.1"
#define DRV_RELDATE	"August 27, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

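/* All probed QEC master controllers are chained on this list so that
 * module unload can release their shared IRQs and register mappings.
 */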
static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

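/* Hit the QEC's global reset bit and poll (up to QEC_RESET_TRIES times,
 * ~20us apart) for the hardware to clear it again.
 */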
static inline int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

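/* Quiesce a single channel: reset the AMD MACE first, then the QEC
 * channel logic, with a bounded poll on each reset bit.
 */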
static inline int qe_stop(struct sunqe *qep)
{
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}

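/* Reset the ring indexes and hand every RX descriptor (buffer address
 * plus RXD_OWN) back to the chip; TX descriptors stay zeroed until
 * qe_start_xmit() fills them in.
 */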
static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}

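/* Full (re)initialization of one channel: stop it, program descriptor
 * ring and FIFO pointers, configure the MACE, load the MAC address,
 * then wait briefly for link before re-enabling RX/TX via the
 * multicast path.
 */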
static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, neither the inter-frame spacing nor the throttle
	 * seems to be necessary.
	 */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (--tries) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries == 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}


/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		dev->stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR)
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);

	if (qe_status & CREG_STAT_BERROR)
		printk(KERN_ERR "%s: Babble error.\n", dev->name);

	if (qe_status & CREG_STAT_CCOFLOW) {
		dev->stats.tx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;
		dev->stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		dev->stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}


/* Per-QE receive interrupt service routine.  Received packets always
 * land in the channel's fixed DMA buffers and are copied from there
 * into freshly allocated skbs.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct net_device *dev = qep->dev;
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new, drops = 0;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if (skb == NULL) {
				drops++;
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, 2);
				skb_put(skb, len);
				skb_copy_to_linear_data(skb, this_qbuf, len);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}


static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all four QE channels arrive via the shared QEC master
 * controller, so we walk each channel's status nibble to see who is
 * signaling and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
	struct sunqec *qecp = dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
	next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}

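/* netdev open/close: bring the channel fully up via qe_init(), or
 * quiesce it again via qe_stop().
 */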
static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

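/* Watchdog: first try to reclaim completed TX entries; only if the
 * ring is still full do we reset the whole channel.
 */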
static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim; if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire. */
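/* The payload is copied into a fixed DMA-coherent bounce buffer, so
 * the skb can be freed as soon as the descriptor is handed to the
 * chip; completed descriptors are reclaimed lazily.
 */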
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	skb_copy_from_linear_data(skb, txbuf, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

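/* Program the MACE receive filter.  Three modes: ALLMULTI (or more
 * than 64 multicast addresses) loads an all-ones logical address
 * filter, PROMISC sets the promiscuous bit in mconfig, and otherwise
 * a 64-bit hash filter is built from the top 6 bits of each address's
 * little-endian CRC-32.
 */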
static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 new_mconfig = qep->mconfig;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}

/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	const struct linux_prom_registers *regs;
	struct sunqe *qep = netdev_priv(dev);
	struct platform_device *op;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);

	op = qep->op;
	regs = of_get_property(op->dev.of_node, "reg", NULL);
	if (regs)
		sprintf(info->bus_info, "SBUS:%d", regs->which_io);
}

static u32 qe_get_link(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	void __iomem *mregs = qep->mregs;
	u8 phyconfig;

	spin_lock_irq(&qep->lock);
	phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
	spin_unlock_irq(&qep->lock);

	return phyconfig & MREGS_PHYCONFIG_LSTAT;
}

static const struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo		= qe_get_drvinfo,
	.get_link		= qe_get_link,
};

/* This is only called once at boot time for each card probed. */
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Packet size is only used in 100baseT BigMAC configurations;
	 * program a sane default just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((resource_size(&op->resource[1]) >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}


static u8 __devinit qec_get_burst(struct device_node *dp)
{
	u8 bsizes, bsizes_more;

	/* Find and set the burst sizes for the QEC, since it
	 * does the actual dma for all 4 channels.
	 */
	bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	return bsizes;
}

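/* Find (or create on first reference) the shared per-QEC state for a
 * QE channel's parent device: map the global registers, verify the
 * controller is in MACE mode, reset it, and hook up the shared IRQ.
 */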
static struct sunqec * __devinit get_qec(struct platform_device *child)
{
	struct platform_device *op = to_platform_device(child->dev.parent);
	struct sunqec *qecp;

	qecp = dev_get_drvdata(&op->dev);
	if (!qecp) {
		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
		if (qecp) {
			u32 ctrl;

			qecp->op = op;
			qecp->gregs = of_ioremap(&op->resource[0], 0,
						 GLOB_REG_SIZE,
						 "QEC Global Registers");
			if (!qecp->gregs)
				goto fail;

			/* Make sure the QEC is in MACE mode. */
			ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
			ctrl &= 0xf0000000;
			if (ctrl != GLOB_CTRL_MMODE) {
				printk(KERN_ERR "qec: Not in MACE mode!\n");
				goto fail;
			}

			if (qec_global_reset(qecp->gregs))
				goto fail;

			qecp->qec_bursts = qec_get_burst(op->dev.of_node);

			qec_init_once(qecp, op);

			if (request_irq(op->archdata.irqs[0], qec_interrupt,
					IRQF_SHARED, "qec", (void *) qecp)) {
				printk(KERN_ERR "qec: Can't register irq.\n");
				goto fail;
			}

			dev_set_drvdata(&op->dev, qecp);

			qecp->next_module = root_qec_dev;
			root_qec_dev = qecp;
		}
	}

	return qecp;

fail:
	if (qecp->gregs)
		of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
	kfree(qecp);
	return NULL;
}


static const struct net_device_ops qec_ops = {
	.ndo_open		= qe_open,
	.ndo_stop		= qe_close,
	.ndo_start_xmit		= qe_start_xmit,
	.ndo_set_multicast_list	= qe_set_multicast,
	.ndo_tx_timeout		= qe_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

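/* Probe one QE channel: allocate the netdev, locate the parent QEC,
 * map the channel and MACE registers, allocate the DMA-coherent
 * descriptor block and bounce buffers, then register the device.
 */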
static int __devinit qec_ether_init(struct platform_device *op)
{
	static unsigned version_printed;
	struct net_device *dev;
	struct sunqec *qecp;
	struct sunqe *qe;
	int i, res;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(struct sunqe));
	if (!dev)
		return -ENOMEM;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);

	qe = netdev_priv(dev);

	res = -ENODEV;

	i = of_getintprop_default(op->dev.of_node, "channel#", -1);
	if (i == -1)
		goto fail;
	qe->channel = i;
	spin_lock_init(&qe->lock);

	qecp = get_qec(op);
	if (!qecp)
		goto fail;

	qecp->qes[qe->channel] = qe;
	qe->dev = dev;
	qe->parent = qecp;
	qe->op = op;

	res = -ENOMEM;
	qe->qcregs = of_ioremap(&op->resource[0], 0,
				CREG_REG_SIZE, "QEC Channel Registers");
	if (!qe->qcregs) {
		printk(KERN_ERR "qe: Cannot map channel registers.\n");
		goto fail;
	}

	qe->mregs = of_ioremap(&op->resource[1], 0,
			       MREGS_REG_SIZE, "QE MACE Registers");
	if (!qe->mregs) {
		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
		goto fail;
	}

	qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
					  &qe->qblock_dvma, GFP_ATOMIC);
	qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
					 &qe->buffers_dvma, GFP_ATOMIC);
	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
	    qe->buffers == NULL || qe->buffers_dvma == 0)
		goto fail;

	/* Stop this QE. */
	qe_stop(qe);

	SET_NETDEV_DEV(dev, &op->dev);

	dev->watchdog_timeo = 5*HZ;
	dev->irq = op->archdata.irqs[0];
	dev->dma = 0;
	dev->ethtool_ops = &qe_ethtool_ops;
	dev->netdev_ops = &qec_ops;

	res = register_netdev(dev);
	if (res)
		goto fail;

	dev_set_drvdata(&op->dev, qe);

	printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c",
		       dev->dev_addr[i],
		       i == 5 ? ' ' : ':');
	printk("\n");

	return 0;

fail:
	if (qe->qcregs)
		of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
	if (qe->mregs)
		of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
	if (qe->qe_block)
		dma_free_coherent(&op->dev, PAGE_SIZE,
				  qe->qe_block, qe->qblock_dvma);
	if (qe->buffers)
		dma_free_coherent(&op->dev,
				  sizeof(struct sunqe_buffers),
				  qe->buffers,
				  qe->buffers_dvma);

	free_netdev(dev);

	return res;
}


static int __devinit qec_sbus_probe(struct platform_device *op)
{
	return qec_ether_init(op);
}

static int __devexit qec_sbus_remove(struct platform_device *op)
{
	struct sunqe *qp = dev_get_drvdata(&op->dev);
	struct net_device *net_dev = qp->dev;

	unregister_netdev(net_dev);

	of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
	of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
	dma_free_coherent(&op->dev, PAGE_SIZE,
			  qp->qe_block, qp->qblock_dvma);
	dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
			  qp->buffers, qp->buffers_dvma);

	free_netdev(net_dev);

	dev_set_drvdata(&op->dev, NULL);

	return 0;
}

static const struct of_device_id qec_sbus_match[] = {
	{
		.name = "qe",
	},
	{},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);

static struct platform_driver qec_sbus_driver = {
	.driver = {
		.name = "qec",
		.owner = THIS_MODULE,
		.of_match_table = qec_sbus_match,
	},
	.probe		= qec_sbus_probe,
	.remove		= __devexit_p(qec_sbus_remove),
};

static int __init qec_init(void)
{
	return platform_driver_register(&qec_sbus_driver);
}

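/* On module exit, walk the root_qec_dev list and release each QEC
 * master's IRQ and global register mapping; the per-channel resources
 * were already torn down by qec_sbus_remove().
 */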
static void __exit qec_exit(void)
{
	platform_driver_unregister(&qec_sbus_driver);

	while (root_qec_dev) {
		struct sunqec *next = root_qec_dev->next_module;
		struct platform_device *op = root_qec_dev->op;

		free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
		of_iounmap(&op->resource[0], root_qec_dev->gregs,
			   GLOB_REG_SIZE);
		kfree(root_qec_dev);

		root_qec_dev = next;
	}
}

module_init(qec_init);
module_exit(qec_exit);