/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}

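/* Advance skb->data to the next @align-byte boundary by reserving headroom. */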
static void skb_align(struct sk_buff *skb, int align)
{
	int off = ((unsigned long)skb->data) & (align - 1);

	if (off)
		skb_reserve(skb, align - off);
}

/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	if (budget <= 0)
		return received;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi */
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = netdev_alloc_skb(dev, pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received < budget) {
		/* done */
		napi_complete(napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	return received;
}

/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = netdev_alloc_skb(dev, pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_rx(skb);
			} else {
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	return 0;
}

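/* Reclaim completed TX buffer descriptors: record errors and statistics,
 * unmap and free the transmitted skbs, and wake the queue when ring space
 * becomes available again. Called from the interrupt handler. */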
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY) {
			dev_warn(fep->dev,
				 "HEY! Enet xmit interrupt and TX_READY.\n");
		}

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);
}

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = napi_schedule_prep(&fep->napi);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/* NOTE: it is possible for FCCs in NAPI mode */
				/* to submit a spurious interrupt while in poll */
				if (napi_ok)
					__napi_schedule(&fep->napi);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}

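/* Allocate receive skbs and (re)initialize the RX and TX buffer descriptor rings. */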
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
		if (skb == NULL)
			break;

		skb_align(skb, ENET_RX_ALIGN);
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * if we failed, fillup remainder
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}

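/* Unmap and free any skbs still attached to the TX and RX descriptor rings. */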
void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t __iomem *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
			L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;

		dev_kfree_skb(skb);
	}
}

/**********************************************************************************/

#ifdef CONFIG_FS_ENET_MPC5121_FEC
/*
 * The MPC5121 FEC requires 4-byte alignment for the TX data buffer!
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct sk_buff *new_skb;

	/* Alloc new skb */
	new_skb = netdev_alloc_skb(dev, skb->len + 4);
	if (!new_skb)
		return NULL;

	/* Make sure new skb is properly aligned */
	skb_align(new_skb, 4);

	/* Copy data to new skb ... */
	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	skb_put(new_skb, skb->len);

	/* ... and free the old one */
	dev_kfree_skb_any(skb);

	return new_skb;
}
#endif

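/* Queue a single skb on the next free TX buffer descriptor and kick the
 * controller to start transmission. */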
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	int curidx;
	u16 sc;
	unsigned long flags;

#ifdef CONFIG_FS_ENET_MPC5121_FEC
	if (((unsigned long)skb->data) & 0x3) {
		skb = tx_skb_align_workaround(dev, skb);
		if (!skb) {
			/*
			 * We have lost the packet due to a memory allocation
			 * error in tx_skb_align_workaround(). Hopefully the
			 * original skb is still valid, so try to transmit it
			 * later.
			 */
			return NETDEV_TX_BUSY;
		}
	}
#endif
	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		dev_warn(fep->dev, "tx queue full!.\n");
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;
	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;

	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* note that while FEC does not have this bit
	 * it marks it as available for software use
	 * yay for hw reuse :) */
	if (skb->len <= 60)
		sc |= BD_ENET_TX_PAD;
	CBDS_SC(bdp, sc);

	skb_tx_timestamp(skb);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock_irqrestore(&fep->tx_lock, flags);

	return NETDEV_TX_OK;
}

static void fs_timeout(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int wake = 0;

	fep->stats.tx_errors++;

	spin_lock_irqsave(&fep->lock, flags);

	if (dev->flags & IFF_UP) {
		phy_stop(fep->phydev);
		(*fep->ops->stop)(dev);
		(*fep->ops->restart)(dev);
		phy_start(fep->phydev);
	}

	phy_start(fep->phydev);
	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}

/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phydev;
	int new_state = 0;

	if (phydev->link) {
		/* adjust to duplex mode */
		if (phydev->duplex != fep->oldduplex) {
			new_state = 1;
			fep->oldduplex = phydev->duplex;
		}

		if (phydev->speed != fep->oldspeed) {
			new_state = 1;
			fep->oldspeed = phydev->speed;
		}

		if (!fep->oldlink) {
			new_state = 1;
			fep->oldlink = 1;
		}

		if (new_state)
			fep->ops->restart(dev);
	} else if (fep->oldlink) {
		new_state = 1;
		fep->oldlink = 0;
		fep->oldspeed = 0;
		fep->oldduplex = -1;
	}

	if (new_state && netif_msg_link(fep))
		phy_print_status(phydev);
}

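/* PHY link-change callback: use the MAC-specific adjust_link hook when the
 * MAC provides one, otherwise fall back to the generic handler above. */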
static void fs_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);

	if (fep->ops->adjust_link)
		fep->ops->adjust_link(dev);
	else
		generic_adjust_link(dev);

	spin_unlock_irqrestore(&fep->lock, flags);
}

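/* Connect the interface to its PHY (device-tree phandle or fixed link) and
 * reset the cached link state. */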
static int fs_init_phy(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev;
	phy_interface_t iface;

	fep->oldlink = 0;
	fep->oldspeed = 0;
	fep->oldduplex = -1;

	iface = fep->fpi->use_rmii ?
		PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;

	phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
				iface);
	if (!phydev) {
		phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
						   iface);
	}
	if (!phydev) {
		dev_err(&dev->dev, "Could not attach to PHY\n");
		return -ENODEV;
	}

	fep->phydev = phydev;

	return 0;
}

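/* Bring the interface up: set up the descriptor rings, enable NAPI, request
 * the interrupt and start the PHY before enabling the transmit queue. */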
static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int r;
	int err;

	/* to initialize the fep->cur_rx,... */
	/* not doing this, will cause a crash in fs_enet_rx_napi */
	fs_init_bds(fep->ndev);

	if (fep->fpi->use_napi)
		napi_enable(&fep->napi);

	/* Install our interrupt handler. */
	r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
			"fs_enet-mac", dev);
	if (r != 0) {
		dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
		if (fep->fpi->use_napi)
			napi_disable(&fep->napi);
		return -EINVAL;
	}

	err = fs_init_phy(dev);
	if (err) {
		free_irq(fep->interrupt, dev);
		if (fep->fpi->use_napi)
			napi_disable(&fep->napi);
		return err;
	}
	phy_start(fep->phydev);

	netif_start_queue(dev);

	return 0;
}

static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	if (fep->fpi->use_napi)
		napi_disable(&fep->napi);
	phy_stop(fep->phydev);

	spin_lock_irqsave(&fep->lock, flags);
	spin_lock(&fep->tx_lock);
	(*fep->ops->stop)(dev);
	spin_unlock(&fep->tx_lock);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	phy_disconnect(fep->phydev);
	fep->phydev = NULL;
	free_irq(fep->interrupt, dev);

	return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_sset(fep->phydev, cmd);
}

static int fs_nway_reset(struct net_device *dev)
{
	return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo = fs_get_drvinfo,
	.get_regs_len = fs_get_regs_len,
	.get_settings = fs_get_settings,
	.set_settings = fs_set_settings,
	.nway_reset = fs_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fs_get_msglevel,
	.set_msglevel = fs_set_msglevel,
	.get_regs = fs_get_regs,
	.get_ts_info = ethtool_op_get_ts_info,
};

static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_mii_ioctl(fep->phydev, rq, cmd);
}

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

/**************************************************************************************/

#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
#define IS_FEC(match) 0
#endif

static const struct net_device_ops fs_enet_netdev_ops = {
	.ndo_open		= fs_enet_open,
	.ndo_stop		= fs_enet_close,
	.ndo_get_stats		= fs_enet_get_stats,
	.ndo_start_xmit		= fs_enet_start_xmit,
	.ndo_tx_timeout		= fs_timeout,
	.ndo_set_rx_mode	= fs_set_multicast_list,
	.ndo_do_ioctl		= fs_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fs_enet_netpoll,
#endif
};

static struct of_device_id fs_enet_match[];
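/* Probe: parse the device-tree node, allocate the net_device together with
 * the skb ring bookkeeping, and register the interface. */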
static int fs_enet_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	struct net_device *ndev;
	struct fs_enet_private *fep;
	struct fs_platform_info *fpi;
	const u32 *data;
	struct clk *clk;
	int err;
	const u8 *mac_addr;
	const char *phy_connection_type;
	int privsize, len, ret = -ENODEV;

	match = of_match_device(fs_enet_match, &ofdev->dev);
	if (!match)
		return -EINVAL;

	fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
	if (!fpi)
		return -ENOMEM;

	if (!IS_FEC(match)) {
		data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
		if (!data || len != 4)
			goto out_free_fpi;

		fpi->cp_command = *data;
	}

	fpi->rx_ring = 32;
	fpi->tx_ring = 32;
	fpi->rx_copybreak = 240;
	fpi->use_napi = 1;
	fpi->napi_weight = 17;
	fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
	if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
						  NULL)))
		goto out_free_fpi;

	if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
		phy_connection_type = of_get_property(ofdev->dev.of_node,
						"phy-connection-type", NULL);
		if (phy_connection_type && !strcmp("rmii", phy_connection_type))
			fpi->use_rmii = 1;
	}

	/* make clock lookup non-fatal (the driver is shared among platforms),
	 * but require enable to succeed when a clock was specified/found,
	 * keep a reference to the clock upon successful acquisition
	 */
	clk = devm_clk_get(&ofdev->dev, "per");
	if (!IS_ERR(clk)) {
		err = clk_prepare_enable(clk);
		if (err) {
			ret = err;
			goto out_free_fpi;
		}
		fpi->clk_per = clk;
	}

	privsize = sizeof(*fep) +
	           sizeof(struct sk_buff **) *
	           (fpi->rx_ring + fpi->tx_ring);

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		ret = -ENOMEM;
		goto out_put;
	}

	SET_NETDEV_DEV(ndev, &ofdev->dev);
	platform_set_drvdata(ofdev, ndev);

	fep = netdev_priv(ndev);
	fep->dev = &ofdev->dev;
	fep->ndev = ndev;
	fep->fpi = fpi;
	fep->ops = match->data;

	ret = fep->ops->setup_data(ndev);
	if (ret)
		goto out_free_dev;

	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	mac_addr = of_get_mac_address(ofdev->dev.of_node);
	if (mac_addr)
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);

	ret = fep->ops->allocate_bd(ndev);
	if (ret)
		goto out_cleanup_data;

	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	ndev->netdev_ops = &fs_enet_netdev_ops;
	ndev->watchdog_timeo = 2 * HZ;
	if (fpi->use_napi)
		netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
		               fpi->napi_weight);

	ndev->ethtool_ops = &fs_ethtool_ops;

	init_timer(&fep->phy_timer_list);

	netif_carrier_off(ndev);

	ret = register_netdev(ndev);
	if (ret)
		goto out_free_bd;

	pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);

	return 0;

out_free_bd:
	fep->ops->free_bd(ndev);
out_cleanup_data:
	fep->ops->cleanup_data(ndev);
out_free_dev:
	free_netdev(ndev);
out_put:
	of_node_put(fpi->phy_node);
	if (fpi->clk_per)
		clk_disable_unprepare(fpi->clk_per);
out_free_fpi:
	kfree(fpi);
	return ret;
}

static int fs_enet_remove(struct platform_device *ofdev)
{
	struct net_device *ndev = platform_get_drvdata(ofdev);
	struct fs_enet_private *fep = netdev_priv(ndev);

	unregister_netdev(ndev);

	fep->ops->free_bd(ndev);
	fep->ops->cleanup_data(ndev);
	dev_set_drvdata(fep->dev, NULL);
	of_node_put(fep->fpi->phy_node);
	if (fep->fpi->clk_per)
		clk_disable_unprepare(fep->fpi->clk_per);
	free_netdev(ndev);
	return 0;
}

static struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
	{
		.compatible = "fsl,cpm1-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
	{
		.compatible = "fsl,cpm2-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
	{
		.compatible = "fsl,cpm2-fcc-enet",
		.data = (void *)&fs_fcc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	{
		.compatible = "fsl,mpc5121-fec",
		.data = (void *)&fs_fec_ops,
	},
	{
		.compatible = "fsl,mpc5125-fec",
		.data = (void *)&fs_fec_ops,
	},
#else
	{
		.compatible = "fsl,pq1-fec-enet",
		.data = (void *)&fs_fec_ops,
	},
#endif
#endif
	{}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);

static struct platform_driver fs_enet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "fs_enet",
		.of_match_table = fs_enet_match,
	},
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
};

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	fs_enet_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

module_platform_driver(fs_enet_driver);