   1/* drivers/net/ethernet/freescale/gianfar.c
   2 *
   3 * Gianfar Ethernet Driver
   4 * This driver is designed for the non-CPM ethernet controllers
   5 * on the 85xx and 83xx family of integrated processors
   6 * Based on 8260_io/fcc_enet.c
   7 *
   8 * Author: Andy Fleming
   9 * Maintainer: Kumar Gala
  10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  11 *
  12 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
  13 * Copyright 2007 MontaVista Software, Inc.
  14 *
  15 * This program is free software; you can redistribute  it and/or modify it
  16 * under  the terms of  the GNU General  Public License as published by the
  17 * Free Software Foundation;  either version 2 of the  License, or (at your
  18 * option) any later version.
  19 *
  20 *  Gianfar:  AKA Lambda Draconis, "Dragon"
  21 *  RA 11 31 24.2
  22 *  Dec +69 19 52
  23 *  V 3.84
  24 *  B-V +1.62
  25 *
  26 *  Theory of operation
  27 *
  28 *  The driver is initialized through of_device. Configuration information
  29 *  is therefore conveyed through an OF-style device tree.
  30 *
  31 *  The Gianfar Ethernet Controller uses a ring of buffer
  32 *  descriptors.  The beginning is indicated by a register
  33 *  pointing to the physical address of the start of the ring.
  34 *  The end is determined by a "wrap" bit being set in the
  35 *  last descriptor of the ring.
  36 *
  37 *  When a packet is received, the RXF bit in the
  38 *  IEVENT register is set, triggering an interrupt when the
  39 *  corresponding bit in the IMASK register is also set (if
  40 *  interrupt coalescing is active, then the interrupt may not
  41 *  happen immediately, but will wait until either a set number
  42 *  of frames or amount of time have passed).  In NAPI, the
  43 *  interrupt handler will signal there is work to be done, and
  44 *  exit. This method will start at the last known empty
  45 *  descriptor, and process every subsequent descriptor until there
  46 *  are none left with data (NAPI will stop after a set number of
  47 *  packets to give time to other tasks, but will eventually
  48 *  process all the packets).  The data arrives inside a
  49 *  pre-allocated skb, and so after the skb is passed up to the
  50 *  stack, a new skb must be allocated, and the address field in
  51 *  the buffer descriptor must be updated to indicate this new
  52 *  skb.
  53 *
  54 *  When the kernel requests that a packet be transmitted, the
  55 *  driver starts where it left off last time, and points the
  56 *  descriptor at the buffer which was passed in.  The driver
  57 *  then informs the DMA engine that there are packets ready to
  58 *  be transmitted.  Once the controller is finished transmitting
  59 *  the packet, an interrupt may be triggered (under the same
  60 *  conditions as for reception, but depending on the TXF bit).
  61 *  The driver then cleans up the buffer.
  62 */
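/* A minimal sketch of the receive-ring walk described above (illustrative
 * only; the driver's real logic lives in gfar_clean_rx_ring() below, and
 * hand_skb_to_stack() is a made-up stand-in for the skb handling):
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		u16 wrap = bdp->status & RXBD_WRAP;
 *
 *		hand_skb_to_stack(bdp);			// pass skb up, refill
 *		bdp->status = RXBD_EMPTY | wrap;	// hand BD back to hw
 *		bdp = wrap ? rx_queue->rx_bd_base : bdp + 1;
 *	}
 */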
  63
  64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  65#define DEBUG
  66
  67#include <linux/kernel.h>
  68#include <linux/string.h>
  69#include <linux/errno.h>
  70#include <linux/unistd.h>
  71#include <linux/slab.h>
  72#include <linux/interrupt.h>
  73#include <linux/delay.h>
  74#include <linux/netdevice.h>
  75#include <linux/etherdevice.h>
  76#include <linux/skbuff.h>
  77#include <linux/if_vlan.h>
  78#include <linux/spinlock.h>
  79#include <linux/mm.h>
  80#include <linux/of_address.h>
  81#include <linux/of_irq.h>
  82#include <linux/of_mdio.h>
  83#include <linux/of_platform.h>
  84#include <linux/ip.h>
  85#include <linux/tcp.h>
  86#include <linux/udp.h>
  87#include <linux/in.h>
  88#include <linux/net_tstamp.h>
  89
  90#include <asm/io.h>
  91#include <asm/reg.h>
  92#include <asm/mpc85xx.h>
  93#include <asm/irq.h>
  94#include <asm/uaccess.h>
  95#include <linux/module.h>
  96#include <linux/dma-mapping.h>
  97#include <linux/crc32.h>
  98#include <linux/mii.h>
  99#include <linux/phy.h>
 100#include <linux/phy_fixed.h>
 101#include <linux/of.h>
 102#include <linux/of_net.h>
 103
 104#include "gianfar.h"
 105
 106#define TX_TIMEOUT      (1*HZ)
 107
 108const char gfar_driver_version[] = "1.3";
 109
 110static int gfar_enet_open(struct net_device *dev);
 111static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 112static void gfar_reset_task(struct work_struct *work);
 113static void gfar_timeout(struct net_device *dev);
 114static int gfar_close(struct net_device *dev);
 115struct sk_buff *gfar_new_skb(struct net_device *dev);
 116static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 117			   struct sk_buff *skb);
 118static int gfar_set_mac_address(struct net_device *dev);
 119static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 120static irqreturn_t gfar_error(int irq, void *dev_id);
 121static irqreturn_t gfar_transmit(int irq, void *dev_id);
 122static irqreturn_t gfar_interrupt(int irq, void *dev_id);
 123static void adjust_link(struct net_device *dev);
 124static noinline void gfar_update_link_state(struct gfar_private *priv);
 125static int init_phy(struct net_device *dev);
 126static int gfar_probe(struct platform_device *ofdev);
 127static int gfar_remove(struct platform_device *ofdev);
 128static void free_skb_resources(struct gfar_private *priv);
 129static void gfar_set_multi(struct net_device *dev);
 130static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
 131static void gfar_configure_serdes(struct net_device *dev);
 132static int gfar_poll_rx(struct napi_struct *napi, int budget);
 133static int gfar_poll_tx(struct napi_struct *napi, int budget);
 134static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
 135static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
 136#ifdef CONFIG_NET_POLL_CONTROLLER
 137static void gfar_netpoll(struct net_device *dev);
 138#endif
 139int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 140static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 141static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 142			       int amount_pull, struct napi_struct *napi);
 143static void gfar_halt_nodisable(struct gfar_private *priv);
 144static void gfar_clear_exact_match(struct net_device *dev);
 145static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 146				  const u8 *addr);
 147static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 148
 149MODULE_AUTHOR("Freescale Semiconductor, Inc");
 150MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 151MODULE_LICENSE("GPL");
 152
 153static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 154			    dma_addr_t buf)
 155{
 156	u32 lstatus;
 157
 158	bdp->bufPtr = buf;
 159
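	/* BD_LFLAG() shifts the status flags into the upper half of
	 * lstatus; the lower 16 bits hold the buffer length, left at
	 * zero here for an empty RX descriptor.
	 */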
 160	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
 161	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
 162		lstatus |= BD_LFLAG(RXBD_WRAP);
 163
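	/* eieio() orders the bufPtr store above before the lstatus store
	 * below, so the controller never sees an EMPTY descriptor with a
	 * stale buffer pointer.
	 */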
 164	eieio();
 165
 166	bdp->lstatus = lstatus;
 167}
 168
 169static int gfar_init_bds(struct net_device *ndev)
 170{
 171	struct gfar_private *priv = netdev_priv(ndev);
 172	struct gfar_priv_tx_q *tx_queue = NULL;
 173	struct gfar_priv_rx_q *rx_queue = NULL;
 174	struct txbd8 *txbdp;
 175	struct rxbd8 *rxbdp;
 176	int i, j;
 177
 178	for (i = 0; i < priv->num_tx_queues; i++) {
 179		tx_queue = priv->tx_queue[i];
 180		/* Initialize some variables in our dev structure */
 181		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
 182		tx_queue->dirty_tx = tx_queue->tx_bd_base;
 183		tx_queue->cur_tx = tx_queue->tx_bd_base;
 184		tx_queue->skb_curtx = 0;
 185		tx_queue->skb_dirtytx = 0;
 186
 187		/* Initialize Transmit Descriptor Ring */
 188		txbdp = tx_queue->tx_bd_base;
 189		for (j = 0; j < tx_queue->tx_ring_size; j++) {
 190			txbdp->lstatus = 0;
 191			txbdp->bufPtr = 0;
 192			txbdp++;
 193		}
 194
 195		/* Set the last descriptor in the ring to indicate wrap */
 196		txbdp--;
 197		txbdp->status |= TXBD_WRAP;
 198	}
 199
 200	for (i = 0; i < priv->num_rx_queues; i++) {
 201		rx_queue = priv->rx_queue[i];
 202		rx_queue->cur_rx = rx_queue->rx_bd_base;
 203		rx_queue->skb_currx = 0;
 204		rxbdp = rx_queue->rx_bd_base;
 205
 206		for (j = 0; j < rx_queue->rx_ring_size; j++) {
 207			struct sk_buff *skb = rx_queue->rx_skbuff[j];
 208
 209			if (skb) {
 210				gfar_init_rxbdp(rx_queue, rxbdp,
 211						rxbdp->bufPtr);
 212			} else {
 213				skb = gfar_new_skb(ndev);
 214				if (!skb) {
 215					netdev_err(ndev, "Can't allocate RX buffers\n");
 216					return -ENOMEM;
 217				}
 218				rx_queue->rx_skbuff[j] = skb;
 219
 220				gfar_new_rxbdp(rx_queue, rxbdp, skb);
 221			}
 222
 223			rxbdp++;
 224		}
 225
 226	}
 227
 228	return 0;
 229}
 230
 231static int gfar_alloc_skb_resources(struct net_device *ndev)
 232{
 233	void *vaddr;
 234	dma_addr_t addr;
 235	int i, j, k;
 236	struct gfar_private *priv = netdev_priv(ndev);
 237	struct device *dev = priv->dev;
 238	struct gfar_priv_tx_q *tx_queue = NULL;
 239	struct gfar_priv_rx_q *rx_queue = NULL;
 240
 241	priv->total_tx_ring_size = 0;
 242	for (i = 0; i < priv->num_tx_queues; i++)
 243		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
 244
 245	priv->total_rx_ring_size = 0;
 246	for (i = 0; i < priv->num_rx_queues; i++)
 247		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
 248
 249	/* Allocate memory for the buffer descriptors */
 250	vaddr = dma_alloc_coherent(dev,
 251				   (priv->total_tx_ring_size *
 252				    sizeof(struct txbd8)) +
 253				   (priv->total_rx_ring_size *
 254				    sizeof(struct rxbd8)),
 255				   &addr, GFP_KERNEL);
 256	if (!vaddr)
 257		return -ENOMEM;
 258
 259	for (i = 0; i < priv->num_tx_queues; i++) {
 260		tx_queue = priv->tx_queue[i];
 261		tx_queue->tx_bd_base = vaddr;
 262		tx_queue->tx_bd_dma_base = addr;
 263		tx_queue->dev = ndev;
 264		/* enet DMA only understands physical addresses */
 265		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
 266		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
 267	}
 268
 269	/* Start the rx descriptor ring where the tx ring leaves off */
 270	for (i = 0; i < priv->num_rx_queues; i++) {
 271		rx_queue = priv->rx_queue[i];
 272		rx_queue->rx_bd_base = vaddr;
 273		rx_queue->rx_bd_dma_base = addr;
 274		rx_queue->dev = ndev;
 275		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 276		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 277	}
 278
 279	/* Setup the skbuff rings */
 280	for (i = 0; i < priv->num_tx_queues; i++) {
 281		tx_queue = priv->tx_queue[i];
 282		tx_queue->tx_skbuff =
 283			kmalloc_array(tx_queue->tx_ring_size,
 284				      sizeof(*tx_queue->tx_skbuff),
 285				      GFP_KERNEL);
 286		if (!tx_queue->tx_skbuff)
 287			goto cleanup;
 288
 289		for (k = 0; k < tx_queue->tx_ring_size; k++)
 290			tx_queue->tx_skbuff[k] = NULL;
 291	}
 292
 293	for (i = 0; i < priv->num_rx_queues; i++) {
 294		rx_queue = priv->rx_queue[i];
 295		rx_queue->rx_skbuff =
 296			kmalloc_array(rx_queue->rx_ring_size,
 297				      sizeof(*rx_queue->rx_skbuff),
 298				      GFP_KERNEL);
 299		if (!rx_queue->rx_skbuff)
 300			goto cleanup;
 301
 302		for (j = 0; j < rx_queue->rx_ring_size; j++)
 303			rx_queue->rx_skbuff[j] = NULL;
 304	}
 305
 306	if (gfar_init_bds(ndev))
 307		goto cleanup;
 308
 309	return 0;
 310
 311cleanup:
 312	free_skb_resources(priv);
 313	return -ENOMEM;
 314}
 315
 316static void gfar_init_tx_rx_base(struct gfar_private *priv)
 317{
 318	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 319	u32 __iomem *baddr;
 320	int i;
 321
 322	baddr = &regs->tbase0;
 323	for (i = 0; i < priv->num_tx_queues; i++) {
 324		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
 325		baddr += 2;
 326	}
 327
 328	baddr = &regs->rbase0;
 329	for (i = 0; i < priv->num_rx_queues; i++) {
 330		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
 331		baddr += 2;
 332	}
 333}
 334
 335static void gfar_rx_buff_size_config(struct gfar_private *priv)
 336{
 337	int frame_size = priv->ndev->mtu + ETH_HLEN;
 338
 339	/* set this when rx hw offload (TOE) functions are being used */
 340	priv->uses_rxfcb = 0;
 341
 342	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
 343		priv->uses_rxfcb = 1;
 344
 345	if (priv->hwts_rx_en)
 346		priv->uses_rxfcb = 1;
 347
 348	if (priv->uses_rxfcb)
 349		frame_size += GMAC_FCB_LEN;
 350
 351	frame_size += priv->padding;
 352
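	/* Round up to the next INCREMENTAL_BUFFER_SIZE boundary.  Note a
	 * full increment is added even when frame_size is already aligned
	 * (e.g. assuming the usual 512-byte increment, 1520 -> 1536 but
	 * also 1536 -> 2048).
	 */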
 353	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
 354		     INCREMENTAL_BUFFER_SIZE;
 355
 356	priv->rx_buffer_size = frame_size;
 357}
 358
 359static void gfar_mac_rx_config(struct gfar_private *priv)
 360{
 361	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 362	u32 rctrl = 0;
 363
 364	if (priv->rx_filer_enable) {
 365		rctrl |= RCTRL_FILREN;
 366		/* Program the RIR0 reg with the required distribution */
 367		if (priv->poll_mode == GFAR_SQ_POLLING)
 368			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
 369		else /* GFAR_MQ_POLLING */
 370			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
 371	}
 372
 373	/* Restore PROMISC mode */
 374	if (priv->ndev->flags & IFF_PROMISC)
 375		rctrl |= RCTRL_PROM;
 376
 377	if (priv->ndev->features & NETIF_F_RXCSUM)
 378		rctrl |= RCTRL_CHECKSUMMING;
 379
 380	if (priv->extended_hash)
 381		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
 382
 383	if (priv->padding) {
 384		rctrl &= ~RCTRL_PAL_MASK;
 385		rctrl |= RCTRL_PADDING(priv->padding);
 386	}
 387
 388	/* Enable HW time stamping if requested from user space */
 389	if (priv->hwts_rx_en)
 390		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
 391
 392	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
 393		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
 394
 395	/* Init rctrl based on our settings */
 396	gfar_write(&regs->rctrl, rctrl);
 397}
 398
 399static void gfar_mac_tx_config(struct gfar_private *priv)
 400{
 401	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 402	u32 tctrl = 0;
 403
 404	if (priv->ndev->features & NETIF_F_IP_CSUM)
 405		tctrl |= TCTRL_INIT_CSUM;
 406
 407	if (priv->prio_sched_en)
 408		tctrl |= TCTRL_TXSCHED_PRIO;
 409	else {
 410		tctrl |= TCTRL_TXSCHED_WRRS;
 411		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
 412		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
 413	}
 414
 415	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
 416		tctrl |= TCTRL_VLINS;
 417
 418	gfar_write(&regs->tctrl, tctrl);
 419}
 420
 421static void gfar_configure_coalescing(struct gfar_private *priv,
 422			       unsigned long tx_mask, unsigned long rx_mask)
 423{
 424	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 425	u32 __iomem *baddr;
 426
 427	if (priv->mode == MQ_MG_MODE) {
 428		int i = 0;
 429
 430		baddr = &regs->txic0;
 431		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
 432			gfar_write(baddr + i, 0);
 433			if (likely(priv->tx_queue[i]->txcoalescing))
 434				gfar_write(baddr + i, priv->tx_queue[i]->txic);
 435		}
 436
 437		baddr = &regs->rxic0;
 438		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
 439			gfar_write(baddr + i, 0);
 440			if (likely(priv->rx_queue[i]->rxcoalescing))
 441				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
 442		}
 443	} else {
 444		/* Backward compatible case -- even if we enable
  445		 * multiple queues, there's only a single reg to program
 446		 */
 447		gfar_write(&regs->txic, 0);
 448		if (likely(priv->tx_queue[0]->txcoalescing))
 449			gfar_write(&regs->txic, priv->tx_queue[0]->txic);
 450
 451		gfar_write(&regs->rxic, 0);
 452		if (unlikely(priv->rx_queue[0]->rxcoalescing))
 453			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
 454	}
 455}
 456
 457void gfar_configure_coalescing_all(struct gfar_private *priv)
 458{
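	/* the all-ones masks select every possible queue (up to 8 each way) */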
 459	gfar_configure_coalescing(priv, 0xFF, 0xFF);
 460}
 461
 462static struct net_device_stats *gfar_get_stats(struct net_device *dev)
 463{
 464	struct gfar_private *priv = netdev_priv(dev);
 465	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
 466	unsigned long tx_packets = 0, tx_bytes = 0;
 467	int i;
 468
 469	for (i = 0; i < priv->num_rx_queues; i++) {
 470		rx_packets += priv->rx_queue[i]->stats.rx_packets;
 471		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
 472		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
 473	}
 474
 475	dev->stats.rx_packets = rx_packets;
 476	dev->stats.rx_bytes   = rx_bytes;
 477	dev->stats.rx_dropped = rx_dropped;
 478
 479	for (i = 0; i < priv->num_tx_queues; i++) {
 480		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
 481		tx_packets += priv->tx_queue[i]->stats.tx_packets;
 482	}
 483
 484	dev->stats.tx_bytes   = tx_bytes;
 485	dev->stats.tx_packets = tx_packets;
 486
 487	return &dev->stats;
 488}
 489
 490static const struct net_device_ops gfar_netdev_ops = {
 491	.ndo_open = gfar_enet_open,
 492	.ndo_start_xmit = gfar_start_xmit,
 493	.ndo_stop = gfar_close,
 494	.ndo_change_mtu = gfar_change_mtu,
 495	.ndo_set_features = gfar_set_features,
 496	.ndo_set_rx_mode = gfar_set_multi,
 497	.ndo_tx_timeout = gfar_timeout,
 498	.ndo_do_ioctl = gfar_ioctl,
 499	.ndo_get_stats = gfar_get_stats,
 500	.ndo_set_mac_address = eth_mac_addr,
 501	.ndo_validate_addr = eth_validate_addr,
 502#ifdef CONFIG_NET_POLL_CONTROLLER
 503	.ndo_poll_controller = gfar_netpoll,
 504#endif
 505};
 506
 507static void gfar_ints_disable(struct gfar_private *priv)
 508{
 509	int i;
 510	for (i = 0; i < priv->num_grps; i++) {
 511		struct gfar __iomem *regs = priv->gfargrp[i].regs;
 512		/* Clear IEVENT */
 513		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
 514
 515		/* Initialize IMASK */
 516		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 517	}
 518}
 519
 520static void gfar_ints_enable(struct gfar_private *priv)
 521{
 522	int i;
 523	for (i = 0; i < priv->num_grps; i++) {
 524		struct gfar __iomem *regs = priv->gfargrp[i].regs;
 525		/* Unmask the interrupts we look for */
 526		gfar_write(&regs->imask, IMASK_DEFAULT);
 527	}
 528}
 529
 530void lock_tx_qs(struct gfar_private *priv)
 531{
 532	int i;
 533
 534	for (i = 0; i < priv->num_tx_queues; i++)
 535		spin_lock(&priv->tx_queue[i]->txlock);
 536}
 537
 538void unlock_tx_qs(struct gfar_private *priv)
 539{
 540	int i;
 541
 542	for (i = 0; i < priv->num_tx_queues; i++)
 543		spin_unlock(&priv->tx_queue[i]->txlock);
 544}
 545
 546static int gfar_alloc_tx_queues(struct gfar_private *priv)
 547{
 548	int i;
 549
 550	for (i = 0; i < priv->num_tx_queues; i++) {
 551		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
 552					    GFP_KERNEL);
 553		if (!priv->tx_queue[i])
 554			return -ENOMEM;
 555
 556		priv->tx_queue[i]->tx_skbuff = NULL;
 557		priv->tx_queue[i]->qindex = i;
 558		priv->tx_queue[i]->dev = priv->ndev;
 559		spin_lock_init(&(priv->tx_queue[i]->txlock));
 560	}
 561	return 0;
 562}
 563
 564static int gfar_alloc_rx_queues(struct gfar_private *priv)
 565{
 566	int i;
 567
 568	for (i = 0; i < priv->num_rx_queues; i++) {
 569		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
 570					    GFP_KERNEL);
 571		if (!priv->rx_queue[i])
 572			return -ENOMEM;
 573
 574		priv->rx_queue[i]->rx_skbuff = NULL;
 575		priv->rx_queue[i]->qindex = i;
 576		priv->rx_queue[i]->dev = priv->ndev;
 577	}
 578	return 0;
 579}
 580
 581static void gfar_free_tx_queues(struct gfar_private *priv)
 582{
 583	int i;
 584
 585	for (i = 0; i < priv->num_tx_queues; i++)
 586		kfree(priv->tx_queue[i]);
 587}
 588
 589static void gfar_free_rx_queues(struct gfar_private *priv)
 590{
 591	int i;
 592
 593	for (i = 0; i < priv->num_rx_queues; i++)
 594		kfree(priv->rx_queue[i]);
 595}
 596
 597static void unmap_group_regs(struct gfar_private *priv)
 598{
 599	int i;
 600
 601	for (i = 0; i < MAXGROUPS; i++)
 602		if (priv->gfargrp[i].regs)
 603			iounmap(priv->gfargrp[i].regs);
 604}
 605
 606static void free_gfar_dev(struct gfar_private *priv)
 607{
 608	int i, j;
 609
 610	for (i = 0; i < priv->num_grps; i++)
 611		for (j = 0; j < GFAR_NUM_IRQS; j++) {
 612			kfree(priv->gfargrp[i].irqinfo[j]);
 613			priv->gfargrp[i].irqinfo[j] = NULL;
 614		}
 615
 616	free_netdev(priv->ndev);
 617}
 618
 619static void disable_napi(struct gfar_private *priv)
 620{
 621	int i;
 622
 623	for (i = 0; i < priv->num_grps; i++) {
 624		napi_disable(&priv->gfargrp[i].napi_rx);
 625		napi_disable(&priv->gfargrp[i].napi_tx);
 626	}
 627}
 628
 629static void enable_napi(struct gfar_private *priv)
 630{
 631	int i;
 632
 633	for (i = 0; i < priv->num_grps; i++) {
 634		napi_enable(&priv->gfargrp[i].napi_rx);
 635		napi_enable(&priv->gfargrp[i].napi_tx);
 636	}
 637}
 638
 639static int gfar_parse_group(struct device_node *np,
 640			    struct gfar_private *priv, const char *model)
 641{
 642	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
 643	int i;
 644
 645	for (i = 0; i < GFAR_NUM_IRQS; i++) {
 646		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
 647					  GFP_KERNEL);
 648		if (!grp->irqinfo[i])
 649			return -ENOMEM;
 650	}
 651
 652	grp->regs = of_iomap(np, 0);
 653	if (!grp->regs)
 654		return -ENOMEM;
 655
 656	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
 657
  658	/* If we aren't the FEC, we have multiple interrupts */
 659	if (model && strcasecmp(model, "FEC")) {
 660		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
 661		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
 662		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
 663		    gfar_irq(grp, RX)->irq == NO_IRQ ||
 664		    gfar_irq(grp, ER)->irq == NO_IRQ)
 665			return -EINVAL;
 666	}
 667
 668	grp->priv = priv;
 669	spin_lock_init(&grp->grplock);
 670	if (priv->mode == MQ_MG_MODE) {
 671		u32 *rxq_mask, *txq_mask;
 672		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
 673		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
 674
 675		if (priv->poll_mode == GFAR_SQ_POLLING) {
 676			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
 677			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
 678			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
 679		} else { /* GFAR_MQ_POLLING */
 680			grp->rx_bit_map = rxq_mask ?
 681			*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
 682			grp->tx_bit_map = txq_mask ?
 683			*txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
 684		}
 685	} else {
 686		grp->rx_bit_map = 0xFF;
 687		grp->tx_bit_map = 0xFF;
 688	}
 689
  690	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
  691	 * right to left, so we need to reverse the 8 bits to get the q index
  692	 */
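	/* e.g. an rx_bit_map of 0x80 (only q0 set) becomes 0x01 after
	 * bitrev8(), so for_each_set_bit() reports bit 0 -> queue 0
	 */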
 693	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
 694	grp->tx_bit_map = bitrev8(grp->tx_bit_map);
 695
 696	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
 697	 * also assign queues to groups
 698	 */
 699	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
 700		if (!grp->rx_queue)
 701			grp->rx_queue = priv->rx_queue[i];
 702		grp->num_rx_queues++;
 703		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
 704		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
 705		priv->rx_queue[i]->grp = grp;
 706	}
 707
 708	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
 709		if (!grp->tx_queue)
 710			grp->tx_queue = priv->tx_queue[i];
 711		grp->num_tx_queues++;
 712		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
 713		priv->tqueue |= (TQUEUE_EN0 >> i);
 714		priv->tx_queue[i]->grp = grp;
 715	}
 716
 717	priv->num_grps++;
 718
 719	return 0;
 720}
 721
 722static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 723{
 724	const char *model;
 725	const char *ctype;
 726	const void *mac_addr;
 727	int err = 0, i;
 728	struct net_device *dev = NULL;
 729	struct gfar_private *priv = NULL;
 730	struct device_node *np = ofdev->dev.of_node;
 731	struct device_node *child = NULL;
 732	const u32 *stash;
 733	const u32 *stash_len;
 734	const u32 *stash_idx;
 735	unsigned int num_tx_qs, num_rx_qs;
 736	u32 *tx_queues, *rx_queues;
 737	unsigned short mode, poll_mode;
 738
 739	if (!np || !of_device_is_available(np))
 740		return -ENODEV;
 741
 742	if (of_device_is_compatible(np, "fsl,etsec2")) {
 743		mode = MQ_MG_MODE;
 744		poll_mode = GFAR_SQ_POLLING;
 745	} else {
 746		mode = SQ_SG_MODE;
 747		poll_mode = GFAR_SQ_POLLING;
 748	}
 749
 750	/* parse the num of HW tx and rx queues */
 751	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
 752	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
 753
 754	if (mode == SQ_SG_MODE) {
 755		num_tx_qs = 1;
 756		num_rx_qs = 1;
 757	} else { /* MQ_MG_MODE */
 758		/* get the actual number of supported groups */
 759		unsigned int num_grps = of_get_available_child_count(np);
 760
 761		if (num_grps == 0 || num_grps > MAXGROUPS) {
 762			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
 763				num_grps);
 764			pr_err("Cannot do alloc_etherdev, aborting\n");
 765			return -EINVAL;
 766		}
 767
 768		if (poll_mode == GFAR_SQ_POLLING) {
 769			num_tx_qs = num_grps; /* one txq per int group */
 770			num_rx_qs = num_grps; /* one rxq per int group */
 771		} else { /* GFAR_MQ_POLLING */
 772			num_tx_qs = tx_queues ? *tx_queues : 1;
 773			num_rx_qs = rx_queues ? *rx_queues : 1;
 774		}
 775	}
 776
 777	if (num_tx_qs > MAX_TX_QS) {
 778		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
 779		       num_tx_qs, MAX_TX_QS);
 780		pr_err("Cannot do alloc_etherdev, aborting\n");
 781		return -EINVAL;
 782	}
 783
 784	if (num_rx_qs > MAX_RX_QS) {
 785		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
 786		       num_rx_qs, MAX_RX_QS);
 787		pr_err("Cannot do alloc_etherdev, aborting\n");
 788		return -EINVAL;
 789	}
 790
 791	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
 792	dev = *pdev;
 793	if (NULL == dev)
 794		return -ENOMEM;
 795
 796	priv = netdev_priv(dev);
 797	priv->ndev = dev;
 798
 799	priv->mode = mode;
 800	priv->poll_mode = poll_mode;
 801
 802	priv->num_tx_queues = num_tx_qs;
 803	netif_set_real_num_rx_queues(dev, num_rx_qs);
 804	priv->num_rx_queues = num_rx_qs;
 805
 806	err = gfar_alloc_tx_queues(priv);
 807	if (err)
 808		goto tx_alloc_failed;
 809
 810	err = gfar_alloc_rx_queues(priv);
 811	if (err)
 812		goto rx_alloc_failed;
 813
 814	/* Init Rx queue filer rule set linked list */
 815	INIT_LIST_HEAD(&priv->rx_list.list);
 816	priv->rx_list.count = 0;
 817	mutex_init(&priv->rx_queue_access);
 818
 819	model = of_get_property(np, "model", NULL);
 820
 821	for (i = 0; i < MAXGROUPS; i++)
 822		priv->gfargrp[i].regs = NULL;
 823
 824	/* Parse and initialize group specific information */
 825	if (priv->mode == MQ_MG_MODE) {
 826		for_each_child_of_node(np, child) {
 827			err = gfar_parse_group(child, priv, model);
 828			if (err)
 829				goto err_grp_init;
 830		}
 831	} else { /* SQ_SG_MODE */
 832		err = gfar_parse_group(np, priv, model);
 833		if (err)
 834			goto err_grp_init;
 835	}
 836
 837	stash = of_get_property(np, "bd-stash", NULL);
 838
 839	if (stash) {
 840		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
 841		priv->bd_stash_en = 1;
 842	}
 843
 844	stash_len = of_get_property(np, "rx-stash-len", NULL);
 845
 846	if (stash_len)
 847		priv->rx_stash_size = *stash_len;
 848
 849	stash_idx = of_get_property(np, "rx-stash-idx", NULL);
 850
 851	if (stash_idx)
 852		priv->rx_stash_index = *stash_idx;
 853
 854	if (stash_len || stash_idx)
 855		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
 856
 857	mac_addr = of_get_mac_address(np);
 858
 859	if (mac_addr)
 860		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 861
 862	if (model && !strcasecmp(model, "TSEC"))
 863		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
 864				     FSL_GIANFAR_DEV_HAS_COALESCE |
 865				     FSL_GIANFAR_DEV_HAS_RMON |
 866				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
 867
 868	if (model && !strcasecmp(model, "eTSEC"))
 869		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
 870				     FSL_GIANFAR_DEV_HAS_COALESCE |
 871				     FSL_GIANFAR_DEV_HAS_RMON |
 872				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
 873				     FSL_GIANFAR_DEV_HAS_CSUM |
 874				     FSL_GIANFAR_DEV_HAS_VLAN |
 875				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
 876				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
 877				     FSL_GIANFAR_DEV_HAS_TIMER;
 878
 879	ctype = of_get_property(np, "phy-connection-type", NULL);
 880
 881	/* We only care about rgmii-id.  The rest are autodetected */
 882	if (ctype && !strcmp(ctype, "rgmii-id"))
 883		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
 884	else
 885		priv->interface = PHY_INTERFACE_MODE_MII;
 886
 887	if (of_get_property(np, "fsl,magic-packet", NULL))
 888		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
 889
 890	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
 891
 892	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
 893	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
 894
 895	return 0;
 896
 897err_grp_init:
 898	unmap_group_regs(priv);
 899rx_alloc_failed:
 900	gfar_free_rx_queues(priv);
 901tx_alloc_failed:
 902	gfar_free_tx_queues(priv);
 903	free_gfar_dev(priv);
 904	return err;
 905}
 906
 907static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
 908{
 909	struct hwtstamp_config config;
 910	struct gfar_private *priv = netdev_priv(netdev);
 911
 912	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
 913		return -EFAULT;
 914
 915	/* reserved for future extensions */
 916	if (config.flags)
 917		return -EINVAL;
 918
 919	switch (config.tx_type) {
 920	case HWTSTAMP_TX_OFF:
 921		priv->hwts_tx_en = 0;
 922		break;
 923	case HWTSTAMP_TX_ON:
 924		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
 925			return -ERANGE;
 926		priv->hwts_tx_en = 1;
 927		break;
 928	default:
 929		return -ERANGE;
 930	}
 931
 932	switch (config.rx_filter) {
 933	case HWTSTAMP_FILTER_NONE:
 934		if (priv->hwts_rx_en) {
 935			priv->hwts_rx_en = 0;
 936			reset_gfar(netdev);
 937		}
 938		break;
 939	default:
 940		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
 941			return -ERANGE;
 942		if (!priv->hwts_rx_en) {
 943			priv->hwts_rx_en = 1;
 944			reset_gfar(netdev);
 945		}
 946		config.rx_filter = HWTSTAMP_FILTER_ALL;
 947		break;
 948	}
 949
 950	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 951		-EFAULT : 0;
 952}
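/* For reference, the handler above is reached through the SIOCSHWTSTAMP
 * ioctl.  A rough userspace sketch (illustrative only; socket setup and
 * error handling are omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */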
 953
 954static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
 955{
 956	struct hwtstamp_config config;
 957	struct gfar_private *priv = netdev_priv(netdev);
 958
 959	config.flags = 0;
 960	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
 961	config.rx_filter = (priv->hwts_rx_en ?
 962			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
 963
 964	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 965		-EFAULT : 0;
 966}
 967
 968static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 969{
 970	struct gfar_private *priv = netdev_priv(dev);
 971
 972	if (!netif_running(dev))
 973		return -EINVAL;
 974
 975	if (cmd == SIOCSHWTSTAMP)
 976		return gfar_hwtstamp_set(dev, rq);
 977	if (cmd == SIOCGHWTSTAMP)
 978		return gfar_hwtstamp_get(dev, rq);
 979
 980	if (!priv->phydev)
 981		return -ENODEV;
 982
 983	return phy_mii_ioctl(priv->phydev, rq, cmd);
 984}
 985
 986static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
 987				   u32 class)
 988{
 989	u32 rqfpr = FPR_FILER_MASK;
 990	u32 rqfcr = 0x0;
 991
 992	rqfar--;
 993	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
 994	priv->ftp_rqfpr[rqfar] = rqfpr;
 995	priv->ftp_rqfcr[rqfar] = rqfcr;
 996	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
 997
 998	rqfar--;
 999	rqfcr = RQFCR_CMP_NOMATCH;
1000	priv->ftp_rqfpr[rqfar] = rqfpr;
1001	priv->ftp_rqfcr[rqfar] = rqfcr;
1002	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1003
1004	rqfar--;
1005	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
1006	rqfpr = class;
1007	priv->ftp_rqfcr[rqfar] = rqfcr;
1008	priv->ftp_rqfpr[rqfar] = rqfpr;
1009	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1010
1011	rqfar--;
1012	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
1013	rqfpr = class;
1014	priv->ftp_rqfcr[rqfar] = rqfcr;
1015	priv->ftp_rqfpr[rqfar] = rqfpr;
1016	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1017
1018	return rqfar;
1019}
1020
1021static void gfar_init_filer_table(struct gfar_private *priv)
1022{
1023	int i = 0x0;
1024	u32 rqfar = MAX_FILER_IDX;
1025	u32 rqfcr = 0x0;
1026	u32 rqfpr = FPR_FILER_MASK;
1027
1028	/* Default rule */
1029	rqfcr = RQFCR_CMP_MATCH;
1030	priv->ftp_rqfcr[rqfar] = rqfcr;
1031	priv->ftp_rqfpr[rqfar] = rqfpr;
1032	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1033
1034	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
1035	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
1036	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
1037	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
1038	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
1039	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
1040
 1041	/* cur_filer_idx indicates the first non-masked rule */
1042	priv->cur_filer_idx = rqfar;
1043
1044	/* Rest are masked rules */
1045	rqfcr = RQFCR_CMP_NOMATCH;
1046	for (i = 0; i < rqfar; i++) {
1047		priv->ftp_rqfcr[i] = rqfcr;
1048		priv->ftp_rqfpr[i] = rqfpr;
1049		gfar_write_filer(priv, i, rqfcr, rqfpr);
1050	}
1051}
1052
1053static void __gfar_detect_errata_83xx(struct gfar_private *priv)
1054{
1055	unsigned int pvr = mfspr(SPRN_PVR);
1056	unsigned int svr = mfspr(SPRN_SVR);
1057	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
1058	unsigned int rev = svr & 0xffff;
1059
1060	/* MPC8313 Rev 2.0 and higher; All MPC837x */
1061	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
1062	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1063		priv->errata |= GFAR_ERRATA_74;
1064
1065	/* MPC8313 and MPC837x all rev */
1066	if ((pvr == 0x80850010 && mod == 0x80b0) ||
1067	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1068		priv->errata |= GFAR_ERRATA_76;
1069
1070	/* MPC8313 Rev < 2.0 */
1071	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
1072		priv->errata |= GFAR_ERRATA_12;
1073}
1074
1075static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1076{
1077	unsigned int svr = mfspr(SPRN_SVR);
1078
1079	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
1080		priv->errata |= GFAR_ERRATA_12;
1081	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
1082	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
1083		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
1084}
1085
1086static void gfar_detect_errata(struct gfar_private *priv)
1087{
1088	struct device *dev = &priv->ofdev->dev;
1089
1090	/* no plans to fix */
1091	priv->errata |= GFAR_ERRATA_A002;
1092
1093	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
1094		__gfar_detect_errata_85xx(priv);
1095	else /* non-mpc85xx parts, i.e. e300 core based */
1096		__gfar_detect_errata_83xx(priv);
1097
1098	if (priv->errata)
1099		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
1100			 priv->errata);
1101}
1102
1103void gfar_mac_reset(struct gfar_private *priv)
1104{
1105	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1106	u32 tempval;
1107
1108	/* Reset MAC layer */
1109	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1110
1111	/* We need to delay at least 3 TX clocks */
1112	udelay(3);
1113
1114	/* the soft reset bit is not self-resetting, so we need to
1115	 * clear it before resuming normal operation
1116	 */
1117	gfar_write(&regs->maccfg1, 0);
1118
1119	udelay(3);
1120
1121	/* Compute rx_buff_size based on config flags */
1122	gfar_rx_buff_size_config(priv);
1123
1124	/* Initialize the max receive frame/buffer lengths */
1125	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1126	gfar_write(&regs->mrblr, priv->rx_buffer_size);
1127
1128	/* Initialize the Minimum Frame Length Register */
1129	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1130
1131	/* Initialize MACCFG2. */
1132	tempval = MACCFG2_INIT_SETTINGS;
1133
1134	/* If the mtu is larger than the max size for standard
1135	 * ethernet frames (ie, a jumbo frame), then set maccfg2
1136	 * to allow huge frames, and to check the length
1137	 */
1138	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
1139	    gfar_has_errata(priv, GFAR_ERRATA_74))
1140		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1141
1142	gfar_write(&regs->maccfg2, tempval);
1143
1144	/* Clear mac addr hash registers */
1145	gfar_write(&regs->igaddr0, 0);
1146	gfar_write(&regs->igaddr1, 0);
1147	gfar_write(&regs->igaddr2, 0);
1148	gfar_write(&regs->igaddr3, 0);
1149	gfar_write(&regs->igaddr4, 0);
1150	gfar_write(&regs->igaddr5, 0);
1151	gfar_write(&regs->igaddr6, 0);
1152	gfar_write(&regs->igaddr7, 0);
1153
1154	gfar_write(&regs->gaddr0, 0);
1155	gfar_write(&regs->gaddr1, 0);
1156	gfar_write(&regs->gaddr2, 0);
1157	gfar_write(&regs->gaddr3, 0);
1158	gfar_write(&regs->gaddr4, 0);
1159	gfar_write(&regs->gaddr5, 0);
1160	gfar_write(&regs->gaddr6, 0);
1161	gfar_write(&regs->gaddr7, 0);
1162
1163	if (priv->extended_hash)
1164		gfar_clear_exact_match(priv->ndev);
1165
1166	gfar_mac_rx_config(priv);
1167
1168	gfar_mac_tx_config(priv);
1169
1170	gfar_set_mac_address(priv->ndev);
1171
1172	gfar_set_multi(priv->ndev);
1173
1174	/* clear ievent and imask before configuring coalescing */
1175	gfar_ints_disable(priv);
1176
1177	/* Configure the coalescing support */
1178	gfar_configure_coalescing_all(priv);
1179}
1180
1181static void gfar_hw_init(struct gfar_private *priv)
1182{
1183	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1184	u32 attrs;
1185
1186	/* Stop the DMA engine now, in case it was running before
1187	 * (The firmware could have used it, and left it running).
1188	 */
1189	gfar_halt(priv);
1190
1191	gfar_mac_reset(priv);
1192
1193	/* Zero out the rmon mib registers if it has them */
1194	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1195		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1196
1197		/* Mask off the CAM interrupts */
1198		gfar_write(&regs->rmon.cam1, 0xffffffff);
1199		gfar_write(&regs->rmon.cam2, 0xffffffff);
1200	}
1201
1202	/* Initialize ECNTRL */
1203	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1204
1205	/* Set the extraction length and index */
1206	attrs = ATTRELI_EL(priv->rx_stash_size) |
1207		ATTRELI_EI(priv->rx_stash_index);
1208
1209	gfar_write(&regs->attreli, attrs);
1210
1211	/* Start with defaults, and add stashing
1212	 * depending on driver parameters
1213	 */
1214	attrs = ATTR_INIT_SETTINGS;
1215
1216	if (priv->bd_stash_en)
1217		attrs |= ATTR_BDSTASH;
1218
1219	if (priv->rx_stash_size != 0)
1220		attrs |= ATTR_BUFSTASH;
1221
1222	gfar_write(&regs->attr, attrs);
1223
1224	/* FIFO configs */
1225	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1226	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1227	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1228
1229	/* Program the interrupt steering regs, only for MG devices */
1230	if (priv->num_grps > 1)
1231		gfar_write_isrg(priv);
1232}
1233
1234static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
1235{
1236	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1237
1238	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1239		priv->extended_hash = 1;
1240		priv->hash_width = 9;
1241
1242		priv->hash_regs[0] = &regs->igaddr0;
1243		priv->hash_regs[1] = &regs->igaddr1;
1244		priv->hash_regs[2] = &regs->igaddr2;
1245		priv->hash_regs[3] = &regs->igaddr3;
1246		priv->hash_regs[4] = &regs->igaddr4;
1247		priv->hash_regs[5] = &regs->igaddr5;
1248		priv->hash_regs[6] = &regs->igaddr6;
1249		priv->hash_regs[7] = &regs->igaddr7;
1250		priv->hash_regs[8] = &regs->gaddr0;
1251		priv->hash_regs[9] = &regs->gaddr1;
1252		priv->hash_regs[10] = &regs->gaddr2;
1253		priv->hash_regs[11] = &regs->gaddr3;
1254		priv->hash_regs[12] = &regs->gaddr4;
1255		priv->hash_regs[13] = &regs->gaddr5;
1256		priv->hash_regs[14] = &regs->gaddr6;
1257		priv->hash_regs[15] = &regs->gaddr7;
1258
1259	} else {
1260		priv->extended_hash = 0;
1261		priv->hash_width = 8;
1262
1263		priv->hash_regs[0] = &regs->gaddr0;
1264		priv->hash_regs[1] = &regs->gaddr1;
1265		priv->hash_regs[2] = &regs->gaddr2;
1266		priv->hash_regs[3] = &regs->gaddr3;
1267		priv->hash_regs[4] = &regs->gaddr4;
1268		priv->hash_regs[5] = &regs->gaddr5;
1269		priv->hash_regs[6] = &regs->gaddr6;
1270		priv->hash_regs[7] = &regs->gaddr7;
1271	}
1272}
1273
1274/* Set up the ethernet device structure, private data,
1275 * and anything else we need before we start
1276 */
1277static int gfar_probe(struct platform_device *ofdev)
1278{
1279	struct net_device *dev = NULL;
1280	struct gfar_private *priv = NULL;
1281	int err = 0, i;
1282
1283	err = gfar_of_init(ofdev, &dev);
1284
1285	if (err)
1286		return err;
1287
1288	priv = netdev_priv(dev);
1289	priv->ndev = dev;
1290	priv->ofdev = ofdev;
1291	priv->dev = &ofdev->dev;
1292	SET_NETDEV_DEV(dev, &ofdev->dev);
1293
1294	spin_lock_init(&priv->bflock);
1295	INIT_WORK(&priv->reset_task, gfar_reset_task);
1296
1297	platform_set_drvdata(ofdev, priv);
1298
1299	gfar_detect_errata(priv);
1300
1301	/* Set the dev->base_addr to the gfar reg region */
1302	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1303
1304	/* Fill in the dev structure */
1305	dev->watchdog_timeo = TX_TIMEOUT;
1306	dev->mtu = 1500;
1307	dev->netdev_ops = &gfar_netdev_ops;
1308	dev->ethtool_ops = &gfar_ethtool_ops;
1309
 1310	/* Register for NAPI: we are registering NAPI for each group */
1311	for (i = 0; i < priv->num_grps; i++) {
1312		if (priv->poll_mode == GFAR_SQ_POLLING) {
1313			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1314				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1315			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1316				       gfar_poll_tx_sq, 2);
1317		} else {
1318			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1319				       gfar_poll_rx, GFAR_DEV_WEIGHT);
1320			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1321				       gfar_poll_tx, 2);
1322		}
1323	}
1324
1325	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1326		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1327				   NETIF_F_RXCSUM;
1328		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1329				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1330	}
1331
1332	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1333		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1334				    NETIF_F_HW_VLAN_CTAG_RX;
1335		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1336	}
1337
1338	gfar_init_addr_hash_table(priv);
1339
1340	/* Insert receive time stamps into padding alignment bytes */
1341	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1342		priv->padding = 8;
1343
1344	if (dev->features & NETIF_F_IP_CSUM ||
1345	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1346		dev->needed_headroom = GMAC_FCB_LEN;
1347
1348	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1349
1350	/* Initializing some of the rx/tx queue level parameters */
1351	for (i = 0; i < priv->num_tx_queues; i++) {
1352		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1353		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1354		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1355		priv->tx_queue[i]->txic = DEFAULT_TXIC;
1356	}
1357
1358	for (i = 0; i < priv->num_rx_queues; i++) {
1359		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1360		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1361		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1362	}
1363
1364	/* always enable rx filer */
1365	priv->rx_filer_enable = 1;
1366	/* Enable most messages by default */
 1367	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
 1368	/* use priority h/w tx queue scheduling for single queue devices */
1369	if (priv->num_tx_queues == 1)
1370		priv->prio_sched_en = 1;
1371
1372	set_bit(GFAR_DOWN, &priv->state);
1373
1374	gfar_hw_init(priv);
1375
1376	err = register_netdev(dev);
1377
1378	if (err) {
1379		pr_err("%s: Cannot register net device, aborting\n", dev->name);
1380		goto register_fail;
1381	}
1382
1383	/* Carrier starts down, phylib will bring it up */
1384	netif_carrier_off(dev);
1385
1386	device_init_wakeup(&dev->dev,
1387			   priv->device_flags &
1388			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1389
1390	/* fill out IRQ number and name fields */
1391	for (i = 0; i < priv->num_grps; i++) {
1392		struct gfar_priv_grp *grp = &priv->gfargrp[i];
1393		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1394			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
1395				dev->name, "_g", '0' + i, "_tx");
1396			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
1397				dev->name, "_g", '0' + i, "_rx");
1398			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
1399				dev->name, "_g", '0' + i, "_er");
1400		} else
1401			strcpy(gfar_irq(grp, TX)->name, dev->name);
1402	}
1403
1404	/* Initialize the filer table */
1405	gfar_init_filer_table(priv);
1406
1407	/* Print out the device info */
1408	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1409
1410	/* Even more device info helps when determining which kernel
1411	 * provided which set of benchmarks.
1412	 */
1413	netdev_info(dev, "Running with NAPI enabled\n");
1414	for (i = 0; i < priv->num_rx_queues; i++)
1415		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1416			    i, priv->rx_queue[i]->rx_ring_size);
1417	for (i = 0; i < priv->num_tx_queues; i++)
1418		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1419			    i, priv->tx_queue[i]->tx_ring_size);
1420
1421	return 0;
1422
1423register_fail:
1424	unmap_group_regs(priv);
1425	gfar_free_rx_queues(priv);
1426	gfar_free_tx_queues(priv);
1427	if (priv->phy_node)
1428		of_node_put(priv->phy_node);
1429	if (priv->tbi_node)
1430		of_node_put(priv->tbi_node);
1431	free_gfar_dev(priv);
1432	return err;
1433}
1434
1435static int gfar_remove(struct platform_device *ofdev)
1436{
1437	struct gfar_private *priv = platform_get_drvdata(ofdev);
1438
1439	if (priv->phy_node)
1440		of_node_put(priv->phy_node);
1441	if (priv->tbi_node)
1442		of_node_put(priv->tbi_node);
1443
1444	unregister_netdev(priv->ndev);
1445	unmap_group_regs(priv);
1446	gfar_free_rx_queues(priv);
1447	gfar_free_tx_queues(priv);
1448	free_gfar_dev(priv);
1449
1450	return 0;
1451}
1452
1453#ifdef CONFIG_PM
1454
1455static int gfar_suspend(struct device *dev)
1456{
1457	struct gfar_private *priv = dev_get_drvdata(dev);
1458	struct net_device *ndev = priv->ndev;
1459	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1460	unsigned long flags;
1461	u32 tempval;
1462
1463	int magic_packet = priv->wol_en &&
1464			   (priv->device_flags &
1465			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1466
1467	netif_device_detach(ndev);
1468
1469	if (netif_running(ndev)) {
1470
1471		local_irq_save(flags);
1472		lock_tx_qs(priv);
1473
1474		gfar_halt_nodisable(priv);
1475
1476		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
1477		tempval = gfar_read(&regs->maccfg1);
1478
1479		tempval &= ~MACCFG1_TX_EN;
1480
1481		if (!magic_packet)
1482			tempval &= ~MACCFG1_RX_EN;
1483
1484		gfar_write(&regs->maccfg1, tempval);
1485
1486		unlock_tx_qs(priv);
1487		local_irq_restore(flags);
1488
1489		disable_napi(priv);
1490
1491		if (magic_packet) {
1492			/* Enable interrupt on Magic Packet */
1493			gfar_write(&regs->imask, IMASK_MAG);
1494
1495			/* Enable Magic Packet mode */
1496			tempval = gfar_read(&regs->maccfg2);
1497			tempval |= MACCFG2_MPEN;
1498			gfar_write(&regs->maccfg2, tempval);
1499		} else {
1500			phy_stop(priv->phydev);
1501		}
1502	}
1503
1504	return 0;
1505}
1506
1507static int gfar_resume(struct device *dev)
1508{
1509	struct gfar_private *priv = dev_get_drvdata(dev);
1510	struct net_device *ndev = priv->ndev;
1511	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1512	unsigned long flags;
1513	u32 tempval;
1514	int magic_packet = priv->wol_en &&
1515			   (priv->device_flags &
1516			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1517
1518	if (!netif_running(ndev)) {
1519		netif_device_attach(ndev);
1520		return 0;
1521	}
1522
1523	if (!magic_packet && priv->phydev)
1524		phy_start(priv->phydev);
1525
1526	/* Disable Magic Packet mode, in case something
1527	 * else woke us up.
1528	 */
1529	local_irq_save(flags);
1530	lock_tx_qs(priv);
1531
1532	tempval = gfar_read(&regs->maccfg2);
1533	tempval &= ~MACCFG2_MPEN;
1534	gfar_write(&regs->maccfg2, tempval);
1535
1536	gfar_start(priv);
1537
1538	unlock_tx_qs(priv);
1539	local_irq_restore(flags);
1540
1541	netif_device_attach(ndev);
1542
1543	enable_napi(priv);
1544
1545	return 0;
1546}
1547
1548static int gfar_restore(struct device *dev)
1549{
1550	struct gfar_private *priv = dev_get_drvdata(dev);
1551	struct net_device *ndev = priv->ndev;
1552
1553	if (!netif_running(ndev)) {
1554		netif_device_attach(ndev);
1555
1556		return 0;
1557	}
1558
1559	if (gfar_init_bds(ndev)) {
1560		free_skb_resources(priv);
1561		return -ENOMEM;
1562	}
1563
1564	gfar_mac_reset(priv);
1565
1566	gfar_init_tx_rx_base(priv);
1567
1568	gfar_start(priv);
1569
1570	priv->oldlink = 0;
1571	priv->oldspeed = 0;
1572	priv->oldduplex = -1;
1573
1574	if (priv->phydev)
1575		phy_start(priv->phydev);
1576
1577	netif_device_attach(ndev);
1578	enable_napi(priv);
1579
1580	return 0;
1581}
1582
1583static struct dev_pm_ops gfar_pm_ops = {
1584	.suspend = gfar_suspend,
1585	.resume = gfar_resume,
1586	.freeze = gfar_suspend,
1587	.thaw = gfar_resume,
1588	.restore = gfar_restore,
1589};
1590
1591#define GFAR_PM_OPS (&gfar_pm_ops)
1592
1593#else
1594
1595#define GFAR_PM_OPS NULL
1596
1597#endif
1598
1599/* Reads the controller's registers to determine what interface
1600 * connects it to the PHY.
1601 */
1602static phy_interface_t gfar_get_interface(struct net_device *dev)
1603{
1604	struct gfar_private *priv = netdev_priv(dev);
1605	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1606	u32 ecntrl;
1607
1608	ecntrl = gfar_read(&regs->ecntrl);
1609
1610	if (ecntrl & ECNTRL_SGMII_MODE)
1611		return PHY_INTERFACE_MODE_SGMII;
1612
1613	if (ecntrl & ECNTRL_TBI_MODE) {
1614		if (ecntrl & ECNTRL_REDUCED_MODE)
1615			return PHY_INTERFACE_MODE_RTBI;
1616		else
1617			return PHY_INTERFACE_MODE_TBI;
1618	}
1619
1620	if (ecntrl & ECNTRL_REDUCED_MODE) {
1621		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1622			return PHY_INTERFACE_MODE_RMII;
1623		}
1624		else {
1625			phy_interface_t interface = priv->interface;
1626
1627			/* This isn't autodetected right now, so it must
1628			 * be set by the device tree or platform code.
1629			 */
1630			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1631				return PHY_INTERFACE_MODE_RGMII_ID;
1632
1633			return PHY_INTERFACE_MODE_RGMII;
1634		}
1635	}
1636
1637	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1638		return PHY_INTERFACE_MODE_GMII;
1639
1640	return PHY_INTERFACE_MODE_MII;
1641}
1642
1643
1644/* Initializes driver's PHY state, and attaches to the PHY.
1645 * Returns 0 on success.
1646 */
1647static int init_phy(struct net_device *dev)
1648{
1649	struct gfar_private *priv = netdev_priv(dev);
1650	uint gigabit_support =
1651		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1652		GFAR_SUPPORTED_GBIT : 0;
1653	phy_interface_t interface;
1654
1655	priv->oldlink = 0;
1656	priv->oldspeed = 0;
1657	priv->oldduplex = -1;
1658
1659	interface = gfar_get_interface(dev);
1660
1661	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1662				      interface);
1663	if (!priv->phydev)
1664		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1665							 interface);
1666	if (!priv->phydev) {
1667		dev_err(&dev->dev, "could not attach to PHY\n");
1668		return -ENODEV;
1669	}
1670
1671	if (interface == PHY_INTERFACE_MODE_SGMII)
1672		gfar_configure_serdes(dev);
1673
1674	/* Remove any features not supported by the controller */
1675	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1676	priv->phydev->advertising = priv->phydev->supported;
1677
1678	return 0;
1679}
1680
1681/* Initialize TBI PHY interface for communicating with the
1682 * SERDES lynx PHY on the chip.  We communicate with this PHY
1683 * through the MDIO bus on each controller, treating it as a
1684 * "normal" PHY at the address found in the TBIPA register.  We assume
1685 * that the TBIPA register is valid.  Either the MDIO bus code will set
1686 * it to a value that doesn't conflict with other PHYs on the bus, or the
1687 * value doesn't matter, as there are no other PHYs on the bus.
1688 */
1689static void gfar_configure_serdes(struct net_device *dev)
1690{
1691	struct gfar_private *priv = netdev_priv(dev);
1692	struct phy_device *tbiphy;
1693
1694	if (!priv->tbi_node) {
1695		dev_warn(&dev->dev, "error: SGMII mode requires that the "
1696				    "device tree specify a tbi-handle\n");
1697		return;
1698	}
1699
1700	tbiphy = of_phy_find_device(priv->tbi_node);
1701	if (!tbiphy) {
1702		dev_err(&dev->dev, "error: Could not get TBI device\n");
1703		return;
1704	}
1705
1706	/* If the link is already up, we must already be ok, and don't need to
1707	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1708	 * everything for us?  Resetting it takes the link down and requires
1709	 * several seconds for it to come back.
1710	 */
1711	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1712		return;
1713
 1714	/* Single clk mode, mii mode off (for serdes communication) */
1715	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1716
1717	phy_write(tbiphy, MII_ADVERTISE,
1718		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1719		  ADVERTISE_1000XPSE_ASYM);
1720
1721	phy_write(tbiphy, MII_BMCR,
1722		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1723		  BMCR_SPEED1000);
1724}
1725
1726static int __gfar_is_rx_idle(struct gfar_private *priv)
1727{
1728	u32 res;
1729
 1730	/* Normally TSEC should not hang on GRS commands, so we should
1731	 * actually wait for IEVENT_GRSC flag.
1732	 */
1733	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1734		return 0;
1735
1736	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1737	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1738	 * and the Rx can be safely reset.
1739	 */
1740	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1741	res &= 0x7f807f80;
1742	if ((res & 0xffff) == (res >> 16))
1743		return 1;
1744
1745	return 0;
1746}
1747
1748/* Halt the receive and transmit queues */
1749static void gfar_halt_nodisable(struct gfar_private *priv)
1750{
1751	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1752	u32 tempval;
1753
1754	gfar_ints_disable(priv);
1755
1756	/* Stop the DMA, and wait for it to stop */
1757	tempval = gfar_read(&regs->dmactrl);
1758	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
1759	    (DMACTRL_GRS | DMACTRL_GTS)) {
1760		int ret;
1761
1762		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1763		gfar_write(&regs->dmactrl, tempval);
1764
1765		do {
1766			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1767				 (IEVENT_GRSC | IEVENT_GTSC)) ==
1768				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1769			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1770				ret = __gfar_is_rx_idle(priv);
1771		} while (!ret);
1772	}
1773}
1774
1775/* Halt the receive and transmit queues */
1776void gfar_halt(struct gfar_private *priv)
1777{
1778	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1779	u32 tempval;
1780
 1781	/* Disable the Rx/Tx hw queues */
1782	gfar_write(&regs->rqueue, 0);
1783	gfar_write(&regs->tqueue, 0);
1784
1785	mdelay(10);
1786
1787	gfar_halt_nodisable(priv);
1788
1789	/* Disable Rx/Tx DMA */
1790	tempval = gfar_read(&regs->maccfg1);
1791	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1792	gfar_write(&regs->maccfg1, tempval);
1793}
1794
1795void stop_gfar(struct net_device *dev)
1796{
1797	struct gfar_private *priv = netdev_priv(dev);
1798
1799	netif_tx_stop_all_queues(dev);
1800
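	/* the barriers order the GFAR_DOWN update so the napi poll and
	 * interrupt paths observe it before we start tearing things down
	 */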
1801	smp_mb__before_clear_bit();
1802	set_bit(GFAR_DOWN, &priv->state);
1803	smp_mb__after_clear_bit();
1804
1805	disable_napi(priv);
1806
1807	/* disable ints and gracefully shut down Rx/Tx DMA */
1808	gfar_halt(priv);
1809
1810	phy_stop(priv->phydev);
1811
1812	free_skb_resources(priv);
1813}
1814
1815static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1816{
1817	struct txbd8 *txbdp;
1818	struct gfar_private *priv = netdev_priv(tx_queue->dev);
1819	int i, j;
1820
1821	txbdp = tx_queue->tx_bd_base;
1822
1823	for (i = 0; i < tx_queue->tx_ring_size; i++) {
1824		if (!tx_queue->tx_skbuff[i])
1825			continue;
1826
1827		dma_unmap_single(priv->dev, txbdp->bufPtr,
1828				 txbdp->length, DMA_TO_DEVICE);
1829		txbdp->lstatus = 0;
1830		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1831		     j++) {
1832			txbdp++;
1833			dma_unmap_page(priv->dev, txbdp->bufPtr,
1834				       txbdp->length, DMA_TO_DEVICE);
1835		}
1836		txbdp++;
1837		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1838		tx_queue->tx_skbuff[i] = NULL;
1839	}
1840	kfree(tx_queue->tx_skbuff);
1841	tx_queue->tx_skbuff = NULL;
1842}
1843
1844static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1845{
1846	struct rxbd8 *rxbdp;
1847	struct gfar_private *priv = netdev_priv(rx_queue->dev);
1848	int i;
1849
1850	rxbdp = rx_queue->rx_bd_base;
1851
1852	for (i = 0; i < rx_queue->rx_ring_size; i++) {
1853		if (rx_queue->rx_skbuff[i]) {
1854			dma_unmap_single(priv->dev, rxbdp->bufPtr,
1855					 priv->rx_buffer_size,
1856					 DMA_FROM_DEVICE);
1857			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1858			rx_queue->rx_skbuff[i] = NULL;
1859		}
1860		rxbdp->lstatus = 0;
1861		rxbdp->bufPtr = 0;
1862		rxbdp++;
1863	}
1864	kfree(rx_queue->rx_skbuff);
1865	rx_queue->rx_skbuff = NULL;
1866}
1867
1868/* If there are any tx skbs or rx skbs still around, free them.
1869 * Then free tx_skbuff and rx_skbuff
1870 */
1871static void free_skb_resources(struct gfar_private *priv)
1872{
1873	struct gfar_priv_tx_q *tx_queue = NULL;
1874	struct gfar_priv_rx_q *rx_queue = NULL;
1875	int i;
1876
1877	/* Go through all the buffer descriptors and free their data buffers */
1878	for (i = 0; i < priv->num_tx_queues; i++) {
1879		struct netdev_queue *txq;
1880
1881		tx_queue = priv->tx_queue[i];
1882		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1883		if (tx_queue->tx_skbuff)
1884			free_skb_tx_queue(tx_queue);
1885		netdev_tx_reset_queue(txq);
1886	}
1887
1888	for (i = 0; i < priv->num_rx_queues; i++) {
1889		rx_queue = priv->rx_queue[i];
1890		if (rx_queue->rx_skbuff)
1891			free_skb_rx_queue(rx_queue);
1892	}
1893
1894	dma_free_coherent(priv->dev,
1895			  sizeof(struct txbd8) * priv->total_tx_ring_size +
1896			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
1897			  priv->tx_queue[0]->tx_bd_base,
1898			  priv->tx_queue[0]->tx_bd_dma_base);
1899}
1900
1901void gfar_start(struct gfar_private *priv)
1902{
1903	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1904	u32 tempval;
1905	int i = 0;
1906
1907	/* Enable Rx/Tx hw queues */
1908	gfar_write(&regs->rqueue, priv->rqueue);
1909	gfar_write(&regs->tqueue, priv->tqueue);
1910
1911	/* Initialize DMACTRL to have WWR and WOP */
1912	tempval = gfar_read(&regs->dmactrl);
1913	tempval |= DMACTRL_INIT_SETTINGS;
1914	gfar_write(&regs->dmactrl, tempval);
1915
1916	/* Make sure we aren't stopped */
1917	tempval = gfar_read(&regs->dmactrl);
1918	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1919	gfar_write(&regs->dmactrl, tempval);
1920
1921	for (i = 0; i < priv->num_grps; i++) {
1922		regs = priv->gfargrp[i].regs;
1923		/* Clear THLT/RHLT, so that the DMA starts polling now */
1924		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1925		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1926	}
1927
1928	/* Enable Rx/Tx DMA */
1929	tempval = gfar_read(&regs->maccfg1);
1930	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1931	gfar_write(&regs->maccfg1, tempval);
1932
1933	gfar_ints_enable(priv);
1934
1935	priv->ndev->trans_start = jiffies; /* prevent tx timeout */
1936}
1937
1938static void free_grp_irqs(struct gfar_priv_grp *grp)
1939{
1940	free_irq(gfar_irq(grp, TX)->irq, grp);
1941	free_irq(gfar_irq(grp, RX)->irq, grp);
1942	free_irq(gfar_irq(grp, ER)->irq, grp);
1943}
1944
1945static int register_grp_irqs(struct gfar_priv_grp *grp)
1946{
1947	struct gfar_private *priv = grp->priv;
1948	struct net_device *dev = priv->ndev;
1949	int err;
1950
1951	/* If the device has multiple interrupts, register for
1952	 * them.  Otherwise, only register for the one
1953	 */
1954	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1955		/* Install our interrupt handlers for Error,
1956		 * Transmit, and Receive
1957		 */
1958		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
1959				  gfar_irq(grp, ER)->name, grp);
1960		if (err < 0) {
1961			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1962				  gfar_irq(grp, ER)->irq);
1963
1964			goto err_irq_fail;
1965		}
1966		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
1967				  gfar_irq(grp, TX)->name, grp);
1968		if (err < 0) {
1969			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1970				  gfar_irq(grp, TX)->irq);
1971			goto tx_irq_fail;
1972		}
1973		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
1974				  gfar_irq(grp, RX)->name, grp);
1975		if (err < 0) {
1976			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1977				  gfar_irq(grp, RX)->irq);
1978			goto rx_irq_fail;
1979		}
1980	} else {
1981		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
1982				  gfar_irq(grp, TX)->name, grp);
1983		if (err < 0) {
1984			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1985				  gfar_irq(grp, TX)->irq);
1986			goto err_irq_fail;
1987		}
1988	}
1989
1990	return 0;
1991
1992rx_irq_fail:
1993	free_irq(gfar_irq(grp, TX)->irq, grp);
1994tx_irq_fail:
1995	free_irq(gfar_irq(grp, ER)->irq, grp);
1996err_irq_fail:
1997	return err;
1998
1999}
2000
2001static void gfar_free_irq(struct gfar_private *priv)
2002{
2003	int i;
2004
2005	/* Free the IRQs */
2006	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2007		for (i = 0; i < priv->num_grps; i++)
2008			free_grp_irqs(&priv->gfargrp[i]);
2009	} else {
2010		for (i = 0; i < priv->num_grps; i++)
2011			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2012				 &priv->gfargrp[i]);
2013	}
2014}
2015
2016static int gfar_request_irq(struct gfar_private *priv)
2017{
2018	int err, i, j;
2019
2020	for (i = 0; i < priv->num_grps; i++) {
2021		err = register_grp_irqs(&priv->gfargrp[i]);
2022		if (err) {
2023			for (j = 0; j < i; j++)
2024				free_grp_irqs(&priv->gfargrp[j]);
2025			return err;
2026		}
2027	}
2028
2029	return 0;
2030}
2031
2032/* Bring the controller up and running */
2033int startup_gfar(struct net_device *ndev)
2034{
2035	struct gfar_private *priv = netdev_priv(ndev);
2036	int err;
2037
2038	gfar_mac_reset(priv);
2039
2040	err = gfar_alloc_skb_resources(ndev);
2041	if (err)
2042		return err;
2043
2044	gfar_init_tx_rx_base(priv);
2045
2046	smp_mb__before_clear_bit();
2047	clear_bit(GFAR_DOWN, &priv->state);
2048	smp_mb__after_clear_bit();
2049
2050	/* Start Rx/Tx DMA and enable the interrupts */
2051	gfar_start(priv);
2052
2053	phy_start(priv->phydev);
2054
2055	enable_napi(priv);
2056
2057	netif_tx_wake_all_queues(ndev);
2058
2059	return 0;
2060}
2061
2062/* Called when something needs to use the ethernet device
2063 * Returns 0 for success.
2064 */
2065static int gfar_enet_open(struct net_device *dev)
2066{
2067	struct gfar_private *priv = netdev_priv(dev);
2068	int err;
2069
2070	err = init_phy(dev);
2071	if (err)
2072		return err;
2073
2074	err = gfar_request_irq(priv);
2075	if (err)
2076		return err;
2077
2078	err = startup_gfar(dev);
2079	if (err)
2080		return err;
2081
2082	device_set_wakeup_enable(&dev->dev, priv->wol_en);
2083
2084	return err;
2085}
2086
2087static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
2088{
2089	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
2090
2091	memset(fcb, 0, GMAC_FCB_LEN);
2092
2093	return fcb;
2094}
2095
2096static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2097				    int fcb_length)
2098{
2099	/* If we're here, it's an IP packet with a TCP or UDP
2100	 * payload.  We set it to checksum, using a pseudo-header
2101	 * we provide
2102	 */
2103	u8 flags = TXFCB_DEFAULT;
2104
2105	/* Tell the controller what the protocol is
2106	 * And provide the already calculated phcs
2107	 */
2108	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2109		flags |= TXFCB_UDP;
2110		fcb->phcs = udp_hdr(skb)->check;
2111	} else
2112		fcb->phcs = tcp_hdr(skb)->check;
2113
2114	/* l3os is the distance between the start of the
2115	 * frame (skb->data) and the start of the IP hdr.
2116	 * l4os is the distance between the start of the
2117	 * l3 hdr and the l4 hdr
2118	 */
2119	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
2120	fcb->l4os = skb_network_header_len(skb);
2121
2122	fcb->flags = flags;
2123}
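/* Worked example for the offsets above (illustrative): for a plain
 * untagged Ethernet + IPv4 + TCP frame with only the 8-byte FCB pushed
 * in front, the network header sits ETH_HLEN + GMAC_FCB_LEN =
 * 14 + 8 = 22 bytes past skb->data, so l3os = 22 - 8 = 14, and l4os is
 * the IP header length, 20 bytes without options.
 */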
2124
2125void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2126{
2127	fcb->flags |= TXFCB_VLN;
2128	fcb->vlctl = vlan_tx_tag_get(skb);
2129}
2130
2131static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2132				      struct txbd8 *base, int ring_size)
2133{
2134	struct txbd8 *new_bd = bdp + stride;
2135
2136	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2137}
2138
2139static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2140				      int ring_size)
2141{
2142	return skip_txbd(bdp, 1, base, ring_size);
2143}
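/* A self-contained sketch of the wrap logic above (illustrative, not
 * driver code) -- skip_txbd() is modular pointer arithmetic without a
 * divide, valid for strides of at most ring_size, which is how the
 * driver uses it:
 */
#include <stdio.h>

struct bd { int pad; };

static struct bd *skip_bd(struct bd *bdp, int stride,
			  struct bd *base, int ring_size)
{
	struct bd *new_bd = bdp + stride;

	return (new_bd >= base + ring_size) ? new_bd - ring_size : new_bd;
}

int main(void)
{
	struct bd ring[8];
	struct bd *p = skip_bd(&ring[6], 3, ring, 8);

	printf("index %d\n", (int)(p - ring));	/* prints "index 1" */
	return 0;
}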
2144
2145/* eTSEC12: csum generation not supported for some fcb offsets */
2146static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2147				       unsigned long fcb_addr)
2148{
2149	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2150	       (fcb_addr % 0x20) > 0x18);
2151}
2152
2153/* eTSEC76: csum generation for frames larger than 2500 may
2154 * cause excess delays before start of transmission
2155 */
2156static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2157				       unsigned int len)
2158{
2159	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2160	       (len > 2500));
2161}
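/* Worked example (illustrative): the eTSEC12 test above flags an FCB
 * that starts in the last 7 bytes of a 32-byte window -- an address
 * ending in 0x19..0x1f (mod 0x20) takes the skb_checksum_help()
 * fallback below, while 0x00..0x18 keeps hardware checksumming.
 */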
2162
2163/* This is called by the kernel when a frame is ready for transmission.
2164 * It is pointed to by the dev->hard_start_xmit function pointer
2165 */
2166static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2167{
2168	struct gfar_private *priv = netdev_priv(dev);
2169	struct gfar_priv_tx_q *tx_queue = NULL;
2170	struct netdev_queue *txq;
2171	struct gfar __iomem *regs = NULL;
2172	struct txfcb *fcb = NULL;
2173	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2174	u32 lstatus;
2175	int i, rq = 0;
2176	int do_tstamp, do_csum, do_vlan;
2177	u32 bufaddr;
2178	unsigned long flags;
2179	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2180
2181	rq = skb->queue_mapping;
2182	tx_queue = priv->tx_queue[rq];
2183	txq = netdev_get_tx_queue(dev, rq);
2184	base = tx_queue->tx_bd_base;
2185	regs = tx_queue->grp->regs;
2186
2187	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2188	do_vlan = vlan_tx_tag_present(skb);
2189	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2190		    priv->hwts_tx_en;
2191
2192	if (do_csum || do_vlan)
2193		fcb_len = GMAC_FCB_LEN;
2194
2195	/* check if time stamp should be generated */
2196	if (unlikely(do_tstamp))
2197		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2198
2199	/* make space for additional header when fcb is needed */
2200	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
2201		struct sk_buff *skb_new;
2202
2203		skb_new = skb_realloc_headroom(skb, fcb_len);
2204		if (!skb_new) {
2205			dev->stats.tx_errors++;
2206			dev_kfree_skb_any(skb);
2207			return NETDEV_TX_OK;
2208		}
2209
2210		if (skb->sk)
2211			skb_set_owner_w(skb_new, skb->sk);
2212		dev_consume_skb_any(skb);
2213		skb = skb_new;
2214	}
2215
2216	/* total number of fragments in the SKB */
2217	nr_frags = skb_shinfo(skb)->nr_frags;
2218
2219	/* calculate the required number of TxBDs for this skb */
2220	if (unlikely(do_tstamp))
2221		nr_txbds = nr_frags + 2;
2222	else
2223		nr_txbds = nr_frags + 1;
2224
2225	/* check if there is space to queue this packet */
2226	if (nr_txbds > tx_queue->num_txbdfree) {
2227		/* no space, stop the queue */
2228		netif_tx_stop_queue(txq);
2229		dev->stats.tx_fifo_errors++;
2230		return NETDEV_TX_BUSY;
2231	}
2232
2233	/* Update transmit stats */
2234	bytes_sent = skb->len;
2235	tx_queue->stats.tx_bytes += bytes_sent;
2236	/* keep Tx bytes on wire for BQL accounting */
2237	GFAR_CB(skb)->bytes_sent = bytes_sent;
2238	tx_queue->stats.tx_packets++;
2239
2240	txbdp = txbdp_start = tx_queue->cur_tx;
2241	lstatus = txbdp->lstatus;
2242
2243	/* Time stamp insertion requires one additional TxBD */
2244	if (unlikely(do_tstamp))
2245		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2246						 tx_queue->tx_ring_size);
2247
2248	if (nr_frags == 0) {
2249		if (unlikely(do_tstamp))
2250			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2251							  TXBD_INTERRUPT);
2252		else
2253			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2254	} else {
2255		/* Place the fragment addresses and lengths into the TxBDs */
2256		for (i = 0; i < nr_frags; i++) {
2257			unsigned int frag_len;
2258			/* Point at the next BD, wrapping as needed */
2259			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2260
2261			frag_len = skb_shinfo(skb)->frags[i].size;
2262
2263			lstatus = txbdp->lstatus | frag_len |
2264				  BD_LFLAG(TXBD_READY);
2265
2266			/* Handle the last BD specially */
2267			if (i == nr_frags - 1)
2268				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2269
2270			bufaddr = skb_frag_dma_map(priv->dev,
2271						   &skb_shinfo(skb)->frags[i],
2272						   0,
2273						   frag_len,
2274						   DMA_TO_DEVICE);
2275
2276			/* set the TxBD length and buffer pointer */
2277			txbdp->bufPtr = bufaddr;
2278			txbdp->lstatus = lstatus;
2279		}
2280
2281		lstatus = txbdp_start->lstatus;
2282	}
2283
2284	/* Add TxPAL between FCB and frame if required */
2285	if (unlikely(do_tstamp)) {
2286		skb_push(skb, GMAC_TXPAL_LEN);
2287		memset(skb->data, 0, GMAC_TXPAL_LEN);
2288	}
2289
2290	/* Add TxFCB if required */
2291	if (fcb_len) {
2292		fcb = gfar_add_fcb(skb);
2293		lstatus |= BD_LFLAG(TXBD_TOE);
2294	}
2295
2296	/* Set up checksumming */
2297	if (do_csum) {
2298		gfar_tx_checksum(skb, fcb, fcb_len);
2299
2300		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2301		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
2302			__skb_pull(skb, GMAC_FCB_LEN);
2303			skb_checksum_help(skb);
2304			if (do_vlan || do_tstamp) {
2305				/* put back a new fcb for vlan/tstamp TOE */
2306				fcb = gfar_add_fcb(skb);
2307			} else {
2308				/* Tx TOE not used */
2309				lstatus &= ~(BD_LFLAG(TXBD_TOE));
2310				fcb = NULL;
2311			}
2312		}
2313	}
2314
2315	if (do_vlan)
2316		gfar_tx_vlan(skb, fcb);
2317
2318	/* Setup tx hardware time stamping if requested */
2319	if (unlikely(do_tstamp)) {
2320		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2321		fcb->ptp = 1;
2322	}
2323
2324	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
2325					     skb_headlen(skb), DMA_TO_DEVICE);
2326
2327	/* If time stamping is requested one additional TxBD must be set up. The
2328	 * first TxBD points to the FCB and must have a data length of
2329	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2330	 * the full frame length.
2331	 */
2332	if (unlikely(do_tstamp)) {
2333		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
2334		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2335					 (skb_headlen(skb) - fcb_len);
2336		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2337	} else {
2338		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2339	}
2340
2341	netdev_tx_sent_queue(txq, bytes_sent);
2342
2343	/* We can work in parallel with gfar_clean_tx_ring(), except
2344	 * when modifying num_txbdfree. Note that we didn't grab the lock
2345	 * when we were reading the num_txbdfree and checking for available
2346	 * space, that's because outside of this function it can only grow,
2347	 * and once we've got the needed space, it cannot suddenly disappear.
2348	 *
2349	 * The lock also protects us from gfar_error(), which can modify
2350	 * regs->tstat and thus retrigger the transfers, which is why we
2351	 * must also grab the lock before setting the ready bit for the
2352	 * first BD to be transmitted.
2353	 */
2354	spin_lock_irqsave(&tx_queue->txlock, flags);
2355
2356	/* The powerpc-specific eieio() is used, as wmb() has too strong
2357	 * semantics (it requires synchronization between cacheable and
2358	 * uncacheable mappings, which eieio doesn't provide and which we
2359	 * don't need), thus requiring a more expensive sync instruction.  At
2360	 * some point, the set of architecture-independent barrier functions
2361	 * should be expanded to include weaker barriers.
2362	 */
2363	eieio();
2364
2365	txbdp_start->lstatus = lstatus;
2366
2367	eieio(); /* force lstatus write before tx_skbuff */
2368
2369	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2370
2371	/* Update the current skb pointer to the next entry we will use
2372	 * (wrapping if necessary)
2373	 */
2374	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2375			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2376
2377	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2378
2379	/* reduce TxBD free count */
2380	tx_queue->num_txbdfree -= (nr_txbds);
2381
2382	/* If the next BD still needs to be cleaned up, then the bds
2383	 * are full.  We need to tell the kernel to stop sending us stuff.
2384	 */
2385	if (!tx_queue->num_txbdfree) {
2386		netif_tx_stop_queue(txq);
2387
2388		dev->stats.tx_fifo_errors++;
2389	}
2390
2391	/* Tell the DMA to go go go */
2392	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2393
2394	/* Unlock priv */
2395	spin_unlock_irqrestore(&tx_queue->txlock, flags);
2396
2397	return NETDEV_TX_OK;
2398}
2399
2400/* Stops the kernel queue, and halts the controller */
2401static int gfar_close(struct net_device *dev)
2402{
2403	struct gfar_private *priv = netdev_priv(dev);
2404
2405	cancel_work_sync(&priv->reset_task);
2406	stop_gfar(dev);
2407
2408	/* Disconnect from the PHY */
2409	phy_disconnect(priv->phydev);
2410	priv->phydev = NULL;
2411
2412	gfar_free_irq(priv);
2413
2414	return 0;
2415}
2416
2417/* Changes the mac address if the controller is not running. */
2418static int gfar_set_mac_address(struct net_device *dev)
2419{
2420	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2421
2422	return 0;
2423}
2424
2425static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2426{
2427	struct gfar_private *priv = netdev_priv(dev);
2428	int frame_size = new_mtu + ETH_HLEN;
2429
2430	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2431		netif_err(priv, drv, dev, "Invalid MTU setting\n");
2432		return -EINVAL;
2433	}
2434
2435	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2436		cpu_relax();
2437
2438	if (dev->flags & IFF_UP)
2439		stop_gfar(dev);
2440
2441	dev->mtu = new_mtu;
2442
2443	if (dev->flags & IFF_UP)
2444		startup_gfar(dev);
2445
2446	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2447
2448	return 0;
2449}
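/* Worked example (illustrative): for new_mtu = 1500, frame_size is
 * 1500 + ETH_HLEN = 1514, inside the accepted 64..JUMBO_FRAME_SIZE
 * window (9600, per gianfar.h), so a running device is stopped,
 * dev->mtu is updated, and the device is started again.
 */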
2450
2451void reset_gfar(struct net_device *ndev)
2452{
2453	struct gfar_private *priv = netdev_priv(ndev);
2454
2455	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2456		cpu_relax();
2457
2458	stop_gfar(ndev);
2459	startup_gfar(ndev);
2460
2461	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2462}
2463
2464/* gfar_reset_task gets scheduled when a packet has not been
2465 * transmitted after a set amount of time.
2466 * For now, assume that clearing out all the structures, and
2467 * starting over will fix the problem.
2468 */
2469static void gfar_reset_task(struct work_struct *work)
2470{
2471	struct gfar_private *priv = container_of(work, struct gfar_private,
2472						 reset_task);
2473	reset_gfar(priv->ndev);
2474}
2475
2476static void gfar_timeout(struct net_device *dev)
2477{
2478	struct gfar_private *priv = netdev_priv(dev);
2479
2480	dev->stats.tx_errors++;
2481	schedule_work(&priv->reset_task);
2482}
2483
2484static void gfar_align_skb(struct sk_buff *skb)
2485{
2486	/* The data buffer must be properly aligned.  Reserve as many
2487	 * bytes as needed to achieve that alignment
2488	 */
2489	skb_reserve(skb, RXBUF_ALIGNMENT -
2490		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2491}
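/* A sketch of the reserve arithmetic above (illustrative, not driver
 * code), assuming RXBUF_ALIGNMENT is 64.  Note that already-aligned
 * data still reserves a full 64 bytes, which the extra RXBUF_ALIGNMENT
 * of headroom allocated in gfar_alloc_skb() leaves room for.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long align = 64;	/* assumed RXBUF_ALIGNMENT */
	unsigned long data = 0x1234;	/* pretend skb->data */
	unsigned long reserve = align - (data & (align - 1));

	printf("reserve=%lu new=0x%lx\n", reserve, data + reserve);
	/* prints: reserve=12 new=0x1240 */
	return 0;
}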
2492
2493/* Interrupt Handler for Transmit complete */
2494static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2495{
2496	struct net_device *dev = tx_queue->dev;
2497	struct netdev_queue *txq;
2498	struct gfar_private *priv = netdev_priv(dev);
2499	struct txbd8 *bdp, *next = NULL;
2500	struct txbd8 *lbdp = NULL;
2501	struct txbd8 *base = tx_queue->tx_bd_base;
2502	struct sk_buff *skb;
2503	int skb_dirtytx;
2504	int tx_ring_size = tx_queue->tx_ring_size;
2505	int frags = 0, nr_txbds = 0;
2506	int i;
2507	int howmany = 0;
2508	int tqi = tx_queue->qindex;
2509	unsigned int bytes_sent = 0;
2510	u32 lstatus;
2511	size_t buflen;
2512
2513	txq = netdev_get_tx_queue(dev, tqi);
2514	bdp = tx_queue->dirty_tx;
2515	skb_dirtytx = tx_queue->skb_dirtytx;
2516
2517	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2518		unsigned long flags;
2519
2520		frags = skb_shinfo(skb)->nr_frags;
2521
2522		/* When time stamping, one additional TxBD must be freed.
2523		 * Also, we need to dma_unmap_single() the TxPAL.
2524		 */
2525		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2526			nr_txbds = frags + 2;
2527		else
2528			nr_txbds = frags + 1;
2529
2530		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2531
2532		lstatus = lbdp->lstatus;
2533
2534		/* Only clean completed frames */
2535		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2536		    (lstatus & BD_LENGTH_MASK))
2537			break;
2538
2539		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2540			next = next_txbd(bdp, base, tx_ring_size);
2541			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2542		} else
2543			buflen = bdp->length;
2544
2545		dma_unmap_single(priv->dev, bdp->bufPtr,
2546				 buflen, DMA_TO_DEVICE);
2547
2548		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2549			struct skb_shared_hwtstamps shhwtstamps;
2550	u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
2551
2552			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2553			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2554			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2555			skb_tstamp_tx(skb, &shhwtstamps);
2556			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2557			bdp = next;
2558		}
2559
2560		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2561		bdp = next_txbd(bdp, base, tx_ring_size);
2562
2563		for (i = 0; i < frags; i++) {
2564			dma_unmap_page(priv->dev, bdp->bufPtr,
2565				       bdp->length, DMA_TO_DEVICE);
2566			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2567			bdp = next_txbd(bdp, base, tx_ring_size);
2568		}
2569
2570		bytes_sent += GFAR_CB(skb)->bytes_sent;
2571
2572		dev_kfree_skb_any(skb);
2573
2574		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2575
2576		skb_dirtytx = (skb_dirtytx + 1) &
2577			      TX_RING_MOD_MASK(tx_ring_size);
2578
2579		howmany++;
2580		spin_lock_irqsave(&tx_queue->txlock, flags);
2581		tx_queue->num_txbdfree += nr_txbds;
2582		spin_unlock_irqrestore(&tx_queue->txlock, flags);
2583	}
2584
2585	/* If we freed a buffer, we can restart transmission, if necessary */
2586	if (tx_queue->num_txbdfree &&
2587	    netif_tx_queue_stopped(txq) &&
2588	    !(test_bit(GFAR_DOWN, &priv->state)))
2589		netif_wake_subqueue(priv->ndev, tqi);
2590
2591	/* Update dirty indicators */
2592	tx_queue->skb_dirtytx = skb_dirtytx;
2593	tx_queue->dirty_tx = bdp;
2594
2595	netdev_tx_completed_queue(txq, howmany, bytes_sent);
2596}
2597
2598static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2599			   struct sk_buff *skb)
2600{
2601	struct net_device *dev = rx_queue->dev;
2602	struct gfar_private *priv = netdev_priv(dev);
2603	dma_addr_t buf;
2604
2605	buf = dma_map_single(priv->dev, skb->data,
2606			     priv->rx_buffer_size, DMA_FROM_DEVICE);
2607	gfar_init_rxbdp(rx_queue, bdp, buf);
2608}
2609
2610static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2611{
2612	struct gfar_private *priv = netdev_priv(dev);
2613	struct sk_buff *skb;
2614
2615	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2616	if (!skb)
2617		return NULL;
2618
2619	gfar_align_skb(skb);
2620
2621	return skb;
2622}
2623
2624struct sk_buff *gfar_new_skb(struct net_device *dev)
2625{
2626	return gfar_alloc_skb(dev);
2627}
2628
2629static inline void count_errors(unsigned short status, struct net_device *dev)
2630{
2631	struct gfar_private *priv = netdev_priv(dev);
2632	struct net_device_stats *stats = &dev->stats;
2633	struct gfar_extra_stats *estats = &priv->extra_stats;
2634
2635	/* If the packet was truncated, none of the other errors matter */
2636	if (status & RXBD_TRUNCATED) {
2637		stats->rx_length_errors++;
2638
2639		atomic64_inc(&estats->rx_trunc);
2640
2641		return;
2642	}
2643	/* Count the errors, if there were any */
2644	if (status & (RXBD_LARGE | RXBD_SHORT)) {
2645		stats->rx_length_errors++;
2646
2647		if (status & RXBD_LARGE)
2648			atomic64_inc(&estats->rx_large);
2649		else
2650			atomic64_inc(&estats->rx_short);
2651	}
2652	if (status & RXBD_NONOCTET) {
2653		stats->rx_frame_errors++;
2654		atomic64_inc(&estats->rx_nonoctet);
2655	}
2656	if (status & RXBD_CRCERR) {
2657		atomic64_inc(&estats->rx_crcerr);
2658		stats->rx_crc_errors++;
2659	}
2660	if (status & RXBD_OVERRUN) {
2661		atomic64_inc(&estats->rx_overrun);
2662		stats->rx_crc_errors++;
2663	}
2664}
2665
2666irqreturn_t gfar_receive(int irq, void *grp_id)
2667{
2668	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2669	unsigned long flags;
2670	u32 imask;
2671
2672	if (likely(napi_schedule_prep(&grp->napi_rx))) {
2673		spin_lock_irqsave(&grp->grplock, flags);
2674		imask = gfar_read(&grp->regs->imask);
2675		imask &= IMASK_RX_DISABLED;
2676		gfar_write(&grp->regs->imask, imask);
2677		spin_unlock_irqrestore(&grp->grplock, flags);
2678		__napi_schedule(&grp->napi_rx);
2679	} else {
2680		/* Clear IEVENT, so interrupts aren't called again
2681		 * because of the packets that have already arrived.
2682		 */
2683		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2684	}
2685
2686	return IRQ_HANDLED;
2687}
2688
2689/* Interrupt Handler for Transmit complete */
2690static irqreturn_t gfar_transmit(int irq, void *grp_id)
2691{
2692	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2693	unsigned long flags;
2694	u32 imask;
2695
2696	if (likely(napi_schedule_prep(&grp->napi_tx))) {
2697		spin_lock_irqsave(&grp->grplock, flags);
2698		imask = gfar_read(&grp->regs->imask);
2699		imask &= IMASK_TX_DISABLED;
2700		gfar_write(&grp->regs->imask, imask);
2701		spin_unlock_irqrestore(&grp->grplock, flags);
2702		__napi_schedule(&grp->napi_tx);
2703	} else {
2704		/* Clear IEVENT, so interrupts aren't called again
2705		 * because of the packets that have already arrived.
2706		 */
2707		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2708	}
2709
2710	return IRQ_HANDLED;
2711}
2712
2713static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2714{
2715	/* If valid headers were found, and valid sums
2716	 * were verified, then we tell the kernel that no
2717	 * checksumming is necessary.  Otherwise, the stack must verify.
2718	 */
2719	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2720		skb->ip_summed = CHECKSUM_UNNECESSARY;
2721	else
2722		skb_checksum_none_assert(skb);
2723}
2724
2725
2726/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2727static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2728			       int amount_pull, struct napi_struct *napi)
2729{
2730	struct gfar_private *priv = netdev_priv(dev);
2731	struct rxfcb *fcb = NULL;
2732
2733	/* fcb is at the beginning if it exists */
2734	fcb = (struct rxfcb *)skb->data;
2735
2736	/* Remove the FCB from the skb
2737	 * Remove the padded bytes, if there are any
2738	 */
2739	if (amount_pull) {
2740		skb_record_rx_queue(skb, fcb->rq);
2741		skb_pull(skb, amount_pull);
2742	}
2743
2744	/* Get receive timestamp from the skb */
2745	if (priv->hwts_rx_en) {
2746		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2747		u64 *ns = (u64 *) skb->data;
2748
2749		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2750		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2751	}
2752
2753	if (priv->padding)
2754		skb_pull(skb, priv->padding);
2755
2756	if (dev->features & NETIF_F_RXCSUM)
2757		gfar_rx_checksum(skb, fcb);
2758
2759	/* Tell the skb what kind of packet this is */
2760	skb->protocol = eth_type_trans(skb, dev);
2761
2762	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2763	 * Even if vlan rx accel is disabled, on some chips
2764	 * RXFCB_VLN is pseudo randomly set.
2765	 */
2766	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2767	    fcb->flags & RXFCB_VLN)
2768		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
2769
2770	/* Send the packet up the stack */
2771	napi_gro_receive(napi, skb);
2772
2773}
2774
2775/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2776 * until the budget/quota has been reached. Returns the number
2777 * of frames handled
2778 */
2779int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2780{
2781	struct net_device *dev = rx_queue->dev;
2782	struct rxbd8 *bdp, *base;
2783	struct sk_buff *skb;
2784	int pkt_len;
2785	int amount_pull;
2786	int howmany = 0;
2787	struct gfar_private *priv = netdev_priv(dev);
2788
2789	/* Get the first full descriptor */
2790	bdp = rx_queue->cur_rx;
2791	base = rx_queue->rx_bd_base;
2792
2793	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
2794
2795	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2796		struct sk_buff *newskb;
2797
2798		rmb();
2799
2800		/* Add another skb for the future */
2801		newskb = gfar_new_skb(dev);
2802
2803		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2804
2805		dma_unmap_single(priv->dev, bdp->bufPtr,
2806				 priv->rx_buffer_size, DMA_FROM_DEVICE);
2807
2808		if (unlikely(!(bdp->status & RXBD_ERR) &&
2809			     bdp->length > priv->rx_buffer_size))
2810			bdp->status = RXBD_LARGE;
2811
2812		/* We drop the frame if we failed to allocate a new buffer */
2813		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2814			     bdp->status & RXBD_ERR)) {
2815			count_errors(bdp->status, dev);
2816
2817			if (unlikely(!newskb))
2818				newskb = skb;
2819			else if (skb)
2820				dev_kfree_skb(skb);
2821		} else {
2822			/* Increment the number of packets */
2823			rx_queue->stats.rx_packets++;
2824			howmany++;
2825
2826			if (likely(skb)) {
2827				pkt_len = bdp->length - ETH_FCS_LEN;
2828				/* Remove the FCS from the packet length */
2829				skb_put(skb, pkt_len);
2830				rx_queue->stats.rx_bytes += pkt_len;
2831				skb_record_rx_queue(skb, rx_queue->qindex);
2832				gfar_process_frame(dev, skb, amount_pull,
2833						   &rx_queue->grp->napi_rx);
2834
2835			} else {
2836				netif_warn(priv, rx_err, dev, "Missing skb!\n");
2837				rx_queue->stats.rx_dropped++;
2838				atomic64_inc(&priv->extra_stats.rx_skbmissing);
2839			}
2840
2841		}
2842
2843		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2844
2845		/* Setup the new bdp */
2846		gfar_new_rxbdp(rx_queue, bdp, newskb);
2847
2848		/* Update to the next pointer */
2849		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2850
2851		/* update to point at the next skb */
2852		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2853				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2854	}
2855
2856	/* Update the current rxbd pointer to be the next one */
2857	rx_queue->cur_rx = bdp;
2858
2859	return howmany;
2860}
2861
2862static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2863{
2864	struct gfar_priv_grp *gfargrp =
2865		container_of(napi, struct gfar_priv_grp, napi_rx);
2866	struct gfar __iomem *regs = gfargrp->regs;
2867	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2868	int work_done = 0;
2869
2870	/* Clear IEVENT, so interrupts aren't called again
2871	 * because of the packets that have already arrived
2872	 */
2873	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2874
2875	work_done = gfar_clean_rx_ring(rx_queue, budget);
2876
2877	if (work_done < budget) {
2878		u32 imask;
2879		napi_complete(napi);
2880		/* Clear the halt bit in RSTAT */
2881		gfar_write(&regs->rstat, gfargrp->rstat);
2882
2883		spin_lock_irq(&gfargrp->grplock);
2884		imask = gfar_read(&regs->imask);
2885		imask |= IMASK_RX_DEFAULT;
2886		gfar_write(&regs->imask, imask);
2887		spin_unlock_irq(&gfargrp->grplock);
2888	}
2889
2890	return work_done;
2891}
2892
2893static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2894{
2895	struct gfar_priv_grp *gfargrp =
2896		container_of(napi, struct gfar_priv_grp, napi_tx);
2897	struct gfar __iomem *regs = gfargrp->regs;
2898	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2899	u32 imask;
2900
2901	/* Clear IEVENT, so interrupts aren't called again
2902	 * because of the packets that have already arrived
2903	 */
2904	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2905
2906	/* run Tx cleanup to completion */
2907	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2908		gfar_clean_tx_ring(tx_queue);
2909
2910	napi_complete(napi);
2911
2912	spin_lock_irq(&gfargrp->grplock);
2913	imask = gfar_read(&regs->imask);
2914	imask |= IMASK_TX_DEFAULT;
2915	gfar_write(&regs->imask, imask);
2916	spin_unlock_irq(&gfargrp->grplock);
2917
2918	return 0;
2919}
2920
2921static int gfar_poll_rx(struct napi_struct *napi, int budget)
2922{
2923	struct gfar_priv_grp *gfargrp =
2924		container_of(napi, struct gfar_priv_grp, napi_rx);
2925	struct gfar_private *priv = gfargrp->priv;
2926	struct gfar __iomem *regs = gfargrp->regs;
2927	struct gfar_priv_rx_q *rx_queue = NULL;
2928	int work_done = 0, work_done_per_q = 0;
2929	int i, budget_per_q = 0;
2930	unsigned long rstat_rxf;
2931	int num_act_queues;
2932
2933	/* Clear IEVENT, so interrupts aren't called again
2934	 * because of the packets that have already arrived
2935	 */
2936	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2937
2938	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2939
2940	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2941	if (num_act_queues)
2942		budget_per_q = budget/num_act_queues;
2943
2944	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2945		/* skip queue if not active */
2946		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2947			continue;
2948
2949		rx_queue = priv->rx_queue[i];
2950		work_done_per_q =
2951			gfar_clean_rx_ring(rx_queue, budget_per_q);
2952		work_done += work_done_per_q;
2953
2954		/* finished processing this queue */
2955		if (work_done_per_q < budget_per_q) {
2956			/* clear active queue hw indication */
2957			gfar_write(&regs->rstat,
2958				   RSTAT_CLEAR_RXF0 >> i);
2959			num_act_queues--;
2960
2961			if (!num_act_queues)
2962				break;
2963		}
2964	}
2965
2966	if (!num_act_queues) {
2967		u32 imask;
2968		napi_complete(napi);
2969
2970		/* Clear the halt bit in RSTAT */
2971		gfar_write(&regs->rstat, gfargrp->rstat);
2972
2973		spin_lock_irq(&gfargrp->grplock);
2974		imask = gfar_read(&regs->imask);
2975		imask |= IMASK_RX_DEFAULT;
2976		gfar_write(&regs->imask, imask);
2977		spin_unlock_irq(&gfargrp->grplock);
2978	}
2979
2980	return work_done;
2981}
2982
2983static int gfar_poll_tx(struct napi_struct *napi, int budget)
2984{
2985	struct gfar_priv_grp *gfargrp =
2986		container_of(napi, struct gfar_priv_grp, napi_tx);
2987	struct gfar_private *priv = gfargrp->priv;
2988	struct gfar __iomem *regs = gfargrp->regs;
2989	struct gfar_priv_tx_q *tx_queue = NULL;
2990	int has_tx_work = 0;
2991	int i;
2992
2993	/* Clear IEVENT, so interrupts aren't called again
2994	 * because of the packets that have already arrived
2995	 */
2996	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2997
2998	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2999		tx_queue = priv->tx_queue[i];
3000		/* run Tx cleanup to completion */
3001		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3002			gfar_clean_tx_ring(tx_queue);
3003			has_tx_work = 1;
3004		}
3005	}
3006
3007	if (!has_tx_work) {
3008		u32 imask;
3009		napi_complete(napi);
3010
3011		spin_lock_irq(&gfargrp->grplock);
3012		imask = gfar_read(&regs->imask);
3013		imask |= IMASK_TX_DEFAULT;
3014		gfar_write(&regs->imask, imask);
3015		spin_unlock_irq(&gfargrp->grplock);
3016	}
3017
3018	return 0;
3019}
3020
3021
3022#ifdef CONFIG_NET_POLL_CONTROLLER
3023/* Polling 'interrupt' - used by things like netconsole to send skbs
3024 * without having to re-enable interrupts. It's not called while
3025 * the interrupt routine is executing.
3026 */
3027static void gfar_netpoll(struct net_device *dev)
3028{
3029	struct gfar_private *priv = netdev_priv(dev);
3030	int i;
3031
3032	/* If the device has multiple interrupts, run tx/rx */
3033	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3034		for (i = 0; i < priv->num_grps; i++) {
3035			struct gfar_priv_grp *grp = &priv->gfargrp[i];
3036
3037			disable_irq(gfar_irq(grp, TX)->irq);
3038			disable_irq(gfar_irq(grp, RX)->irq);
3039			disable_irq(gfar_irq(grp, ER)->irq);
3040			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3041			enable_irq(gfar_irq(grp, ER)->irq);
3042			enable_irq(gfar_irq(grp, RX)->irq);
3043			enable_irq(gfar_irq(grp, TX)->irq);
3044		}
3045	} else {
3046		for (i = 0; i < priv->num_grps; i++) {
3047			struct gfar_priv_grp *grp = &priv->gfargrp[i];
3048
3049			disable_irq(gfar_irq(grp, TX)->irq);
3050			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3051			enable_irq(gfar_irq(grp, TX)->irq);
3052		}
3053	}
3054}
3055#endif
3056
3057/* The interrupt handler for devices with one interrupt */
3058static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3059{
3060	struct gfar_priv_grp *gfargrp = grp_id;
3061
3062	/* Save ievent for future reference */
3063	u32 events = gfar_read(&gfargrp->regs->ievent);
3064
3065	/* Check for reception */
3066	if (events & IEVENT_RX_MASK)
3067		gfar_receive(irq, grp_id);
3068
3069	/* Check for transmit completion */
3070	if (events & IEVENT_TX_MASK)
3071		gfar_transmit(irq, grp_id);
3072
3073	/* Check for errors */
3074	if (events & IEVENT_ERR_MASK)
3075		gfar_error(irq, grp_id);
3076
3077	return IRQ_HANDLED;
3078}
3079
3080/* Called every time the controller might need to be made
3081 * aware of new link state.  The PHY code conveys this
3082 * information through variables in the phydev structure, and this
3083 * function converts those variables into the appropriate
3084 * register values, and can bring down the device if needed.
3085 */
3086static void adjust_link(struct net_device *dev)
3087{
3088	struct gfar_private *priv = netdev_priv(dev);
3089	struct phy_device *phydev = priv->phydev;
3090
3091	if (unlikely(phydev->link != priv->oldlink ||
3092		     phydev->duplex != priv->oldduplex ||
3093		     phydev->speed != priv->oldspeed))
3094		gfar_update_link_state(priv);
3095}
3096
3097/* Update the hash table based on the current list of multicast
3098 * addresses we subscribe to.  Also, change the promiscuity of
3099 * the device based on the flags (this function is called
3100	 * whenever dev->flags is changed)
3101 */
3102static void gfar_set_multi(struct net_device *dev)
3103{
3104	struct netdev_hw_addr *ha;
3105	struct gfar_private *priv = netdev_priv(dev);
3106	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3107	u32 tempval;
3108
3109	if (dev->flags & IFF_PROMISC) {
3110		/* Set RCTRL to PROM */
3111		tempval = gfar_read(&regs->rctrl);
3112		tempval |= RCTRL_PROM;
3113		gfar_write(&regs->rctrl, tempval);
3114	} else {
3115		/* Set RCTRL to not PROM */
3116		tempval = gfar_read(&regs->rctrl);
3117		tempval &= ~(RCTRL_PROM);
3118		gfar_write(&regs->rctrl, tempval);
3119	}
3120
3121	if (dev->flags & IFF_ALLMULTI) {
3122		/* Set the hash to rx all multicast frames */
3123		gfar_write(&regs->igaddr0, 0xffffffff);
3124		gfar_write(&regs->igaddr1, 0xffffffff);
3125		gfar_write(&regs->igaddr2, 0xffffffff);
3126		gfar_write(&regs->igaddr3, 0xffffffff);
3127		gfar_write(&regs->igaddr4, 0xffffffff);
3128		gfar_write(&regs->igaddr5, 0xffffffff);
3129		gfar_write(&regs->igaddr6, 0xffffffff);
3130		gfar_write(&regs->igaddr7, 0xffffffff);
3131		gfar_write(&regs->gaddr0, 0xffffffff);
3132		gfar_write(&regs->gaddr1, 0xffffffff);
3133		gfar_write(&regs->gaddr2, 0xffffffff);
3134		gfar_write(&regs->gaddr3, 0xffffffff);
3135		gfar_write(&regs->gaddr4, 0xffffffff);
3136		gfar_write(&regs->gaddr5, 0xffffffff);
3137		gfar_write(&regs->gaddr6, 0xffffffff);
3138		gfar_write(&regs->gaddr7, 0xffffffff);
3139	} else {
3140		int em_num;
3141		int idx;
3142
3143		/* zero out the hash */
3144		gfar_write(&regs->igaddr0, 0x0);
3145		gfar_write(&regs->igaddr1, 0x0);
3146		gfar_write(&regs->igaddr2, 0x0);
3147		gfar_write(&regs->igaddr3, 0x0);
3148		gfar_write(&regs->igaddr4, 0x0);
3149		gfar_write(&regs->igaddr5, 0x0);
3150		gfar_write(&regs->igaddr6, 0x0);
3151		gfar_write(&regs->igaddr7, 0x0);
3152		gfar_write(&regs->gaddr0, 0x0);
3153		gfar_write(&regs->gaddr1, 0x0);
3154		gfar_write(&regs->gaddr2, 0x0);
3155		gfar_write(&regs->gaddr3, 0x0);
3156		gfar_write(&regs->gaddr4, 0x0);
3157		gfar_write(&regs->gaddr5, 0x0);
3158		gfar_write(&regs->gaddr6, 0x0);
3159		gfar_write(&regs->gaddr7, 0x0);
3160
3161		/* If we have extended hash tables, we need to
3162		 * clear the exact match registers to prepare for
3163		 * setting them
3164		 */
3165		if (priv->extended_hash) {
3166			em_num = GFAR_EM_NUM + 1;
3167			gfar_clear_exact_match(dev);
3168			idx = 1;
3169		} else {
3170			idx = 0;
3171			em_num = 0;
3172		}
3173
3174		if (netdev_mc_empty(dev))
3175			return;
3176
3177		/* Parse the list, and set the appropriate bits */
3178		netdev_for_each_mc_addr(ha, dev) {
3179			if (idx < em_num) {
3180				gfar_set_mac_for_addr(dev, idx, ha->addr);
3181				idx++;
3182			} else
3183				gfar_set_hash_for_addr(dev, ha->addr);
3184		}
3185	}
3186}
3187
3188
3189/* Clears each of the exact match registers to zero, so they
3190 * don't interfere with normal reception
3191 */
3192static void gfar_clear_exact_match(struct net_device *dev)
3193{
3194	int idx;
3195	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3196
3197	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3198		gfar_set_mac_for_addr(dev, idx, zero_arr);
3199}
3200
3201/* Set the appropriate hash bit for the given addr */
3202/* The algorithm works like so:
3203	 * 1) Take the Destination Address (i.e. the multicast address), and
3204 * do a CRC on it (little endian), and reverse the bits of the
3205 * result.
3206 * 2) Use the 8 most significant bits as a hash into a 256-entry
3207 * table.  The table is controlled through 8 32-bit registers:
3208	 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3209	 * entry 255.  This means that the 3 most significant bits of the
3210	 * hash index select which gaddr register to use, and the 5 other bits
3211 * indicate which bit (assuming an IBM numbering scheme, which
3212 * for PowerPC (tm) is usually the case) in the register holds
3213 * the entry.
3214 */
3215static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3216{
3217	u32 tempval;
3218	struct gfar_private *priv = netdev_priv(dev);
3219	u32 result = ether_crc(ETH_ALEN, addr);
3220	int width = priv->hash_width;
3221	u8 whichbit = (result >> (32 - width)) & 0x1f;
3222	u8 whichreg = result >> (32 - width + 5);
3223	u32 value = (1 << (31-whichbit));
3224
3225	tempval = gfar_read(priv->hash_regs[whichreg]);
3226	tempval |= value;
3227	gfar_write(priv->hash_regs[whichreg], tempval);
3228}
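/* A worked instance of the selection above (user-space sketch, not
 * driver code), assuming hash_width is 8, i.e. the 256-entry gaddr
 * table described in the comment.  For a CRC result with top byte
 * 0xb6 = 0b10110110, the upper 3 bits (0b101 = 5) pick gaddr5 and the
 * next 5 bits (0b10110 = 22) pick IBM bit 22, i.e. host bit 31-22 = 9.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t result = 0xb6000000;	/* pretend CRC of some address */
	int width = 8;			/* assumed hash_width */
	uint8_t whichbit = (result >> (32 - width)) & 0x1f;
	uint8_t whichreg = result >> (32 - width + 5);
	uint32_t value = 1u << (31 - whichbit);

	printf("reg=gaddr%u mask=0x%08lx\n", (unsigned)whichreg,
	       (unsigned long)value);
	/* prints: reg=gaddr5 mask=0x00000200 */
	return 0;
}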
3229
3230
3231/* There are multiple MAC Address register pairs on some controllers
3232 * This function sets the numth pair to a given address
3233 */
3234static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3235				  const u8 *addr)
3236{
3237	struct gfar_private *priv = netdev_priv(dev);
3238	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3239	int idx;
3240	char tmpbuf[ETH_ALEN];
3241	u32 tempval;
3242	u32 __iomem *macptr = &regs->macstnaddr1;
3243
3244	macptr += num*2;
3245
3246	/* Now copy it into the mac registers backwards, since the
3247	 * hardware expects the address bytes in reverse order
3248	 */
3249	for (idx = 0; idx < ETH_ALEN; idx++)
3250		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
3251
3252	gfar_write(macptr, *((u32 *) (tmpbuf)));
3253
3254	tempval = *((u32 *) (tmpbuf + 4));
3255
3256	gfar_write(macptr+1, tempval);
3257}
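/* A worked instance of the byte reversal above (user-space sketch, not
 * driver code): for the address aa:bb:cc:dd:ee:ff, tmpbuf becomes
 * ff:ee:dd:cc:bb:aa, so the two register writes carry 0xffeeddcc
 * (macstnaddr1) and 0xbbaa0000 (macstnaddr2) when read big-endian, as
 * on the PowerPC targets this driver runs on.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t addr[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	uint8_t tmp[8] = { 0 };
	uint32_t lo, hi;
	int i;

	for (i = 0; i < 6; i++)
		tmp[5 - i] = addr[i];

	/* assemble the words big-endian, as the register bus sees them */
	lo = (uint32_t)tmp[0] << 24 | tmp[1] << 16 | tmp[2] << 8 | tmp[3];
	hi = (uint32_t)tmp[4] << 24 | tmp[5] << 16 | tmp[6] << 8 | tmp[7];

	printf("macstnaddr1=0x%08lx macstnaddr2=0x%08lx\n",
	       (unsigned long)lo, (unsigned long)hi);
	/* prints: macstnaddr1=0xffeeddcc macstnaddr2=0xbbaa0000 */
	return 0;
}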
3258
3259/* GFAR error interrupt handler */
3260static irqreturn_t gfar_error(int irq, void *grp_id)
3261{
3262	struct gfar_priv_grp *gfargrp = grp_id;
3263	struct gfar __iomem *regs = gfargrp->regs;
3264	struct gfar_private *priv = gfargrp->priv;
3265	struct net_device *dev = priv->ndev;
3266
3267	/* Save ievent for future reference */
3268	u32 events = gfar_read(&regs->ievent);
3269
3270	/* Clear IEVENT */
3271	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3272
3273	/* Magic Packet is not an error. */
3274	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3275	    (events & IEVENT_MAG))
3276		events &= ~IEVENT_MAG;
3277
3278	/* Log the error details if error messaging is enabled */
3279	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3280		netdev_dbg(dev,
3281			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3282			   events, gfar_read(&regs->imask));
3283
3284	/* Update the error counters */
3285	if (events & IEVENT_TXE) {
3286		dev->stats.tx_errors++;
3287
3288		if (events & IEVENT_LC)
3289			dev->stats.tx_window_errors++;
3290		if (events & IEVENT_CRL)
3291			dev->stats.tx_aborted_errors++;
3292		if (events & IEVENT_XFUN) {
3293			unsigned long flags;
3294
3295			netif_dbg(priv, tx_err, dev,
3296				  "TX FIFO underrun, packet dropped\n");
3297			dev->stats.tx_dropped++;
3298			atomic64_inc(&priv->extra_stats.tx_underrun);
3299
3300			local_irq_save(flags);
3301			lock_tx_qs(priv);
3302
3303			/* Reactivate the Tx Queues */
3304			gfar_write(&regs->tstat, gfargrp->tstat);
3305
3306			unlock_tx_qs(priv);
3307			local_irq_restore(flags);
3308		}
3309		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3310	}
3311	if (events & IEVENT_BSY) {
3312		dev->stats.rx_errors++;
3313		atomic64_inc(&priv->extra_stats.rx_bsy);
3314
3315		gfar_receive(irq, grp_id);
3316
3317		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3318			  gfar_read(&regs->rstat));
3319	}
3320	if (events & IEVENT_BABR) {
3321		dev->stats.rx_errors++;
3322		atomic64_inc(&priv->extra_stats.rx_babr);
3323
3324		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3325	}
3326	if (events & IEVENT_EBERR) {
3327		atomic64_inc(&priv->extra_stats.eberr);
3328		netif_dbg(priv, rx_err, dev, "bus error\n");
3329	}
3330	if (events & IEVENT_RXC)
3331		netif_dbg(priv, rx_status, dev, "control frame\n");
3332
3333	if (events & IEVENT_BABT) {
3334		atomic64_inc(&priv->extra_stats.tx_babt);
3335		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3336	}
3337	return IRQ_HANDLED;
3338}
3339
3340static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3341{
3342	struct phy_device *phydev = priv->phydev;
3343	u32 val = 0;
3344
3345	if (!phydev->duplex)
3346		return val;
3347
3348	if (!priv->pause_aneg_en) {
3349		if (priv->tx_pause_en)
3350			val |= MACCFG1_TX_FLOW;
3351		if (priv->rx_pause_en)
3352			val |= MACCFG1_RX_FLOW;
3353	} else {
3354		u16 lcl_adv, rmt_adv;
3355		u8 flowctrl;
3356		/* get link partner capabilities */
3357		rmt_adv = 0;
3358		if (phydev->pause)
3359			rmt_adv = LPA_PAUSE_CAP;
3360		if (phydev->asym_pause)
3361			rmt_adv |= LPA_PAUSE_ASYM;
3362
3363		lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3364
3365		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3366		if (flowctrl & FLOW_CTRL_TX)
3367			val |= MACCFG1_TX_FLOW;
3368		if (flowctrl & FLOW_CTRL_RX)
3369			val |= MACCFG1_RX_FLOW;
3370	}
3371
3372	return val;
3373}
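/* A simplified sketch of the resolution above (illustrative; see
 * mii_resolve_flowctrl_fdx() in linux/mii.h for the real helper).  It
 * follows the IEEE 802.3 pause truth table:
 */
#include <stdio.h>

#define PAUSE_CAP	0x1	/* symmetric pause advertised */
#define PAUSE_ASYM	0x2	/* asymmetric pause advertised */
#define FC_TX		0x1
#define FC_RX		0x2

static int resolve_fc(int lcl, int rmt)
{
	if (lcl & rmt & PAUSE_CAP)
		return FC_TX | FC_RX;	/* both sides pause: symmetric */
	if (lcl & rmt & PAUSE_ASYM) {
		if (lcl & PAUSE_CAP)
			return FC_RX;	/* partner may pause us */
		if (rmt & PAUSE_CAP)
			return FC_TX;	/* we may pause the partner */
	}
	return 0;			/* no common pause capability */
}

int main(void)
{
	/* we advertise sym+asym, the partner advertises asym only */
	printf("%d\n", resolve_fc(PAUSE_CAP | PAUSE_ASYM, PAUSE_ASYM));
	/* prints 2 (FC_RX only) */
	return 0;
}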
3374
3375static noinline void gfar_update_link_state(struct gfar_private *priv)
3376{
3377	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3378	struct phy_device *phydev = priv->phydev;
3379
3380	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3381		return;
3382
3383	if (phydev->link) {
3384		u32 tempval1 = gfar_read(&regs->maccfg1);
3385		u32 tempval = gfar_read(&regs->maccfg2);
3386		u32 ecntrl = gfar_read(&regs->ecntrl);
3387
3388		if (phydev->duplex != priv->oldduplex) {
3389			if (!(phydev->duplex))
3390				tempval &= ~(MACCFG2_FULL_DUPLEX);
3391			else
3392				tempval |= MACCFG2_FULL_DUPLEX;
3393
3394			priv->oldduplex = phydev->duplex;
3395		}
3396
3397		if (phydev->speed != priv->oldspeed) {
3398			switch (phydev->speed) {
3399			case 1000:
3400				tempval =
3401				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3402
3403				ecntrl &= ~(ECNTRL_R100);
3404				break;
3405			case 100:
3406			case 10:
3407				tempval =
3408				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3409
3410				/* Reduced mode distinguishes
3411				 * between 10 and 100
3412				 */
3413				if (phydev->speed == SPEED_100)
3414					ecntrl |= ECNTRL_R100;
3415				else
3416					ecntrl &= ~(ECNTRL_R100);
3417				break;
3418			default:
3419				netif_warn(priv, link, priv->ndev,
3420					   "Ack!  Speed (%d) is not 10/100/1000!\n",
3421					   phydev->speed);
3422				break;
3423			}
3424
3425			priv->oldspeed = phydev->speed;
3426		}
3427
3428		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3429		tempval1 |= gfar_get_flowctrl_cfg(priv);
3430
3431		gfar_write(&regs->maccfg1, tempval1);
3432		gfar_write(&regs->maccfg2, tempval);
3433		gfar_write(&regs->ecntrl, ecntrl);
3434
3435		if (!priv->oldlink)
3436			priv->oldlink = 1;
3437
3438	} else if (priv->oldlink) {
3439		priv->oldlink = 0;
3440		priv->oldspeed = 0;
3441		priv->oldduplex = -1;
3442	}
3443
3444	if (netif_msg_link(priv))
3445		phy_print_status(phydev);
3446}
3447
3448static struct of_device_id gfar_match[] =
3449{
3450	{
3451		.type = "network",
3452		.compatible = "gianfar",
3453	},
3454	{
3455		.compatible = "fsl,etsec2",
3456	},
3457	{},
3458};
3459MODULE_DEVICE_TABLE(of, gfar_match);
3460
3461/* Structure for a device driver */
3462static struct platform_driver gfar_driver = {
3463	.driver = {
3464		.name = "fsl-gianfar",
3465		.owner = THIS_MODULE,
3466		.pm = GFAR_PM_OPS,
3467		.of_match_table = gfar_match,
3468	},
3469	.probe = gfar_probe,
3470	.remove = gfar_remove,
3471};
3472
3473module_platform_driver(gfar_driver);
v4.17
  83#include <linux/of_platform.h>
  84#include <linux/ip.h>
  85#include <linux/tcp.h>
  86#include <linux/udp.h>
  87#include <linux/in.h>
  88#include <linux/net_tstamp.h>
  89
  90#include <asm/io.h>
  91#ifdef CONFIG_PPC
  92#include <asm/reg.h>
  93#include <asm/mpc85xx.h>
  94#endif
  95#include <asm/irq.h>
  96#include <linux/uaccess.h>
  97#include <linux/module.h>
  98#include <linux/dma-mapping.h>
  99#include <linux/crc32.h>
 100#include <linux/mii.h>
 101#include <linux/phy.h>
 102#include <linux/phy_fixed.h>
 103#include <linux/of.h>
 104#include <linux/of_net.h>
 105#include <linux/of_address.h>
 106#include <linux/of_irq.h>
 107
 108#include "gianfar.h"
 109
 110#define TX_TIMEOUT      (5*HZ)
 111
 112const char gfar_driver_version[] = "2.0";
 113
 114static int gfar_enet_open(struct net_device *dev);
 115static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 116static void gfar_reset_task(struct work_struct *work);
 117static void gfar_timeout(struct net_device *dev);
 118static int gfar_close(struct net_device *dev);
 119static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
 120				int alloc_cnt);
 121static int gfar_set_mac_address(struct net_device *dev);
 122static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 123static irqreturn_t gfar_error(int irq, void *dev_id);
 124static irqreturn_t gfar_transmit(int irq, void *dev_id);
 125static irqreturn_t gfar_interrupt(int irq, void *dev_id);
 126static void adjust_link(struct net_device *dev);
 127static noinline void gfar_update_link_state(struct gfar_private *priv);
 128static int init_phy(struct net_device *dev);
 129static int gfar_probe(struct platform_device *ofdev);
 130static int gfar_remove(struct platform_device *ofdev);
 131static void free_skb_resources(struct gfar_private *priv);
 132static void gfar_set_multi(struct net_device *dev);
 133static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
 134static void gfar_configure_serdes(struct net_device *dev);
 135static int gfar_poll_rx(struct napi_struct *napi, int budget);
 136static int gfar_poll_tx(struct napi_struct *napi, int budget);
 137static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
 138static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
 139#ifdef CONFIG_NET_POLL_CONTROLLER
 140static void gfar_netpoll(struct net_device *dev);
 141#endif
 142int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 143static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 144static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
 145static void gfar_halt_nodisable(struct gfar_private *priv);
 146static void gfar_clear_exact_match(struct net_device *dev);
 147static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 148				  const u8 *addr);
 149static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 150
 151MODULE_AUTHOR("Freescale Semiconductor, Inc");
 152MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 153MODULE_LICENSE("GPL");
 154
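    /* Point one Rx buffer descriptor at a freshly mapped buffer and mark
     * it empty for hardware.  The wrap bit is set on the ring's last BD,
     * and gfar_wmb() orders the address write before the status update so
     * the controller never sees an "empty" BD with a stale buffer pointer.
     */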
 155static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 156			    dma_addr_t buf)
 157{
 158	u32 lstatus;
 159
 160	bdp->bufPtr = cpu_to_be32(buf);
 161
 162	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
 163	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
 164		lstatus |= BD_LFLAG(RXBD_WRAP);
 165
 166	gfar_wmb();
 167
 168	bdp->lstatus = cpu_to_be32(lstatus);
 169}
 170
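    /* Reset every Tx and Rx ring to a clean state: all Tx BDs are zeroed
     * (with the wrap bit restored on the last one), the Rx rings are
     * refilled with fresh buffers, and each Rx queue is bound to its
     * free-buffer pointer register (rfbptr0 and friends, 8 bytes apart).
     */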
 171static void gfar_init_bds(struct net_device *ndev)
 172{
 173	struct gfar_private *priv = netdev_priv(ndev);
 174	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 175	struct gfar_priv_tx_q *tx_queue = NULL;
 176	struct gfar_priv_rx_q *rx_queue = NULL;
 177	struct txbd8 *txbdp;
 178	u32 __iomem *rfbptr;
 179	int i, j;
 180
 181	for (i = 0; i < priv->num_tx_queues; i++) {
 182		tx_queue = priv->tx_queue[i];
 183		/* Initialize some variables in our dev structure */
 184		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
 185		tx_queue->dirty_tx = tx_queue->tx_bd_base;
 186		tx_queue->cur_tx = tx_queue->tx_bd_base;
 187		tx_queue->skb_curtx = 0;
 188		tx_queue->skb_dirtytx = 0;
 189
 190		/* Initialize Transmit Descriptor Ring */
 191		txbdp = tx_queue->tx_bd_base;
 192		for (j = 0; j < tx_queue->tx_ring_size; j++) {
 193			txbdp->lstatus = 0;
 194			txbdp->bufPtr = 0;
 195			txbdp++;
 196		}
 197
 198		/* Set the last descriptor in the ring to indicate wrap */
 199		txbdp--;
 200		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
 201					    TXBD_WRAP);
 202	}
 203
 204	rfbptr = &regs->rfbptr0;
 205	for (i = 0; i < priv->num_rx_queues; i++) {
 206		rx_queue = priv->rx_queue[i];
 207
 208		rx_queue->next_to_clean = 0;
 209		rx_queue->next_to_use = 0;
 210		rx_queue->next_to_alloc = 0;
 211
 212		/* make sure next_to_clean != next_to_use after this
 213		 * by leaving at least 1 unused descriptor
 214		 */
 215		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
 216
 217		rx_queue->rfbptr = rfbptr;
 218		rfbptr += 2;
 219	}
 220}
 221
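    /* Allocate all Tx and Rx buffer descriptor rings from one coherent
     * DMA block (Tx rings first, Rx rings immediately after), then the
     * per-queue skb/buffer bookkeeping arrays.  Everything is torn down
     * through free_skb_resources() if any allocation fails.
     */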
 222static int gfar_alloc_skb_resources(struct net_device *ndev)
 223{
 224	void *vaddr;
 225	dma_addr_t addr;
 226	int i, j;
 227	struct gfar_private *priv = netdev_priv(ndev);
 228	struct device *dev = priv->dev;
 229	struct gfar_priv_tx_q *tx_queue = NULL;
 230	struct gfar_priv_rx_q *rx_queue = NULL;
 231
 232	priv->total_tx_ring_size = 0;
 233	for (i = 0; i < priv->num_tx_queues; i++)
 234		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
 235
 236	priv->total_rx_ring_size = 0;
 237	for (i = 0; i < priv->num_rx_queues; i++)
 238		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
 239
 240	/* Allocate memory for the buffer descriptors */
 241	vaddr = dma_alloc_coherent(dev,
 242				   (priv->total_tx_ring_size *
 243				    sizeof(struct txbd8)) +
 244				   (priv->total_rx_ring_size *
 245				    sizeof(struct rxbd8)),
 246				   &addr, GFP_KERNEL);
 247	if (!vaddr)
 248		return -ENOMEM;
 249
 250	for (i = 0; i < priv->num_tx_queues; i++) {
 251		tx_queue = priv->tx_queue[i];
 252		tx_queue->tx_bd_base = vaddr;
 253		tx_queue->tx_bd_dma_base = addr;
 254		tx_queue->dev = ndev;
 255		/* enet DMA only understands physical addresses */
 256		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
 257		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
 258	}
 259
 260	/* Start the rx descriptor ring where the tx ring leaves off */
 261	for (i = 0; i < priv->num_rx_queues; i++) {
 262		rx_queue = priv->rx_queue[i];
 263		rx_queue->rx_bd_base = vaddr;
 264		rx_queue->rx_bd_dma_base = addr;
 265		rx_queue->ndev = ndev;
 266		rx_queue->dev = dev;
 267		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 268		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 269	}
 270
 271	/* Setup the skbuff rings */
 272	for (i = 0; i < priv->num_tx_queues; i++) {
 273		tx_queue = priv->tx_queue[i];
 274		tx_queue->tx_skbuff =
 275			kmalloc_array(tx_queue->tx_ring_size,
 276				      sizeof(*tx_queue->tx_skbuff),
 277				      GFP_KERNEL);
 278		if (!tx_queue->tx_skbuff)
 279			goto cleanup;
 280
 281		for (j = 0; j < tx_queue->tx_ring_size; j++)
 282			tx_queue->tx_skbuff[j] = NULL;
 283	}
 284
 285	for (i = 0; i < priv->num_rx_queues; i++) {
 286		rx_queue = priv->rx_queue[i];
 287		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
 288					    sizeof(*rx_queue->rx_buff),
 289					    GFP_KERNEL);
 290		if (!rx_queue->rx_buff)
 291			goto cleanup;
 292	}
 293
 294	gfar_init_bds(ndev);
 295
 296	return 0;
 297
 298cleanup:
 299	free_skb_resources(priv);
 300	return -ENOMEM;
 301}
 302
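    /* Program the DMA base-address register of each ring.  The tbaseN/
     * rbaseN registers live on 8-byte strides, hence the "baddr += 2"
     * stepping over u32 pointers.
     */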
 303static void gfar_init_tx_rx_base(struct gfar_private *priv)
 304{
 305	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 306	u32 __iomem *baddr;
 307	int i;
 308
 309	baddr = &regs->tbase0;
 310	for (i = 0; i < priv->num_tx_queues; i++) {
 311		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
 312		baddr += 2;
 313	}
 314
 315	baddr = &regs->rbase0;
 316	for (i = 0; i < priv->num_rx_queues; i++) {
 317		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
 318		baddr += 2;
 319	}
 320}
 321
 322static void gfar_init_rqprm(struct gfar_private *priv)
 323{
 324	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 325	u32 __iomem *baddr;
 326	int i;
 327
 328	baddr = &regs->rqprm0;
 329	for (i = 0; i < priv->num_rx_queues; i++) {
 330		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
 331			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
 332		baddr++;
 333	}
 334}
 335
 336static void gfar_rx_offload_en(struct gfar_private *priv)
 337{
 338	/* set this when rx hw offload (TOE) functions are being used */
 339	priv->uses_rxfcb = 0;
 340
 341	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
 342		priv->uses_rxfcb = 1;
 343
 344	if (priv->hwts_rx_en || priv->rx_filer_enable)
 345		priv->uses_rxfcb = 1;
 346}
 347
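    /* Build RCTRL from the current feature set: filer, promiscuous mode,
     * Rx checksumming, extended hash, padding, timestamping and VLAN
     * extraction all end up as bits in this one register.
     */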
 348static void gfar_mac_rx_config(struct gfar_private *priv)
 349{
 350	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 351	u32 rctrl = 0;
 352
 353	if (priv->rx_filer_enable) {
 354		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
 355		/* Program the RIR0 reg with the required distribution */
 356		if (priv->poll_mode == GFAR_SQ_POLLING)
 357			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
 358		else /* GFAR_MQ_POLLING */
 359			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
 360	}
 361
 362	/* Restore PROMISC mode */
 363	if (priv->ndev->flags & IFF_PROMISC)
 364		rctrl |= RCTRL_PROM;
 365
 366	if (priv->ndev->features & NETIF_F_RXCSUM)
 367		rctrl |= RCTRL_CHECKSUMMING;
 368
 369	if (priv->extended_hash)
 370		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
 371
 372	if (priv->padding) {
 373		rctrl &= ~RCTRL_PAL_MASK;
 374		rctrl |= RCTRL_PADDING(priv->padding);
 375	}
 376
 377	/* Enable HW time stamping if requested from user space */
 378	if (priv->hwts_rx_en)
 379		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
 380
 381	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
 382		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
 383
 384	/* Clear the LFC bit */
 385	gfar_write(&regs->rctrl, rctrl);
 386	/* Init flow control threshold values */
 387	gfar_init_rqprm(priv);
 388	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
 389	rctrl |= RCTRL_LFC;
 390
 391	/* Init rctrl based on our settings */
 392	gfar_write(&regs->rctrl, rctrl);
 393}
 394
 395static void gfar_mac_tx_config(struct gfar_private *priv)
 396{
 397	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 398	u32 tctrl = 0;
 399
 400	if (priv->ndev->features & NETIF_F_IP_CSUM)
 401		tctrl |= TCTRL_INIT_CSUM;
 402
 403	if (priv->prio_sched_en)
 404		tctrl |= TCTRL_TXSCHED_PRIO;
 405	else {
 406		tctrl |= TCTRL_TXSCHED_WRRS;
 407		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
 408		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
 409	}
 410
 411	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
 412		tctrl |= TCTRL_VLINS;
 413
 414	gfar_write(&regs->tctrl, tctrl);
 415}
 416
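    /* Program the interrupt-coalescing registers for the queues selected
     * by tx_mask/rx_mask.  Each register is first cleared (coalescing
     * off), then rewritten with the queue's txic/rxic value if coalescing
     * is enabled; single-group devices have only the one txic/rxic pair.
     */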
 417static void gfar_configure_coalescing(struct gfar_private *priv,
 418			       unsigned long tx_mask, unsigned long rx_mask)
 419{
 420	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 421	u32 __iomem *baddr;
 422
 423	if (priv->mode == MQ_MG_MODE) {
 424		int i = 0;
 425
 426		baddr = &regs->txic0;
 427		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
 428			gfar_write(baddr + i, 0);
 429			if (likely(priv->tx_queue[i]->txcoalescing))
 430				gfar_write(baddr + i, priv->tx_queue[i]->txic);
 431		}
 432
 433		baddr = &regs->rxic0;
 434		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
 435			gfar_write(baddr + i, 0);
 436			if (likely(priv->rx_queue[i]->rxcoalescing))
 437				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
 438		}
 439	} else {
 440		/* Backward compatible case -- even if we enable
 441		 * multiple queues, there's only a single reg to program
 442		 */
 443		gfar_write(&regs->txic, 0);
 444		if (likely(priv->tx_queue[0]->txcoalescing))
 445			gfar_write(&regs->txic, priv->tx_queue[0]->txic);
 446
 447		gfar_write(&regs->rxic, 0);
 448		if (unlikely(priv->rx_queue[0]->rxcoalescing))
 449			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
 450	}
 451}
 452
 453void gfar_configure_coalescing_all(struct gfar_private *priv)
 454{
 455	gfar_configure_coalescing(priv, 0xFF, 0xFF);
 456}
 457
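    /* Fold the per-queue software counters into the single netdev stats
     * structure the stack expects.
     */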
 458static struct net_device_stats *gfar_get_stats(struct net_device *dev)
 459{
 460	struct gfar_private *priv = netdev_priv(dev);
 461	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
 462	unsigned long tx_packets = 0, tx_bytes = 0;
 463	int i;
 464
 465	for (i = 0; i < priv->num_rx_queues; i++) {
 466		rx_packets += priv->rx_queue[i]->stats.rx_packets;
 467		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
 468		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
 469	}
 470
 471	dev->stats.rx_packets = rx_packets;
 472	dev->stats.rx_bytes   = rx_bytes;
 473	dev->stats.rx_dropped = rx_dropped;
 474
 475	for (i = 0; i < priv->num_tx_queues; i++) {
 476		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
 477		tx_packets += priv->tx_queue[i]->stats.tx_packets;
 478	}
 479
 480	dev->stats.tx_bytes   = tx_bytes;
 481	dev->stats.tx_packets = tx_packets;
 482
 483	return &dev->stats;
 484}
 485
 486static int gfar_set_mac_addr(struct net_device *dev, void *p)
 487{
 488	eth_mac_addr(dev, p);
 489
 490	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
 491
 492	return 0;
 493}
 494
 495static const struct net_device_ops gfar_netdev_ops = {
 496	.ndo_open = gfar_enet_open,
 497	.ndo_start_xmit = gfar_start_xmit,
 498	.ndo_stop = gfar_close,
 499	.ndo_change_mtu = gfar_change_mtu,
 500	.ndo_set_features = gfar_set_features,
 501	.ndo_set_rx_mode = gfar_set_multi,
 502	.ndo_tx_timeout = gfar_timeout,
 503	.ndo_do_ioctl = gfar_ioctl,
 504	.ndo_get_stats = gfar_get_stats,
 505	.ndo_set_mac_address = gfar_set_mac_addr,
 506	.ndo_validate_addr = eth_validate_addr,
 507#ifdef CONFIG_NET_POLL_CONTROLLER
 508	.ndo_poll_controller = gfar_netpoll,
 509#endif
 510};
 511
 512static void gfar_ints_disable(struct gfar_private *priv)
 513{
 514	int i;
 515	for (i = 0; i < priv->num_grps; i++) {
 516		struct gfar __iomem *regs = priv->gfargrp[i].regs;
 517		/* Clear IEVENT */
 518		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
 519
 520		/* Initialize IMASK */
 521		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 522	}
 523}
 524
 525static void gfar_ints_enable(struct gfar_private *priv)
 526{
 527	int i;
 528	for (i = 0; i < priv->num_grps; i++) {
 529		struct gfar __iomem *regs = priv->gfargrp[i].regs;
 530		/* Unmask the interrupts we look for */
 531		gfar_write(&regs->imask, IMASK_DEFAULT);
 532	}
 533}
 534
 535static int gfar_alloc_tx_queues(struct gfar_private *priv)
 536{
 537	int i;
 538
 539	for (i = 0; i < priv->num_tx_queues; i++) {
 540		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
 541					    GFP_KERNEL);
 542		if (!priv->tx_queue[i])
 543			return -ENOMEM;
 544
 545		priv->tx_queue[i]->tx_skbuff = NULL;
 546		priv->tx_queue[i]->qindex = i;
 547		priv->tx_queue[i]->dev = priv->ndev;
 548		spin_lock_init(&(priv->tx_queue[i]->txlock));
 549	}
 550	return 0;
 551}
 552
 553static int gfar_alloc_rx_queues(struct gfar_private *priv)
 554{
 555	int i;
 556
 557	for (i = 0; i < priv->num_rx_queues; i++) {
 558		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
 559					    GFP_KERNEL);
 560		if (!priv->rx_queue[i])
 561			return -ENOMEM;
 562
 563		priv->rx_queue[i]->qindex = i;
 564		priv->rx_queue[i]->ndev = priv->ndev;
 565	}
 566	return 0;
 567}
 568
 569static void gfar_free_tx_queues(struct gfar_private *priv)
 570{
 571	int i;
 572
 573	for (i = 0; i < priv->num_tx_queues; i++)
 574		kfree(priv->tx_queue[i]);
 575}
 576
 577static void gfar_free_rx_queues(struct gfar_private *priv)
 578{
 579	int i;
 580
 581	for (i = 0; i < priv->num_rx_queues; i++)
 582		kfree(priv->rx_queue[i]);
 583}
 584
 585static void unmap_group_regs(struct gfar_private *priv)
 586{
 587	int i;
 588
 589	for (i = 0; i < MAXGROUPS; i++)
 590		if (priv->gfargrp[i].regs)
 591			iounmap(priv->gfargrp[i].regs);
 592}
 593
 594static void free_gfar_dev(struct gfar_private *priv)
 595{
 596	int i, j;
 597
 598	for (i = 0; i < priv->num_grps; i++)
 599		for (j = 0; j < GFAR_NUM_IRQS; j++) {
 600			kfree(priv->gfargrp[i].irqinfo[j]);
 601			priv->gfargrp[i].irqinfo[j] = NULL;
 602		}
 603
 604	free_netdev(priv->ndev);
 605}
 606
 607static void disable_napi(struct gfar_private *priv)
 608{
 609	int i;
 610
 611	for (i = 0; i < priv->num_grps; i++) {
 612		napi_disable(&priv->gfargrp[i].napi_rx);
 613		napi_disable(&priv->gfargrp[i].napi_tx);
 614	}
 615}
 616
 617static void enable_napi(struct gfar_private *priv)
 618{
 619	int i;
 620
 621	for (i = 0; i < priv->num_grps; i++) {
 622		napi_enable(&priv->gfargrp[i].napi_rx);
 623		napi_enable(&priv->gfargrp[i].napi_tx);
 624	}
 625}
 626
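    /* Parse one interrupt group (a "queue-group" node, or the whole
     * device node in single-group mode): map its register block, grab its
     * TX/RX/error IRQs, and work out which Rx/Tx queues it owns from the
     * fsl,rx-bit-map / fsl,tx-bit-map properties (or the defaults).
     */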
 627static int gfar_parse_group(struct device_node *np,
 628			    struct gfar_private *priv, const char *model)
 629{
 630	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
 631	int i;
 632
 633	for (i = 0; i < GFAR_NUM_IRQS; i++) {
 634		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
 635					  GFP_KERNEL);
 636		if (!grp->irqinfo[i])
 637			return -ENOMEM;
 638	}
 639
 640	grp->regs = of_iomap(np, 0);
 641	if (!grp->regs)
 642		return -ENOMEM;
 643
 644	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
 645
 646	/* If we aren't the FEC, we have multiple interrupts */
 647	if (model && strcasecmp(model, "FEC")) {
 648		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
 649		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
 650		if (!gfar_irq(grp, TX)->irq ||
 651		    !gfar_irq(grp, RX)->irq ||
 652		    !gfar_irq(grp, ER)->irq)
 653			return -EINVAL;
 654	}
 655
 656	grp->priv = priv;
 657	spin_lock_init(&grp->grplock);
 658	if (priv->mode == MQ_MG_MODE) {
 659		u32 rxq_mask, txq_mask;
 660		int ret;
 661
 662		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
 663		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
 664
 665		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
 666		if (!ret) {
 667			grp->rx_bit_map = rxq_mask ?
 668			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
 669		}
 670
 671		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
 672		if (!ret) {
 673			grp->tx_bit_map = txq_mask ?
 674			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
 675		}
 676
 677		if (priv->poll_mode == GFAR_SQ_POLLING) {
 678			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
 679			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
 680			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
 681		}
 682	} else {
 683		grp->rx_bit_map = 0xFF;
 684		grp->tx_bit_map = 0xFF;
 685	}
 686
 687	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
 688	 * right to left, so we need to reverse the 8 bits to get the q index
 689	 */
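	/* e.g. bitrev8(0x80) == 0x01: a map with only its MSB (q0) set
	 * becomes bit 0, so for_each_set_bit() yields queue index 0 first.
	 */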
 690	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
 691	grp->tx_bit_map = bitrev8(grp->tx_bit_map);
 692
 693	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
 694	 * also assign queues to groups
 695	 */
 696	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
 697		if (!grp->rx_queue)
 698			grp->rx_queue = priv->rx_queue[i];
 699		grp->num_rx_queues++;
 700		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
 701		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
 702		priv->rx_queue[i]->grp = grp;
 703	}
 704
 705	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
 706		if (!grp->tx_queue)
 707			grp->tx_queue = priv->tx_queue[i];
 708		grp->num_tx_queues++;
 709		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
 710		priv->tqueue |= (TQUEUE_EN0 >> i);
 711		priv->tx_queue[i]->grp = grp;
 712	}
 713
 714	priv->num_grps++;
 715
 716	return 0;
 717}
 718
 719static int gfar_of_group_count(struct device_node *np)
 720{
 721	struct device_node *child;
 722	int num = 0;
 723
 724	for_each_available_child_of_node(np, child)
 725		if (!of_node_cmp(child->name, "queue-group"))
 726			num++;
 727
 728	return num;
 729}
 730
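    /* Pull the controller configuration out of the device tree.  As a
     * rough, hypothetical sketch (the property names are the ones parsed
     * below; the values are invented, not from any real board file):
     *
     *	ethernet@24000 {
     *		compatible = "fsl,etsec2";
     *		model = "eTSEC";
     *		fsl,num_tx_queues = <8>;
     *		fsl,num_rx_queues = <8>;
     *		fsl,magic-packet;
     *		phy-handle = <&phy0>;
     *		phy-connection-type = "rgmii-id";
     *		queue-group {
     *			...
     *		};
     *	};
     */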
 731static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 732{
 733	const char *model;
 734	const char *ctype;
 735	const void *mac_addr;
 736	int err = 0, i;
 737	struct net_device *dev = NULL;
 738	struct gfar_private *priv = NULL;
 739	struct device_node *np = ofdev->dev.of_node;
 740	struct device_node *child = NULL;
 741	u32 stash_len = 0;
 742	u32 stash_idx = 0;
 743	unsigned int num_tx_qs, num_rx_qs;
 744	unsigned short mode, poll_mode;
 745
 746	if (!np)
 747		return -ENODEV;
 748
 749	if (of_device_is_compatible(np, "fsl,etsec2")) {
 750		mode = MQ_MG_MODE;
 751		poll_mode = GFAR_SQ_POLLING;
 752	} else {
 753		mode = SQ_SG_MODE;
 754		poll_mode = GFAR_SQ_POLLING;
 755	}
 756
 757	if (mode == SQ_SG_MODE) {
 758		num_tx_qs = 1;
 759		num_rx_qs = 1;
 760	} else { /* MQ_MG_MODE */
 761		/* get the actual number of supported groups */
 762		unsigned int num_grps = gfar_of_group_count(np);
 763
 764		if (num_grps == 0 || num_grps > MAXGROUPS) {
 765			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
 766				num_grps);
 767			pr_err("Cannot do alloc_etherdev, aborting\n");
 768			return -EINVAL;
 769		}
 770
 771		if (poll_mode == GFAR_SQ_POLLING) {
 772			num_tx_qs = num_grps; /* one txq per int group */
 773			num_rx_qs = num_grps; /* one rxq per int group */
 774		} else { /* GFAR_MQ_POLLING */
 775			u32 tx_queues, rx_queues;
 776			int ret;
 777
 778			/* parse the num of HW tx and rx queues */
 779			ret = of_property_read_u32(np, "fsl,num_tx_queues",
 780						   &tx_queues);
 781			num_tx_qs = ret ? 1 : tx_queues;
 782
 783			ret = of_property_read_u32(np, "fsl,num_rx_queues",
 784						   &rx_queues);
 785			num_rx_qs = ret ? 1 : rx_queues;
 786		}
 787	}
 788
 789	if (num_tx_qs > MAX_TX_QS) {
 790		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
 791		       num_tx_qs, MAX_TX_QS);
 792		pr_err("Cannot do alloc_etherdev, aborting\n");
 793		return -EINVAL;
 794	}
 795
 796	if (num_rx_qs > MAX_RX_QS) {
 797		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
 798		       num_rx_qs, MAX_RX_QS);
 799		pr_err("Cannot do alloc_etherdev, aborting\n");
 800		return -EINVAL;
 801	}
 802
 803	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
 804	dev = *pdev;
 805	if (!dev)
 806		return -ENOMEM;
 807
 808	priv = netdev_priv(dev);
 809	priv->ndev = dev;
 810
 811	priv->mode = mode;
 812	priv->poll_mode = poll_mode;
 813
 814	priv->num_tx_queues = num_tx_qs;
 815	netif_set_real_num_rx_queues(dev, num_rx_qs);
 816	priv->num_rx_queues = num_rx_qs;
 817
 818	err = gfar_alloc_tx_queues(priv);
 819	if (err)
 820		goto tx_alloc_failed;
 821
 822	err = gfar_alloc_rx_queues(priv);
 823	if (err)
 824		goto rx_alloc_failed;
 825
 826	err = of_property_read_string(np, "model", &model);
 827	if (err) {
 828		pr_err("Device model property missing, aborting\n");
 829		goto rx_alloc_failed;
 830	}
 831
 832	/* Init Rx queue filer rule set linked list */
 833	INIT_LIST_HEAD(&priv->rx_list.list);
 834	priv->rx_list.count = 0;
 835	mutex_init(&priv->rx_queue_access);
 836
 837	for (i = 0; i < MAXGROUPS; i++)
 838		priv->gfargrp[i].regs = NULL;
 839
 840	/* Parse and initialize group specific information */
 841	if (priv->mode == MQ_MG_MODE) {
 842		for_each_available_child_of_node(np, child) {
 843			if (of_node_cmp(child->name, "queue-group"))
 844				continue;
 845
 846			err = gfar_parse_group(child, priv, model);
 847			if (err)
 848				goto err_grp_init;
 849		}
 850	} else { /* SQ_SG_MODE */
 851		err = gfar_parse_group(np, priv, model);
 852		if (err)
 853			goto err_grp_init;
 854	}
 855
 856	if (of_property_read_bool(np, "bd-stash")) {
 857		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
 858		priv->bd_stash_en = 1;
 859	}
 860
 861	err = of_property_read_u32(np, "rx-stash-len", &stash_len);
 862
 863	if (err == 0)
 864		priv->rx_stash_size = stash_len;
 865
 866	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
 867
 868	if (err == 0)
 869		priv->rx_stash_index = stash_idx;
 870
 871	if (stash_len || stash_idx)
 872		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
 873
 874	mac_addr = of_get_mac_address(np);
 875
 876	if (mac_addr)
 877		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 878
 879	if (model && !strcasecmp(model, "TSEC"))
 880		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
 881				     FSL_GIANFAR_DEV_HAS_COALESCE |
 882				     FSL_GIANFAR_DEV_HAS_RMON |
 883				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
 884
 885	if (model && !strcasecmp(model, "eTSEC"))
 886		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
 887				     FSL_GIANFAR_DEV_HAS_COALESCE |
 888				     FSL_GIANFAR_DEV_HAS_RMON |
 889				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
 890				     FSL_GIANFAR_DEV_HAS_CSUM |
 891				     FSL_GIANFAR_DEV_HAS_VLAN |
 892				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
 893				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
 894				     FSL_GIANFAR_DEV_HAS_TIMER |
 895				     FSL_GIANFAR_DEV_HAS_RX_FILER;
 896
 897	err = of_property_read_string(np, "phy-connection-type", &ctype);
 898
 899	/* We only care about rgmii-id.  The rest are autodetected */
 900	if (err == 0 && !strcmp(ctype, "rgmii-id"))
 901		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
 902	else
 903		priv->interface = PHY_INTERFACE_MODE_MII;
 904
 905	if (of_find_property(np, "fsl,magic-packet", NULL))
 906		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
 907
 908	if (of_get_property(np, "fsl,wake-on-filer", NULL))
 909		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
 910
 911	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
 912
 913	/* In the case of a fixed PHY, the DT node associated
 914	 * to the PHY is the Ethernet MAC DT node.
 915	 */
 916	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
 917		err = of_phy_register_fixed_link(np);
 918		if (err)
 919			goto err_grp_init;
 920
 921		priv->phy_node = of_node_get(np);
 922	}
 923
 924	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
 925	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
 926
 927	return 0;
 928
 929err_grp_init:
 930	unmap_group_regs(priv);
 931rx_alloc_failed:
 932	gfar_free_rx_queues(priv);
 933tx_alloc_failed:
 934	gfar_free_tx_queues(priv);
 935	free_gfar_dev(priv);
 936	return err;
 937}
 938
 939static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
 940{
 941	struct hwtstamp_config config;
 942	struct gfar_private *priv = netdev_priv(netdev);
 943
 944	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
 945		return -EFAULT;
 946
 947	/* reserved for future extensions */
 948	if (config.flags)
 949		return -EINVAL;
 950
 951	switch (config.tx_type) {
 952	case HWTSTAMP_TX_OFF:
 953		priv->hwts_tx_en = 0;
 954		break;
 955	case HWTSTAMP_TX_ON:
 956		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
 957			return -ERANGE;
 958		priv->hwts_tx_en = 1;
 959		break;
 960	default:
 961		return -ERANGE;
 962	}
 963
 964	switch (config.rx_filter) {
 965	case HWTSTAMP_FILTER_NONE:
 966		if (priv->hwts_rx_en) {
 967			priv->hwts_rx_en = 0;
 968			reset_gfar(netdev);
 969		}
 970		break;
 971	default:
 972		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
 973			return -ERANGE;
 974		if (!priv->hwts_rx_en) {
 975			priv->hwts_rx_en = 1;
 976			reset_gfar(netdev);
 977		}
 978		config.rx_filter = HWTSTAMP_FILTER_ALL;
 979		break;
 980	}
 981
 982	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 983		-EFAULT : 0;
 984}
 985
 986static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
 987{
 988	struct hwtstamp_config config;
 989	struct gfar_private *priv = netdev_priv(netdev);
 990
 991	config.flags = 0;
 992	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
 993	config.rx_filter = (priv->hwts_rx_en ?
 994			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
 995
 996	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 997		-EFAULT : 0;
 998}
 999
1000static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1001{
1002	struct phy_device *phydev = dev->phydev;
1003
1004	if (!netif_running(dev))
1005		return -EINVAL;
1006
1007	if (cmd == SIOCSHWTSTAMP)
1008		return gfar_hwtstamp_set(dev, rq);
1009	if (cmd == SIOCGHWTSTAMP)
1010		return gfar_hwtstamp_get(dev, rq);
1011
1012	if (!phydev)
1013		return -ENODEV;
1014
1015	return phy_mii_ioctl(phydev, rq, cmd);
1016}
1017
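    /* Write one four-entry filer "cluster" that matches a single protocol
     * class (e.g. IPv4|UDP).  Entries are laid down from high filer
     * indices downward, which is why rqfar is pre-decremented and the
     * updated value handed back to the caller.
     */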
1018static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
1019				   u32 class)
1020{
1021	u32 rqfpr = FPR_FILER_MASK;
1022	u32 rqfcr = 0x0;
1023
1024	rqfar--;
1025	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
1026	priv->ftp_rqfpr[rqfar] = rqfpr;
1027	priv->ftp_rqfcr[rqfar] = rqfcr;
1028	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1029
1030	rqfar--;
1031	rqfcr = RQFCR_CMP_NOMATCH;
1032	priv->ftp_rqfpr[rqfar] = rqfpr;
1033	priv->ftp_rqfcr[rqfar] = rqfcr;
1034	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1035
1036	rqfar--;
1037	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
1038	rqfpr = class;
1039	priv->ftp_rqfcr[rqfar] = rqfcr;
1040	priv->ftp_rqfpr[rqfar] = rqfpr;
1041	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1042
1043	rqfar--;
1044	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
1045	rqfpr = class;
1046	priv->ftp_rqfcr[rqfar] = rqfcr;
1047	priv->ftp_rqfpr[rqfar] = rqfpr;
1048	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1049
1050	return rqfar;
1051}
1052
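    /* Populate the Rx filer with a default catch-all rule plus one
     * cluster per IPv4/IPv6 x TCP/UDP combination; every remaining entry
     * is masked off with a no-match rule.
     */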
1053static void gfar_init_filer_table(struct gfar_private *priv)
1054{
1055	int i = 0x0;
1056	u32 rqfar = MAX_FILER_IDX;
1057	u32 rqfcr = 0x0;
1058	u32 rqfpr = FPR_FILER_MASK;
1059
1060	/* Default rule */
1061	rqfcr = RQFCR_CMP_MATCH;
1062	priv->ftp_rqfcr[rqfar] = rqfcr;
1063	priv->ftp_rqfpr[rqfar] = rqfpr;
1064	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1065
1066	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
1067	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
1068	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
1069	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
1070	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
1071	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
1072
1073	/* cur_filer_idx indicates the first non-masked rule */
1074	priv->cur_filer_idx = rqfar;
1075
1076	/* Rest are masked rules */
1077	rqfcr = RQFCR_CMP_NOMATCH;
1078	for (i = 0; i < rqfar; i++) {
1079		priv->ftp_rqfcr[i] = rqfcr;
1080		priv->ftp_rqfpr[i] = rqfpr;
1081		gfar_write_filer(priv, i, rqfcr, rqfpr);
1082	}
1083}
1084
1085#ifdef CONFIG_PPC
1086static void __gfar_detect_errata_83xx(struct gfar_private *priv)
1087{
1088	unsigned int pvr = mfspr(SPRN_PVR);
1089	unsigned int svr = mfspr(SPRN_SVR);
1090	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
1091	unsigned int rev = svr & 0xffff;
1092
1093	/* MPC8313 Rev 2.0 and higher; All MPC837x */
1094	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
1095	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1096		priv->errata |= GFAR_ERRATA_74;
1097
1098	/* MPC8313 and MPC837x all rev */
1099	if ((pvr == 0x80850010 && mod == 0x80b0) ||
1100	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1101		priv->errata |= GFAR_ERRATA_76;
1102
1103	/* MPC8313 Rev < 2.0 */
1104	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
1105		priv->errata |= GFAR_ERRATA_12;
1106}
1107
1108static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1109{
1110	unsigned int svr = mfspr(SPRN_SVR);
1111
1112	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
1113		priv->errata |= GFAR_ERRATA_12;
1114	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
1115	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
1116	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
1117	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
1118		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
1119}
1120#endif
1121
1122static void gfar_detect_errata(struct gfar_private *priv)
1123{
1124	struct device *dev = &priv->ofdev->dev;
1125
1126	/* no plans to fix */
1127	priv->errata |= GFAR_ERRATA_A002;
1128
1129#ifdef CONFIG_PPC
1130	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
1131		__gfar_detect_errata_85xx(priv);
1132	else /* non-mpc85xx parts, i.e. e300 core based */
1133		__gfar_detect_errata_83xx(priv);
1134#endif
1135
1136	if (priv->errata)
1137		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
1138			 priv->errata);
1139}
1140
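    /* Soft-reset the MAC and bring it back to a known configuration:
     * frame-length limits, MACCFG2, the hash/exact-match tables, Rx/Tx
     * control and interrupt coalescing are all reprogrammed from scratch.
     */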
1141void gfar_mac_reset(struct gfar_private *priv)
1142{
1143	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1144	u32 tempval;
1145
1146	/* Reset MAC layer */
1147	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1148
1149	/* We need to delay at least 3 TX clocks */
1150	udelay(3);
1151
1152	/* the soft reset bit is not self-resetting, so we need to
1153	 * clear it before resuming normal operation
1154	 */
1155	gfar_write(&regs->maccfg1, 0);
1156
1157	udelay(3);
1158
1159	gfar_rx_offload_en(priv);
1160
1161	/* Initialize the max receive frame/buffer lengths */
1162	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
1163	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
1164
1165	/* Initialize the Minimum Frame Length Register */
1166	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1167
1168	/* Initialize MACCFG2. */
1169	tempval = MACCFG2_INIT_SETTINGS;
1170
1171	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
1172	 * are marked as truncated.  Avoid this by setting MACCFG2[Huge Frame]=1,
1173	 * and by checking RxBD[LG] and discarding frames larger than MAXFRM.
1174	 */
1175	if (gfar_has_errata(priv, GFAR_ERRATA_74))
1176		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1177
1178	gfar_write(&regs->maccfg2, tempval);
1179
1180	/* Clear mac addr hash registers */
1181	gfar_write(&regs->igaddr0, 0);
1182	gfar_write(&regs->igaddr1, 0);
1183	gfar_write(&regs->igaddr2, 0);
1184	gfar_write(&regs->igaddr3, 0);
1185	gfar_write(&regs->igaddr4, 0);
1186	gfar_write(&regs->igaddr5, 0);
1187	gfar_write(&regs->igaddr6, 0);
1188	gfar_write(&regs->igaddr7, 0);
1189
1190	gfar_write(&regs->gaddr0, 0);
1191	gfar_write(&regs->gaddr1, 0);
1192	gfar_write(&regs->gaddr2, 0);
1193	gfar_write(&regs->gaddr3, 0);
1194	gfar_write(&regs->gaddr4, 0);
1195	gfar_write(&regs->gaddr5, 0);
1196	gfar_write(&regs->gaddr6, 0);
1197	gfar_write(&regs->gaddr7, 0);
1198
1199	if (priv->extended_hash)
1200		gfar_clear_exact_match(priv->ndev);
1201
1202	gfar_mac_rx_config(priv);
1203
1204	gfar_mac_tx_config(priv);
1205
1206	gfar_set_mac_address(priv->ndev);
1207
1208	gfar_set_multi(priv->ndev);
1209
1210	/* clear ievent and imask before configuring coalescing */
1211	gfar_ints_disable(priv);
1212
1213	/* Configure the coalescing support */
1214	gfar_configure_coalescing_all(priv);
1215}
1216
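    /* One-time controller bring-up at probe: stop any DMA left running by
     * firmware, reset the MAC, clear the RMON MIB counters where present,
     * and set ECNTRL, the stashing attributes and the FIFO thresholds.
     */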
1217static void gfar_hw_init(struct gfar_private *priv)
1218{
1219	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1220	u32 attrs;
1221
1222	/* Stop the DMA engine now, in case it was running before
1223	 * (The firmware could have used it, and left it running).
1224	 */
1225	gfar_halt(priv);
1226
1227	gfar_mac_reset(priv);
1228
1229	/* Zero out the rmon mib registers if it has them */
1230	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1231		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1232
1233		/* Mask off the CAM interrupts */
1234		gfar_write(&regs->rmon.cam1, 0xffffffff);
1235		gfar_write(&regs->rmon.cam2, 0xffffffff);
1236	}
1237
1238	/* Initialize ECNTRL */
1239	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1240
1241	/* Set the extraction length and index */
1242	attrs = ATTRELI_EL(priv->rx_stash_size) |
1243		ATTRELI_EI(priv->rx_stash_index);
1244
1245	gfar_write(&regs->attreli, attrs);
1246
1247	/* Start with defaults, and add stashing
1248	 * depending on driver parameters
1249	 */
1250	attrs = ATTR_INIT_SETTINGS;
1251
1252	if (priv->bd_stash_en)
1253		attrs |= ATTR_BDSTASH;
1254
1255	if (priv->rx_stash_size != 0)
1256		attrs |= ATTR_BUFSTASH;
1257
1258	gfar_write(&regs->attr, attrs);
1259
1260	/* FIFO configs */
1261	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1262	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1263	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1264
1265	/* Program the interrupt steering regs, only for MG devices */
1266	if (priv->num_grps > 1)
1267		gfar_write_isrg(priv);
1268}
1269
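    /* Pick the multicast hash layout: controllers with the extended-hash
     * feature use all 16 igaddr/gaddr registers (a 512-bin table, 9-bit
     * hash), everything else uses the 8 gaddr registers (256 bins, 8-bit
     * hash).
     */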
1270static void gfar_init_addr_hash_table(struct gfar_private *priv)
1271{
1272	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1273
1274	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1275		priv->extended_hash = 1;
1276		priv->hash_width = 9;
1277
1278		priv->hash_regs[0] = &regs->igaddr0;
1279		priv->hash_regs[1] = &regs->igaddr1;
1280		priv->hash_regs[2] = &regs->igaddr2;
1281		priv->hash_regs[3] = &regs->igaddr3;
1282		priv->hash_regs[4] = &regs->igaddr4;
1283		priv->hash_regs[5] = &regs->igaddr5;
1284		priv->hash_regs[6] = &regs->igaddr6;
1285		priv->hash_regs[7] = &regs->igaddr7;
1286		priv->hash_regs[8] = &regs->gaddr0;
1287		priv->hash_regs[9] = &regs->gaddr1;
1288		priv->hash_regs[10] = &regs->gaddr2;
1289		priv->hash_regs[11] = &regs->gaddr3;
1290		priv->hash_regs[12] = &regs->gaddr4;
1291		priv->hash_regs[13] = &regs->gaddr5;
1292		priv->hash_regs[14] = &regs->gaddr6;
1293		priv->hash_regs[15] = &regs->gaddr7;
1294
1295	} else {
1296		priv->extended_hash = 0;
1297		priv->hash_width = 8;
1298
1299		priv->hash_regs[0] = &regs->gaddr0;
1300		priv->hash_regs[1] = &regs->gaddr1;
1301		priv->hash_regs[2] = &regs->gaddr2;
1302		priv->hash_regs[3] = &regs->gaddr3;
1303		priv->hash_regs[4] = &regs->gaddr4;
1304		priv->hash_regs[5] = &regs->gaddr5;
1305		priv->hash_regs[6] = &regs->gaddr6;
1306		priv->hash_regs[7] = &regs->gaddr7;
1307	}
1308}
1309
1310/* Set up the ethernet device structure, private data,
1311 * and anything else we need before we start
1312 */
1313static int gfar_probe(struct platform_device *ofdev)
1314{
1315	struct device_node *np = ofdev->dev.of_node;
1316	struct net_device *dev = NULL;
1317	struct gfar_private *priv = NULL;
1318	int err = 0, i;
1319
1320	err = gfar_of_init(ofdev, &dev);
1321
1322	if (err)
1323		return err;
1324
1325	priv = netdev_priv(dev);
1326	priv->ndev = dev;
1327	priv->ofdev = ofdev;
1328	priv->dev = &ofdev->dev;
1329	SET_NETDEV_DEV(dev, &ofdev->dev);
1330
1331	INIT_WORK(&priv->reset_task, gfar_reset_task);
1332
1333	platform_set_drvdata(ofdev, priv);
1334
1335	gfar_detect_errata(priv);
1336
1337	/* Set the dev->base_addr to the gfar reg region */
1338	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1339
1340	/* Fill in the dev structure */
1341	dev->watchdog_timeo = TX_TIMEOUT;
1342	/* MTU range: 50 - 9586 */
1343	dev->mtu = 1500;
1344	dev->min_mtu = 50;
1345	dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
1346	dev->netdev_ops = &gfar_netdev_ops;
1347	dev->ethtool_ops = &gfar_ethtool_ops;
1348
1349	/* Register NAPI: one Rx and one Tx context per interrupt group */
1350	for (i = 0; i < priv->num_grps; i++) {
1351		if (priv->poll_mode == GFAR_SQ_POLLING) {
1352			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1353				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1354			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
1355				       gfar_poll_tx_sq, 2);
1356		} else {
1357			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1358				       gfar_poll_rx, GFAR_DEV_WEIGHT);
1359			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
1360				       gfar_poll_tx, 2);
1361		}
1362	}
1363
1364	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1365		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1366				   NETIF_F_RXCSUM;
1367		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1368				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1369	}
1370
1371	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1372		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1373				    NETIF_F_HW_VLAN_CTAG_RX;
1374		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1375	}
1376
1377	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1378
1379	gfar_init_addr_hash_table(priv);
1380
1381	/* Insert receive time stamps into the padding alignment bytes,
1382	 * plus 2 bytes of padding to ensure CPU alignment.
1383	 */
1384	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1385		priv->padding = 8 + DEFAULT_PADDING;
1386
1387	if (dev->features & NETIF_F_IP_CSUM ||
1388	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1389		dev->needed_headroom = GMAC_FCB_LEN;
1390
1391	/* Initializing some of the rx/tx queue level parameters */
1392	for (i = 0; i < priv->num_tx_queues; i++) {
1393		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1394		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1395		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1396		priv->tx_queue[i]->txic = DEFAULT_TXIC;
1397	}
1398
1399	for (i = 0; i < priv->num_rx_queues; i++) {
1400		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1401		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1402		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1403	}
1404
1405	/* Always enable rx filer if available */
1406	priv->rx_filer_enable =
1407	    (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
1408	/* Enable most messages by default */
1409	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
1410	/* use priority h/w tx queue scheduling for single queue devices */
1411	if (priv->num_tx_queues == 1)
1412		priv->prio_sched_en = 1;
1413
1414	set_bit(GFAR_DOWN, &priv->state);
1415
1416	gfar_hw_init(priv);
1417
1418	/* Carrier starts down, phylib will bring it up */
1419	netif_carrier_off(dev);
1420
1421	err = register_netdev(dev);
1422
1423	if (err) {
1424		pr_err("%s: Cannot register net device, aborting\n", dev->name);
1425		goto register_fail;
1426	}
1427
1428	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
1429		priv->wol_supported |= GFAR_WOL_MAGIC;
1430
1431	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
1432	    priv->rx_filer_enable)
1433		priv->wol_supported |= GFAR_WOL_FILER_UCAST;
1434
1435	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
1436
1437	/* fill out IRQ number and name fields */
1438	for (i = 0; i < priv->num_grps; i++) {
1439		struct gfar_priv_grp *grp = &priv->gfargrp[i];
1440		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1441			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
1442				dev->name, "_g", '0' + i, "_tx");
1443			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
1444				dev->name, "_g", '0' + i, "_rx");
1445			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
1446				dev->name, "_g", '0' + i, "_er");
1447		} else
1448			strcpy(gfar_irq(grp, TX)->name, dev->name);
1449	}
1450
1451	/* Initialize the filer table */
1452	gfar_init_filer_table(priv);
1453
1454	/* Print out the device info */
1455	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1456
1457	/* Even more device info helps when determining which kernel
1458	 * provided which set of benchmarks.
1459	 */
1460	netdev_info(dev, "Running with NAPI enabled\n");
1461	for (i = 0; i < priv->num_rx_queues; i++)
1462		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1463			    i, priv->rx_queue[i]->rx_ring_size);
1464	for (i = 0; i < priv->num_tx_queues; i++)
1465		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1466			    i, priv->tx_queue[i]->tx_ring_size);
1467
1468	return 0;
1469
1470register_fail:
1471	if (of_phy_is_fixed_link(np))
1472		of_phy_deregister_fixed_link(np);
1473	unmap_group_regs(priv);
1474	gfar_free_rx_queues(priv);
1475	gfar_free_tx_queues(priv);
1476	of_node_put(priv->phy_node);
1477	of_node_put(priv->tbi_node);
1478	free_gfar_dev(priv);
1479	return err;
1480}
1481
1482static int gfar_remove(struct platform_device *ofdev)
1483{
1484	struct gfar_private *priv = platform_get_drvdata(ofdev);
1485	struct device_node *np = ofdev->dev.of_node;
1486
1487	of_node_put(priv->phy_node);
1488	of_node_put(priv->tbi_node);
1489
1490	unregister_netdev(priv->ndev);
1491
1492	if (of_phy_is_fixed_link(np))
1493		of_phy_deregister_fixed_link(np);
1494
1495	unmap_group_regs(priv);
1496	gfar_free_rx_queues(priv);
1497	gfar_free_tx_queues(priv);
1498	free_gfar_dev(priv);
1499
1500	return 0;
1501}
1502
1503#ifdef CONFIG_PM
1504
1505static void __gfar_filer_disable(struct gfar_private *priv)
1506{
1507	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1508	u32 temp;
1509
1510	temp = gfar_read(&regs->rctrl);
1511	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
1512	gfar_write(&regs->rctrl, temp);
1513}
1514
1515static void __gfar_filer_enable(struct gfar_private *priv)
1516{
1517	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1518	u32 temp;
1519
1520	temp = gfar_read(&regs->rctrl);
1521	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
1522	gfar_write(&regs->rctrl, temp);
1523}
1524
1525/* Filer rules implementing wol capabilities */
1526static void gfar_filer_config_wol(struct gfar_private *priv)
1527{
1528	unsigned int i;
1529	u32 rqfcr;
1530
1531	__gfar_filer_disable(priv);
1532
1533	/* clear the filer table, reject any packet by default */
1534	rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
1535	for (i = 0; i <= MAX_FILER_IDX; i++)
1536		gfar_write_filer(priv, i, rqfcr, 0);
1537
1538	i = 0;
1539	if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
1540		/* unicast packet, accept it */
1541		struct net_device *ndev = priv->ndev;
1542		/* get the default rx queue index */
1543		u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
1544		u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
1545				    (ndev->dev_addr[1] << 8) |
1546				     ndev->dev_addr[2];
1547
1548		rqfcr = (qindex << 10) | RQFCR_AND |
1549			RQFCR_CMP_EXACT | RQFCR_PID_DAH;
1550
1551		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1552
1553		dest_mac_addr = (ndev->dev_addr[3] << 16) |
1554				(ndev->dev_addr[4] << 8) |
1555				 ndev->dev_addr[5];
1556		rqfcr = (qindex << 10) | RQFCR_GPI |
1557			RQFCR_CMP_EXACT | RQFCR_PID_DAL;
1558		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1559	}
1560
1561	__gfar_filer_enable(priv);
1562}
1563
1564static void gfar_filer_restore_table(struct gfar_private *priv)
1565{
1566	u32 rqfcr, rqfpr;
1567	unsigned int i;
1568
1569	__gfar_filer_disable(priv);
1570
1571	for (i = 0; i <= MAX_FILER_IDX; i++) {
1572		rqfcr = priv->ftp_rqfcr[i];
1573		rqfpr = priv->ftp_rqfpr[i];
1574		gfar_write_filer(priv, i, rqfcr, rqfpr);
1575	}
1576
1577	__gfar_filer_enable(priv);
1578}
1579
1580/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
1581static void gfar_start_wol_filer(struct gfar_private *priv)
1582{
1583	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1584	u32 tempval;
1585	int i = 0;
1586
1587	/* Enable Rx hw queues */
1588	gfar_write(&regs->rqueue, priv->rqueue);
1589
1590	/* Initialize DMACTRL to have WWR and WOP */
1591	tempval = gfar_read(&regs->dmactrl);
1592	tempval |= DMACTRL_INIT_SETTINGS;
1593	gfar_write(&regs->dmactrl, tempval);
1594
1595	/* Make sure we aren't stopped */
1596	tempval = gfar_read(&regs->dmactrl);
1597	tempval &= ~DMACTRL_GRS;
1598	gfar_write(&regs->dmactrl, tempval);
1599
1600	for (i = 0; i < priv->num_grps; i++) {
1601		regs = priv->gfargrp[i].regs;
1602		/* Clear RHLT, so that the DMA starts polling now */
1603		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1604		/* enable the Filer General Purpose Interrupt */
1605		gfar_write(&regs->imask, IMASK_FGPI);
1606	}
1607
1608	/* Enable Rx DMA */
1609	tempval = gfar_read(&regs->maccfg1);
1610	tempval |= MACCFG1_RX_EN;
1611	gfar_write(&regs->maccfg1, tempval);
1612}
1613
1614static int gfar_suspend(struct device *dev)
1615{
1616	struct gfar_private *priv = dev_get_drvdata(dev);
1617	struct net_device *ndev = priv->ndev;
1618	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1619	u32 tempval;
1620	u16 wol = priv->wol_opts;
1621
1622	if (!netif_running(ndev))
1623		return 0;
1624
1625	disable_napi(priv);
1626	netif_tx_lock(ndev);
1627	netif_device_detach(ndev);
1628	netif_tx_unlock(ndev);
1629
1630	gfar_halt(priv);
1631
1632	if (wol & GFAR_WOL_MAGIC) {
1633		/* Enable interrupt on Magic Packet */
1634		gfar_write(&regs->imask, IMASK_MAG);
1635
1636		/* Enable Magic Packet mode */
1637		tempval = gfar_read(&regs->maccfg2);
1638		tempval |= MACCFG2_MPEN;
1639		gfar_write(&regs->maccfg2, tempval);
1640
1641		/* re-enable the Rx block */
1642		tempval = gfar_read(&regs->maccfg1);
1643		tempval |= MACCFG1_RX_EN;
1644		gfar_write(&regs->maccfg1, tempval);
1645
1646	} else if (wol & GFAR_WOL_FILER_UCAST) {
1647		gfar_filer_config_wol(priv);
1648		gfar_start_wol_filer(priv);
1649
1650	} else {
1651		phy_stop(ndev->phydev);
1652	}
1653
1654	return 0;
1655}
1656
1657static int gfar_resume(struct device *dev)
1658{
1659	struct gfar_private *priv = dev_get_drvdata(dev);
1660	struct net_device *ndev = priv->ndev;
1661	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1662	u32 tempval;
1663	u16 wol = priv->wol_opts;
1664
1665	if (!netif_running(ndev))
1666		return 0;
1667
1668	if (wol & GFAR_WOL_MAGIC) {
1669		/* Disable Magic Packet mode */
1670		tempval = gfar_read(&regs->maccfg2);
1671		tempval &= ~MACCFG2_MPEN;
1672		gfar_write(&regs->maccfg2, tempval);
1673
1674	} else if (wol & GFAR_WOL_FILER_UCAST) {
1675		/* need to stop rx only, tx is already down */
1676		gfar_halt(priv);
1677		gfar_filer_restore_table(priv);
1678
1679	} else {
1680		phy_start(ndev->phydev);
1681	}
1682
1683	gfar_start(priv);
1684
1685	netif_device_attach(ndev);
1686	enable_napi(priv);
1687
1688	return 0;
1689}
1690
1691static int gfar_restore(struct device *dev)
1692{
1693	struct gfar_private *priv = dev_get_drvdata(dev);
1694	struct net_device *ndev = priv->ndev;
1695
1696	if (!netif_running(ndev)) {
1697		netif_device_attach(ndev);
1698
1699		return 0;
1700	}
1701
1702	gfar_init_bds(ndev);
1703
1704	gfar_mac_reset(priv);
1705
1706	gfar_init_tx_rx_base(priv);
1707
1708	gfar_start(priv);
1709
1710	priv->oldlink = 0;
1711	priv->oldspeed = 0;
1712	priv->oldduplex = -1;
1713
1714	if (ndev->phydev)
1715		phy_start(ndev->phydev);
1716
1717	netif_device_attach(ndev);
1718	enable_napi(priv);
1719
1720	return 0;
1721}
1722
1723static const struct dev_pm_ops gfar_pm_ops = {
1724	.suspend = gfar_suspend,
1725	.resume = gfar_resume,
1726	.freeze = gfar_suspend,
1727	.thaw = gfar_resume,
1728	.restore = gfar_restore,
1729};
1730
1731#define GFAR_PM_OPS (&gfar_pm_ops)
1732
1733#else
1734
1735#define GFAR_PM_OPS NULL
1736
1737#endif
1738
1739/* Reads the controller's registers to determine what interface
1740 * connects it to the PHY.
1741 */
1742static phy_interface_t gfar_get_interface(struct net_device *dev)
1743{
1744	struct gfar_private *priv = netdev_priv(dev);
1745	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1746	u32 ecntrl;
1747
1748	ecntrl = gfar_read(&regs->ecntrl);
1749
1750	if (ecntrl & ECNTRL_SGMII_MODE)
1751		return PHY_INTERFACE_MODE_SGMII;
1752
1753	if (ecntrl & ECNTRL_TBI_MODE) {
1754		if (ecntrl & ECNTRL_REDUCED_MODE)
1755			return PHY_INTERFACE_MODE_RTBI;
1756		else
1757			return PHY_INTERFACE_MODE_TBI;
1758	}
1759
1760	if (ecntrl & ECNTRL_REDUCED_MODE) {
1761		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1762			return PHY_INTERFACE_MODE_RMII;
1763		}
1764		else {
1765			phy_interface_t interface = priv->interface;
1766
1767			/* This isn't autodetected right now, so it must
1768			 * be set by the device tree or platform code.
1769			 */
1770			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1771				return PHY_INTERFACE_MODE_RGMII_ID;
1772
1773			return PHY_INTERFACE_MODE_RGMII;
1774		}
1775	}
1776
1777	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1778		return PHY_INTERFACE_MODE_GMII;
1779
1780	return PHY_INTERFACE_MODE_MII;
1781}
1782
1783
1784/* Initializes driver's PHY state, and attaches to the PHY.
1785 * Returns 0 on success.
1786 */
1787static int init_phy(struct net_device *dev)
1788{
1789	struct gfar_private *priv = netdev_priv(dev);
1790	uint gigabit_support =
1791		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1792		GFAR_SUPPORTED_GBIT : 0;
1793	phy_interface_t interface;
1794	struct phy_device *phydev;
1795	struct ethtool_eee edata;
1796
1797	priv->oldlink = 0;
1798	priv->oldspeed = 0;
1799	priv->oldduplex = -1;
1800
1801	interface = gfar_get_interface(dev);
1802
1803	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1804				interface);
1805	if (!phydev) {
1806		dev_err(&dev->dev, "could not attach to PHY\n");
1807		return -ENODEV;
1808	}
1809
1810	if (interface == PHY_INTERFACE_MODE_SGMII)
1811		gfar_configure_serdes(dev);
1812
1813	/* Remove any features not supported by the controller */
1814	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1815	phydev->advertising = phydev->supported;
1816
1817	/* Add support for flow control, but don't advertise it by default */
1818	phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1819
1820	/* disable EEE autoneg, EEE not supported by eTSEC */
1821	memset(&edata, 0, sizeof(struct ethtool_eee));
1822	phy_ethtool_set_eee(phydev, &edata);
1823
1824	return 0;
1825}
1826
1827/* Initialize TBI PHY interface for communicating with the
1828 * SERDES lynx PHY on the chip.  We communicate with this PHY
1829 * through the MDIO bus on each controller, treating it as a
1830 * "normal" PHY at the address found in the TBIPA register.  We assume
1831 * that the TBIPA register is valid.  Either the MDIO bus code will set
1832 * it to a value that doesn't conflict with other PHYs on the bus, or the
1833 * value doesn't matter, as there are no other PHYs on the bus.
1834 */
1835static void gfar_configure_serdes(struct net_device *dev)
1836{
1837	struct gfar_private *priv = netdev_priv(dev);
1838	struct phy_device *tbiphy;
1839
1840	if (!priv->tbi_node) {
1841		dev_warn(&dev->dev, "error: SGMII mode requires that the "
1842				    "device tree specify a tbi-handle\n");
1843		return;
1844	}
1845
1846	tbiphy = of_phy_find_device(priv->tbi_node);
1847	if (!tbiphy) {
1848		dev_err(&dev->dev, "error: Could not get TBI device\n");
1849		return;
1850	}
1851
1852	/* If the link is already up, we must already be ok, and don't need to
1853	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1854	 * everything for us?  Resetting it takes the link down and requires
1855	 * several seconds for it to come back.
1856	 */
1857	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
1858		put_device(&tbiphy->mdio.dev);
1859		return;
1860	}
1861
1862	/* Single clk mode, mii mode off (for serdes communication) */
1863	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1864
1865	phy_write(tbiphy, MII_ADVERTISE,
1866		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1867		  ADVERTISE_1000XPSE_ASYM);
1868
1869	phy_write(tbiphy, MII_BMCR,
1870		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1871		  BMCR_SPEED1000);
1872
1873	put_device(&tbiphy->mdio.dev);
1874}
1875
1876static int __gfar_is_rx_idle(struct gfar_private *priv)
1877{
1878	u32 res;
1879
1880	/* Normally, TSEC should not hang on GRS commands, so we should
1881	 * actually wait for the IEVENT_GRSC flag.
1882	 */
1883	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1884		return 0;
1885
1886	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1887	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1888	 * and the Rx can be safely reset.
1889	 */
1890	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1891	res &= 0x7f807f80;
1892	if ((res & 0xffff) == (res >> 16))
1893		return 1;
1894
1895	return 0;
1896}
1897
1898/* Halt the receive and transmit queues */
1899static void gfar_halt_nodisable(struct gfar_private *priv)
1900{
1901	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1902	u32 tempval;
1903	unsigned int timeout;
1904	int stopped;
1905
1906	gfar_ints_disable(priv);
1907
1908	if (gfar_is_dma_stopped(priv))
1909		return;
1910
1911	/* Stop the DMA, and wait for it to stop */
1912	tempval = gfar_read(&regs->dmactrl);
1913	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1914	gfar_write(&regs->dmactrl, tempval);
1915
1916retry:
1917	timeout = 1000;
1918	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1919		cpu_relax();
1920		timeout--;
1921	}
1922
1923	if (!timeout)
1924		stopped = gfar_is_dma_stopped(priv);
1925
1926	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1927	    !__gfar_is_rx_idle(priv))
1928		goto retry;
1929}
1930
1931/* Halt the receive and transmit queues */
1932void gfar_halt(struct gfar_private *priv)
1933{
1934	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1935	u32 tempval;
1936
1937	/* Disable the Rx/Tx hw queues */
1938	gfar_write(&regs->rqueue, 0);
1939	gfar_write(&regs->tqueue, 0);
1940
1941	mdelay(10);
1942
1943	gfar_halt_nodisable(priv);
1944
1945	/* Disable Rx/Tx DMA */
1946	tempval = gfar_read(&regs->maccfg1);
1947	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1948	gfar_write(&regs->maccfg1, tempval);
1949}
1950
1951void stop_gfar(struct net_device *dev)
1952{
1953	struct gfar_private *priv = netdev_priv(dev);
1954
1955	netif_tx_stop_all_queues(dev);
1956
1957	smp_mb__before_atomic();
1958	set_bit(GFAR_DOWN, &priv->state);
1959	smp_mb__after_atomic();
1960
1961	disable_napi(priv);
1962
1963	/* disable ints and gracefully shut down Rx/Tx DMA */
1964	gfar_halt(priv);
1965
1966	phy_stop(dev->phydev);
1967
1968	free_skb_resources(priv);
1969}
1970
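    /* Unmap and free every skb still owned by a Tx ring: the head buffer
     * is unmapped with dma_unmap_single(), each fragment with
     * dma_unmap_page(), then the skb itself and the bookkeeping array are
     * released.
     */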
1971static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1972{
1973	struct txbd8 *txbdp;
1974	struct gfar_private *priv = netdev_priv(tx_queue->dev);
1975	int i, j;
1976
1977	txbdp = tx_queue->tx_bd_base;
1978
1979	for (i = 0; i < tx_queue->tx_ring_size; i++) {
1980		if (!tx_queue->tx_skbuff[i])
1981			continue;
1982
1983		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1984				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
1985		txbdp->lstatus = 0;
1986		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1987		     j++) {
1988			txbdp++;
1989			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1990				       be16_to_cpu(txbdp->length),
1991				       DMA_TO_DEVICE);
1992		}
1993		txbdp++;
1994		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1995		tx_queue->tx_skbuff[i] = NULL;
1996	}
1997	kfree(tx_queue->tx_skbuff);
1998	tx_queue->tx_skbuff = NULL;
1999}
2000
2001static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
2002{
2003	int i;
2004
2005	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
2006
2007	if (rx_queue->skb)
2008		dev_kfree_skb(rx_queue->skb);
2009
2010	for (i = 0; i < rx_queue->rx_ring_size; i++) {
2011		struct	gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
2012
2013		rxbdp->lstatus = 0;
2014		rxbdp->bufPtr = 0;
2015		rxbdp++;
2016
2017		if (!rxb->page)
2018			continue;
2019
2020		dma_unmap_page(rx_queue->dev, rxb->dma,
2021			       PAGE_SIZE, DMA_FROM_DEVICE);
2022		__free_page(rxb->page);
2023
2024		rxb->page = NULL;
2025	}
2026
2027	kfree(rx_queue->rx_buff);
2028	rx_queue->rx_buff = NULL;
2029}
2030
2031/* If there are any tx skbs or rx skbs still around, free them.
2032 * Then free tx_skbuff and rx_skbuff
2033 */
2034static void free_skb_resources(struct gfar_private *priv)
2035{
2036	struct gfar_priv_tx_q *tx_queue = NULL;
2037	struct gfar_priv_rx_q *rx_queue = NULL;
2038	int i;
2039
2040	/* Go through all the buffer descriptors and free their data buffers */
2041	for (i = 0; i < priv->num_tx_queues; i++) {
2042		struct netdev_queue *txq;
2043
2044		tx_queue = priv->tx_queue[i];
2045		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
2046		if (tx_queue->tx_skbuff)
2047			free_skb_tx_queue(tx_queue);
2048		netdev_tx_reset_queue(txq);
2049	}
2050
2051	for (i = 0; i < priv->num_rx_queues; i++) {
2052		rx_queue = priv->rx_queue[i];
2053		if (rx_queue->rx_buff)
2054			free_skb_rx_queue(rx_queue);
2055	}
2056
2057	dma_free_coherent(priv->dev,
2058			  sizeof(struct txbd8) * priv->total_tx_ring_size +
2059			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
2060			  priv->tx_queue[0]->tx_bd_base,
2061			  priv->tx_queue[0]->tx_bd_dma_base);
2062}
2063
2064void gfar_start(struct gfar_private *priv)
2065{
2066	struct gfar __iomem *regs = priv->gfargrp[0].regs;
2067	u32 tempval;
2068	int i = 0;
2069
2070	/* Enable Rx/Tx hw queues */
2071	gfar_write(&regs->rqueue, priv->rqueue);
2072	gfar_write(&regs->tqueue, priv->tqueue);
2073
2074	/* Initialize DMACTRL to have WWR and WOP */
2075	tempval = gfar_read(&regs->dmactrl);
2076	tempval |= DMACTRL_INIT_SETTINGS;
2077	gfar_write(&regs->dmactrl, tempval);
2078
2079	/* Make sure we aren't stopped */
2080	tempval = gfar_read(&regs->dmactrl);
2081	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
2082	gfar_write(&regs->dmactrl, tempval);
2083
2084	for (i = 0; i < priv->num_grps; i++) {
2085		regs = priv->gfargrp[i].regs;
2086		/* Clear THLT/RHLT, so that the DMA starts polling now */
2087		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
2088		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
2089	}
2090
2091	/* Enable Rx/Tx DMA */
2092	tempval = gfar_read(&regs->maccfg1);
2093	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
2094	gfar_write(&regs->maccfg1, tempval);
2095
2096	gfar_ints_enable(priv);
2097
2098	netif_trans_update(priv->ndev); /* prevent tx timeout */
2099}
2100
2101static void free_grp_irqs(struct gfar_priv_grp *grp)
2102{
2103	free_irq(gfar_irq(grp, TX)->irq, grp);
2104	free_irq(gfar_irq(grp, RX)->irq, grp);
2105	free_irq(gfar_irq(grp, ER)->irq, grp);
2106}
2107
2108static int register_grp_irqs(struct gfar_priv_grp *grp)
2109{
2110	struct gfar_private *priv = grp->priv;
2111	struct net_device *dev = priv->ndev;
2112	int err;
2113
2114	/* If the device has multiple interrupts, register for
2115	 * them.  Otherwise, only register for the one
2116	 */
2117	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2118		/* Install our interrupt handlers for Error,
2119		 * Transmit, and Receive
2120		 */
2121		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
2122				  gfar_irq(grp, ER)->name, grp);
2123		if (err < 0) {
2124			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2125				  gfar_irq(grp, ER)->irq);
2126
2127			goto err_irq_fail;
2128		}
2129		enable_irq_wake(gfar_irq(grp, ER)->irq);
2130
2131		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2132				  gfar_irq(grp, TX)->name, grp);
2133		if (err < 0) {
2134			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2135				  gfar_irq(grp, TX)->irq);
2136			goto tx_irq_fail;
2137		}
2138		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2139				  gfar_irq(grp, RX)->name, grp);
2140		if (err < 0) {
2141			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2142				  gfar_irq(grp, RX)->irq);
2143			goto rx_irq_fail;
2144		}
2145		enable_irq_wake(gfar_irq(grp, RX)->irq);
2146
2147	} else {
2148		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2149				  gfar_irq(grp, TX)->name, grp);
2150		if (err < 0) {
2151			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2152				  gfar_irq(grp, TX)->irq);
2153			goto err_irq_fail;
2154		}
2155		enable_irq_wake(gfar_irq(grp, TX)->irq);
2156	}
2157
2158	return 0;
2159
2160rx_irq_fail:
2161	free_irq(gfar_irq(grp, TX)->irq, grp);
2162tx_irq_fail:
2163	free_irq(gfar_irq(grp, ER)->irq, grp);
2164err_irq_fail:
2165	return err;
2166
2167}
2168
2169static void gfar_free_irq(struct gfar_private *priv)
2170{
2171	int i;
2172
2173	/* Free the IRQs */
2174	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2175		for (i = 0; i < priv->num_grps; i++)
2176			free_grp_irqs(&priv->gfargrp[i]);
2177	} else {
2178		for (i = 0; i < priv->num_grps; i++)
2179			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2180				 &priv->gfargrp[i]);
2181	}
2182}
2183
2184static int gfar_request_irq(struct gfar_private *priv)
2185{
2186	int err, i, j;
2187
2188	for (i = 0; i < priv->num_grps; i++) {
2189		err = register_grp_irqs(&priv->gfargrp[i]);
2190		if (err) {
2191			for (j = 0; j < i; j++)
2192				free_grp_irqs(&priv->gfargrp[j]);
2193			return err;
2194		}
2195	}
2196
2197	return 0;
2198}
2199
2200/* Bring the controller up and running */
2201int startup_gfar(struct net_device *ndev)
2202{
2203	struct gfar_private *priv = netdev_priv(ndev);
2204	int err;
2205
2206	gfar_mac_reset(priv);
2207
2208	err = gfar_alloc_skb_resources(ndev);
2209	if (err)
2210		return err;
2211
2212	gfar_init_tx_rx_base(priv);
2213
2214	smp_mb__before_atomic();
2215	clear_bit(GFAR_DOWN, &priv->state);
2216	smp_mb__after_atomic();
2217
2218	/* Start Rx/Tx DMA and enable the interrupts */
2219	gfar_start(priv);
2220
2221	/* force link state update after mac reset */
2222	priv->oldlink = 0;
2223	priv->oldspeed = 0;
2224	priv->oldduplex = -1;
2225
2226	phy_start(ndev->phydev);
2227
2228	enable_napi(priv);
2229
2230	netif_tx_wake_all_queues(ndev);
2231
2232	return 0;
2233}
2234
2235/* Called when something needs to use the ethernet device
2236 * Returns 0 for success.
2237 */
2238static int gfar_enet_open(struct net_device *dev)
2239{
2240	struct gfar_private *priv = netdev_priv(dev);
2241	int err;
2242
2243	err = init_phy(dev);
2244	if (err)
2245		return err;
2246
2247	err = gfar_request_irq(priv);
2248	if (err)
2249		return err;
2250
2251	err = startup_gfar(dev);
2252	if (err)
2253		return err;
2254
2255	return err;
2256}
2257
2258static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
2259{
2260	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
2261
2262	memset(fcb, 0, GMAC_FCB_LEN);
2263
2264	return fcb;
2265}
2266
2267static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2268				    int fcb_length)
2269{
2270	/* If we're here, it's an IP packet with a TCP or UDP
2271	 * payload.  We set it to checksum, using a pseudo-header
2272	 * we provide
2273	 */
2274	u8 flags = TXFCB_DEFAULT;
2275
2276	/* Tell the controller what the protocol is
2277	 * and provide the already calculated pseudo-header checksum (phcs)
2278	 */
2279	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2280		flags |= TXFCB_UDP;
2281		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
2282	} else
2283		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
2284
2285	/* l3os is the distance between the start of the
2286	 * frame (skb->data) and the start of the IP hdr.
2287	 * l4os is the distance between the start of the
2288	 * l3 hdr and the l4 hdr
2289	 */
2290	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
2291	fcb->l4os = skb_network_header_len(skb);
2292
2293	fcb->flags = flags;
2294}
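
/*
 * Worked example (illustrative only, assuming GMAC_FCB_LEN is 8): for
 * an untagged IPv4/TCP frame with the 8-byte FCB pushed in front of
 * the Ethernet header, skb_network_offset() is 8 + 14 = 22, so
 * l3os = 22 - 8 = 14 (the Ethernet header) and l4os is the IP header
 * length, e.g. 20 for a header without options.
 */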
2295
2296static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2297{
2298	fcb->flags |= TXFCB_VLN;
2299	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
2300}
2301
2302static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2303				      struct txbd8 *base, int ring_size)
2304{
2305	struct txbd8 *new_bd = bdp + stride;
2306
2307	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2308}
2309
2310static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2311				      int ring_size)
2312{
2313	return skip_txbd(bdp, 1, base, ring_size);
2314}
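
/*
 * Illustration of the wrap arithmetic (hypothetical values): with
 * ring_size = 8 and bdp at base + 6, skip_txbd(bdp, 3, base, 8)
 * computes base + 9, sees it is >= base + 8, and wraps to base + 1.
 * next_txbd() is simply the stride-1 case.
 */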
2315
2316/* eTSEC12: csum generation not supported for some fcb offsets */
2317static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2318				       unsigned long fcb_addr)
2319{
2320	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2321	       (fcb_addr % 0x20) > 0x18);
2322}
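
/*
 * What the modulo check above amounts to (sketch, assuming the 0x20
 * and 0x18 constants come from the erratum): within every 32-byte
 * window the last seven byte offsets (0x19..0x1f) are rejected; e.g.
 * an FCB at ...0x3a has 0x3a % 0x20 = 0x1a > 0x18, so the frame falls
 * back to software checksumming.
 */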
2323
2324/* eTSEC76: csum generation for frames larger than 2500 may
2325 * cause excess delays before start of transmission
2326 */
2327static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2328				       unsigned int len)
2329{
2330	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2331	       (len > 2500));
2332}
2333
2334/* This is called by the kernel when a frame is ready for transmission.
2335 * It is pointed to by the dev->hard_start_xmit function pointer
2336 */
2337static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2338{
2339	struct gfar_private *priv = netdev_priv(dev);
2340	struct gfar_priv_tx_q *tx_queue = NULL;
2341	struct netdev_queue *txq;
2342	struct gfar __iomem *regs = NULL;
2343	struct txfcb *fcb = NULL;
2344	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2345	u32 lstatus;
2346	skb_frag_t *frag;
2347	int i, rq = 0;
2348	int do_tstamp, do_csum, do_vlan;
2349	u32 bufaddr;
2350	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2351
2352	rq = skb->queue_mapping;
2353	tx_queue = priv->tx_queue[rq];
2354	txq = netdev_get_tx_queue(dev, rq);
2355	base = tx_queue->tx_bd_base;
2356	regs = tx_queue->grp->regs;
2357
2358	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2359	do_vlan = skb_vlan_tag_present(skb);
2360	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2361		    priv->hwts_tx_en;
2362
2363	if (do_csum || do_vlan)
2364		fcb_len = GMAC_FCB_LEN;
2365
2366	/* check if time stamp should be generated */
2367	if (unlikely(do_tstamp))
2368		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2369
2370	/* make space for additional header when fcb is needed */
2371	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
2372		struct sk_buff *skb_new;
2373
2374		skb_new = skb_realloc_headroom(skb, fcb_len);
2375		if (!skb_new) {
2376			dev->stats.tx_errors++;
2377			dev_kfree_skb_any(skb);
2378			return NETDEV_TX_OK;
2379		}
2380
2381		if (skb->sk)
2382			skb_set_owner_w(skb_new, skb->sk);
2383		dev_consume_skb_any(skb);
2384		skb = skb_new;
2385	}
2386
2387	/* total number of fragments in the SKB */
2388	nr_frags = skb_shinfo(skb)->nr_frags;
2389
2390	/* calculate the required number of TxBDs for this skb */
2391	if (unlikely(do_tstamp))
2392		nr_txbds = nr_frags + 2;
2393	else
2394		nr_txbds = nr_frags + 1;
2395
2396	/* check if there is space to queue this packet */
2397	if (nr_txbds > tx_queue->num_txbdfree) {
2398		/* no space, stop the queue */
2399		netif_tx_stop_queue(txq);
2400		dev->stats.tx_fifo_errors++;
2401		return NETDEV_TX_BUSY;
2402	}
2403
2404	/* Update transmit stats */
2405	bytes_sent = skb->len;
2406	tx_queue->stats.tx_bytes += bytes_sent;
2407	/* keep Tx bytes on wire for BQL accounting */
2408	GFAR_CB(skb)->bytes_sent = bytes_sent;
2409	tx_queue->stats.tx_packets++;
2410
2411	txbdp = txbdp_start = tx_queue->cur_tx;
2412	lstatus = be32_to_cpu(txbdp->lstatus);
2413
2414	/* Add TxPAL between FCB and frame if required */
2415	if (unlikely(do_tstamp)) {
2416		skb_push(skb, GMAC_TXPAL_LEN);
2417		memset(skb->data, 0, GMAC_TXPAL_LEN);
2418	}
2419
2420	/* Add TxFCB if required */
2421	if (fcb_len) {
2422		fcb = gfar_add_fcb(skb);
2423		lstatus |= BD_LFLAG(TXBD_TOE);
2424	}
2425
2426	/* Set up checksumming */
2427	if (do_csum) {
2428		gfar_tx_checksum(skb, fcb, fcb_len);
2429
2430		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2431		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
2432			__skb_pull(skb, GMAC_FCB_LEN);
2433			skb_checksum_help(skb);
2434			if (do_vlan || do_tstamp) {
2435				/* put back a new fcb for vlan/tstamp TOE */
2436				fcb = gfar_add_fcb(skb);
2437			} else {
2438				/* Tx TOE not used */
2439				lstatus &= ~(BD_LFLAG(TXBD_TOE));
2440				fcb = NULL;
2441			}
2442		}
2443	}
2444
2445	if (do_vlan)
2446		gfar_tx_vlan(skb, fcb);
2447
2448	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
2449				 DMA_TO_DEVICE);
2450	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2451		goto dma_map_err;
2452
2453	txbdp_start->bufPtr = cpu_to_be32(bufaddr);
2454
2455	/* Time stamp insertion requires one additional TxBD */
2456	if (unlikely(do_tstamp))
2457		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2458						 tx_queue->tx_ring_size);
2459
2460	if (likely(!nr_frags)) {
2461		if (likely(!do_tstamp))
2462			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2463	} else {
2464		u32 lstatus_start = lstatus;
2465
2466		/* Place the fragment addresses and lengths into the TxBDs */
2467		frag = &skb_shinfo(skb)->frags[0];
2468		for (i = 0; i < nr_frags; i++, frag++) {
2469			unsigned int size;
2470
2471			/* Point at the next BD, wrapping as needed */
2472			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2473
2474			size = skb_frag_size(frag);
2475
2476			lstatus = be32_to_cpu(txbdp->lstatus) | size |
2477				  BD_LFLAG(TXBD_READY);
2478
2479			/* Handle the last BD specially */
2480			if (i == nr_frags - 1)
2481				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2482
2483			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
2484						   size, DMA_TO_DEVICE);
2485			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2486				goto dma_map_err;
2487
2488			/* set the TxBD length and buffer pointer */
2489			txbdp->bufPtr = cpu_to_be32(bufaddr);
2490			txbdp->lstatus = cpu_to_be32(lstatus);
2491		}
2492
2493		lstatus = lstatus_start;
2494	}
2495
2496	/* If time stamping is requested one additional TxBD must be set up. The
2497	 * first TxBD points to the FCB and must have a data length of
2498	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2499	 * the full frame length.
2500	 */
2501	if (unlikely(do_tstamp)) {
2502		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
2503
2504		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
2505		bufaddr += fcb_len;
2506
2507		lstatus_ts |= BD_LFLAG(TXBD_READY) |
2508			      (skb_headlen(skb) - fcb_len);
2509		if (!nr_frags)
2510			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2511
2512		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
2513		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
2514		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2515
2516		/* Setup tx hardware time stamping */
2517		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2518		fcb->ptp = 1;
2519	} else {
2520		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2521	}
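
	/* Illustrative BD layout for a linear (no frags) time stamped
	 * frame, hypothetical lengths: txbdp_start covers only the FCB
	 * area (data length GMAC_FCB_LEN), while txbdp_tstamp points
	 * fcb_len bytes further into the same buffer and carries the
	 * remaining skb_headlen(skb) - fcb_len bytes, flagged with
	 * LAST | INTERRUPT.
	 */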
2522
2523	netdev_tx_sent_queue(txq, bytes_sent);
2524
2525	gfar_wmb();
2526
2527	txbdp_start->lstatus = cpu_to_be32(lstatus);
2528
2529	gfar_wmb(); /* force lstatus write before tx_skbuff */
2530
2531	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2532
2533	/* Update the current skb pointer to the next entry we will use
2534	 * (wrapping if necessary)
2535	 */
2536	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2537			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2538
2539	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2540
2541	/* We can work in parallel with gfar_clean_tx_ring(), except
2542	 * when modifying num_txbdfree. Note that we didn't grab the lock
2543	 * when reading num_txbdfree and checking for available
2544	 * space; that's because outside of this function it can only grow.
2545	 */
2546	spin_lock_bh(&tx_queue->txlock);
2547	/* reduce TxBD free count */
2548	tx_queue->num_txbdfree -= (nr_txbds);
2549	spin_unlock_bh(&tx_queue->txlock);
2550
2551	/* If the next BD still needs to be cleaned up, then the bds
2552	 * are full.  We need to tell the kernel to stop sending us stuff.
2553	 */
2554	if (!tx_queue->num_txbdfree) {
2555		netif_tx_stop_queue(txq);
2556
2557		dev->stats.tx_fifo_errors++;
2558	}
2559
2560	/* Tell the DMA to go go go */
2561	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2562
2563	return NETDEV_TX_OK;
2564
2565dma_map_err:
2566	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
2567	if (do_tstamp)
2568		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2569	for (i = 0; i < nr_frags; i++) {
2570		lstatus = be32_to_cpu(txbdp->lstatus);
2571		if (!(lstatus & BD_LFLAG(TXBD_READY)))
2572			break;
2573
2574		lstatus &= ~BD_LFLAG(TXBD_READY);
2575		txbdp->lstatus = cpu_to_be32(lstatus);
2576		bufaddr = be32_to_cpu(txbdp->bufPtr);
2577		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
2578			       DMA_TO_DEVICE);
2579		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2580	}
2581	gfar_wmb();
2582	dev_kfree_skb_any(skb);
2583	return NETDEV_TX_OK;
2584}
2585
2586/* Stops the kernel queue, and halts the controller */
2587static int gfar_close(struct net_device *dev)
2588{
2589	struct gfar_private *priv = netdev_priv(dev);
2590
2591	cancel_work_sync(&priv->reset_task);
2592	stop_gfar(dev);
2593
2594	/* Disconnect from the PHY */
2595	phy_disconnect(dev->phydev);
2596
2597	gfar_free_irq(priv);
2598
2599	return 0;
2600}
2601
2602/* Changes the mac address if the controller is not running. */
2603static int gfar_set_mac_address(struct net_device *dev)
2604{
2605	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2606
2607	return 0;
2608}
2609
2610static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2611{
2612	struct gfar_private *priv = netdev_priv(dev);
2613
2614	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2615		cpu_relax();
2616
2617	if (dev->flags & IFF_UP)
2618		stop_gfar(dev);
2619
2620	dev->mtu = new_mtu;
2621
2622	if (dev->flags & IFF_UP)
2623		startup_gfar(dev);
2624
2625	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2626
2627	return 0;
2628}
2629
2630void reset_gfar(struct net_device *ndev)
2631{
2632	struct gfar_private *priv = netdev_priv(ndev);
2633
2634	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2635		cpu_relax();
2636
2637	stop_gfar(ndev);
2638	startup_gfar(ndev);
2639
2640	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2641}
2642
2643/* gfar_reset_task gets scheduled when a packet has not been
2644 * transmitted after a set amount of time.
2645 * For now, assume that clearing out all the structures, and
2646 * starting over will fix the problem.
2647 */
2648static void gfar_reset_task(struct work_struct *work)
2649{
2650	struct gfar_private *priv = container_of(work, struct gfar_private,
2651						 reset_task);
2652	reset_gfar(priv->ndev);
2653}
2654
2655static void gfar_timeout(struct net_device *dev)
2656{
2657	struct gfar_private *priv = netdev_priv(dev);
2658
2659	dev->stats.tx_errors++;
2660	schedule_work(&priv->reset_task);
2661}
2662
2663	/* Clean up completed Tx descriptors; called from the NAPI Tx poll */
2664static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2665{
2666	struct net_device *dev = tx_queue->dev;
2667	struct netdev_queue *txq;
2668	struct gfar_private *priv = netdev_priv(dev);
2669	struct txbd8 *bdp, *next = NULL;
2670	struct txbd8 *lbdp = NULL;
2671	struct txbd8 *base = tx_queue->tx_bd_base;
2672	struct sk_buff *skb;
2673	int skb_dirtytx;
2674	int tx_ring_size = tx_queue->tx_ring_size;
2675	int frags = 0, nr_txbds = 0;
2676	int i;
2677	int howmany = 0;
2678	int tqi = tx_queue->qindex;
2679	unsigned int bytes_sent = 0;
2680	u32 lstatus;
2681	size_t buflen;
2682
2683	txq = netdev_get_tx_queue(dev, tqi);
2684	bdp = tx_queue->dirty_tx;
2685	skb_dirtytx = tx_queue->skb_dirtytx;
2686
2687	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2688
2689		frags = skb_shinfo(skb)->nr_frags;
2690
2691		/* When time stamping, one additional TxBD must be freed.
2692		 * Also, we need to dma_unmap_single() the TxPAL.
2693		 */
2694		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2695			nr_txbds = frags + 2;
2696		else
2697			nr_txbds = frags + 1;
2698
2699		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2700
2701		lstatus = be32_to_cpu(lbdp->lstatus);
2702
2703		/* Only clean completed frames */
2704		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2705		    (lstatus & BD_LENGTH_MASK))
2706			break;
2707
2708		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2709			next = next_txbd(bdp, base, tx_ring_size);
2710			buflen = be16_to_cpu(next->length) +
2711				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2712		} else
2713			buflen = be16_to_cpu(bdp->length);
2714
2715		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2716				 buflen, DMA_TO_DEVICE);
2717
2718		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2719			struct skb_shared_hwtstamps shhwtstamps;
2720			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2721					  ~0x7UL);
2722
2723			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2724			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2725			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2726			skb_tstamp_tx(skb, &shhwtstamps);
2727			gfar_clear_txbd_status(bdp);
2728			bdp = next;
2729		}
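
		/* Illustrative arithmetic for the ns pointer above
		 * (hypothetical address): with skb->data at ...0x1002,
		 * (0x1002 + 0x10) & ~0x7 = 0x1010, the 8-byte aligned
		 * slot where the controller wrote the timestamp.
		 */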
2730
2731		gfar_clear_txbd_status(bdp);
2732		bdp = next_txbd(bdp, base, tx_ring_size);
2733
2734		for (i = 0; i < frags; i++) {
2735			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2736				       be16_to_cpu(bdp->length),
2737				       DMA_TO_DEVICE);
2738			gfar_clear_txbd_status(bdp);
2739			bdp = next_txbd(bdp, base, tx_ring_size);
2740		}
2741
2742		bytes_sent += GFAR_CB(skb)->bytes_sent;
2743
2744		dev_kfree_skb_any(skb);
2745
2746		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2747
2748		skb_dirtytx = (skb_dirtytx + 1) &
2749			      TX_RING_MOD_MASK(tx_ring_size);
2750
2751		howmany++;
2752		spin_lock(&tx_queue->txlock);
2753		tx_queue->num_txbdfree += nr_txbds;
2754		spin_unlock(&tx_queue->txlock);
2755	}
2756
2757	/* If we freed a buffer, we can restart transmission, if necessary */
2758	if (tx_queue->num_txbdfree &&
2759	    netif_tx_queue_stopped(txq) &&
2760	    !(test_bit(GFAR_DOWN, &priv->state)))
2761		netif_wake_subqueue(priv->ndev, tqi);
2762
2763	/* Update dirty indicators */
2764	tx_queue->skb_dirtytx = skb_dirtytx;
2765	tx_queue->dirty_tx = bdp;
2766
2767	netdev_tx_completed_queue(txq, howmany, bytes_sent);
2768}
2769
2770static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
2771{
2772	struct page *page;
2773	dma_addr_t addr;
2774
2775	page = dev_alloc_page();
2776	if (unlikely(!page))
2777		return false;
2778
2779	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
2780	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
2781		__free_page(page);
2782
2783		return false;
2784	}
2785
2786	rxb->dma = addr;
2787	rxb->page = page;
2788	rxb->page_offset = 0;
2789
2790	return true;
2791}
2792
2793static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
2794{
2795	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
2796	struct gfar_extra_stats *estats = &priv->extra_stats;
2797
2798	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
2799	atomic64_inc(&estats->rx_alloc_err);
2800}
2801
2802static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
2803				int alloc_cnt)
2804{
2805	struct rxbd8 *bdp;
2806	struct gfar_rx_buff *rxb;
2807	int i;
2808
2809	i = rx_queue->next_to_use;
2810	bdp = &rx_queue->rx_bd_base[i];
2811	rxb = &rx_queue->rx_buff[i];
2812
2813	while (alloc_cnt--) {
2814		/* try reuse page */
2815		if (unlikely(!rxb->page)) {
2816			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
2817				gfar_rx_alloc_err(rx_queue);
2818				break;
2819			}
2820		}
2821
2822		/* Setup the new RxBD */
2823		gfar_init_rxbdp(rx_queue, bdp,
2824				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
2825
2826		/* Update to the next pointer */
2827		bdp++;
2828		rxb++;
2829
2830		if (unlikely(++i == rx_queue->rx_ring_size)) {
2831			i = 0;
2832			bdp = rx_queue->rx_bd_base;
2833			rxb = rx_queue->rx_buff;
2834		}
2835	}
2836
2837	rx_queue->next_to_use = i;
2838	rx_queue->next_to_alloc = i;
2839}
2840
2841static void count_errors(u32 lstatus, struct net_device *ndev)
2842{
2843	struct gfar_private *priv = netdev_priv(ndev);
2844	struct net_device_stats *stats = &ndev->stats;
2845	struct gfar_extra_stats *estats = &priv->extra_stats;
2846
2847	/* If the packet was truncated, none of the other errors matter */
2848	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2849		stats->rx_length_errors++;
2850
2851		atomic64_inc(&estats->rx_trunc);
2852
2853		return;
2854	}
2855	/* Count the errors, if there were any */
2856	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2857		stats->rx_length_errors++;
2858
2859		if (lstatus & BD_LFLAG(RXBD_LARGE))
2860			atomic64_inc(&estats->rx_large);
2861		else
2862			atomic64_inc(&estats->rx_short);
2863	}
2864	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2865		stats->rx_frame_errors++;
2866		atomic64_inc(&estats->rx_nonoctet);
2867	}
2868	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2869		atomic64_inc(&estats->rx_crcerr);
2870		stats->rx_crc_errors++;
2871	}
2872	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2873		atomic64_inc(&estats->rx_overrun);
2874		stats->rx_over_errors++;
2875	}
2876}
2877
2878irqreturn_t gfar_receive(int irq, void *grp_id)
2879{
2880	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2881	unsigned long flags;
2882	u32 imask, ievent;
2883
2884	ievent = gfar_read(&grp->regs->ievent);
2885
2886	if (unlikely(ievent & IEVENT_FGPI)) {
2887		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2888		return IRQ_HANDLED;
2889	}
2890
2891	if (likely(napi_schedule_prep(&grp->napi_rx))) {
2892		spin_lock_irqsave(&grp->grplock, flags);
2893		imask = gfar_read(&grp->regs->imask);
2894		imask &= IMASK_RX_DISABLED;
2895		gfar_write(&grp->regs->imask, imask);
2896		spin_unlock_irqrestore(&grp->grplock, flags);
2897		__napi_schedule(&grp->napi_rx);
2898	} else {
2899		/* Clear IEVENT, so interrupts aren't called again
2900		 * because of the packets that have already arrived.
2901		 */
2902		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2903	}
2904
2905	return IRQ_HANDLED;
2906}
2907
2908/* Interrupt Handler for Transmit complete */
2909static irqreturn_t gfar_transmit(int irq, void *grp_id)
2910{
2911	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2912	unsigned long flags;
2913	u32 imask;
2914
2915	if (likely(napi_schedule_prep(&grp->napi_tx))) {
2916		spin_lock_irqsave(&grp->grplock, flags);
2917		imask = gfar_read(&grp->regs->imask);
2918		imask &= IMASK_TX_DISABLED;
2919		gfar_write(&grp->regs->imask, imask);
2920		spin_unlock_irqrestore(&grp->grplock, flags);
2921		__napi_schedule(&grp->napi_tx);
2922	} else {
2923		/* Clear IEVENT, so interrupts aren't called again
2924		 * because of the packets that have already arrived.
2925		 */
2926		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2927	}
2928
2929	return IRQ_HANDLED;
2930}
2931
2932static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2933			     struct sk_buff *skb, bool first)
2934{
2935	int size = lstatus & BD_LENGTH_MASK;
2936	struct page *page = rxb->page;
2937
2938	if (likely(first)) {
2939		skb_put(skb, size);
2940	} else {
2941		/* the last fragment's length contains the full frame length */
2942		if (lstatus & BD_LFLAG(RXBD_LAST))
2943			size -= skb->len;
2944
2945		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2946				rxb->page_offset + RXBUF_ALIGNMENT,
2947				size, GFAR_RXB_TRUESIZE);
2948	}
2949
2950	/* try reuse page */
2951	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2952		return false;
2953
2954	/* change offset to the other half */
2955	rxb->page_offset ^= GFAR_RXB_TRUESIZE;
2956
2957	page_ref_inc(page);
2958
2959	return true;
2960}
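
/*
 * Page-reuse sketch (assuming GFAR_RXB_TRUESIZE is half a page, e.g.
 * 2048 on 4K pages): each page is split into two receive halves;
 * after one half is attached to an skb, page_offset is flipped to the
 * other half and the page reference count is bumped so the driver
 * keeps its own reference.  A page still shared with the stack
 * (page_count != 1) or taken from a pfmemalloc reserve is not reused.
 */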
2961
2962static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2963			       struct gfar_rx_buff *old_rxb)
2964{
2965	struct gfar_rx_buff *new_rxb;
2966	u16 nta = rxq->next_to_alloc;
2967
2968	new_rxb = &rxq->rx_buff[nta];
2969
2970	/* find next buf that can reuse a page */
2971	nta++;
2972	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2973
2974	/* copy page reference */
2975	*new_rxb = *old_rxb;
2976
2977	/* sync for use by the device */
2978	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2979					 old_rxb->page_offset,
2980					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2981}
2982
2983static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2984					    u32 lstatus, struct sk_buff *skb)
2985{
2986	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2987	struct page *page = rxb->page;
2988	bool first = false;
2989
2990	if (likely(!skb)) {
2991		void *buff_addr = page_address(page) + rxb->page_offset;
2992
2993		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2994		if (unlikely(!skb)) {
2995			gfar_rx_alloc_err(rx_queue);
2996			return NULL;
2997		}
2998		skb_reserve(skb, RXBUF_ALIGNMENT);
2999		first = true;
3000	}
3001
3002	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
3003				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
3004
3005	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
3006		/* reuse the free half of the page */
3007		gfar_reuse_rx_page(rx_queue, rxb);
3008	} else {
3009		/* page cannot be reused, unmap it */
3010		dma_unmap_page(rx_queue->dev, rxb->dma,
3011			       PAGE_SIZE, DMA_FROM_DEVICE);
3012	}
3013
3014	/* clear rxb content */
3015	rxb->page = NULL;
3016
3017	return skb;
3018}
3019
3020static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
3021{
3022	/* If valid headers were found, and valid sums
3023	 * were verified, then we tell the kernel that no
3024	 * checksumming is necessary.  Otherwise, the stack verifies it.
3025	 */
3026	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
3027	    (RXFCB_CIP | RXFCB_CTU))
3028		skb->ip_summed = CHECKSUM_UNNECESSARY;
3029	else
3030		skb_checksum_none_assert(skb);
3031}
3032
3033/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
3034static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
3035{
3036	struct gfar_private *priv = netdev_priv(ndev);
3037	struct rxfcb *fcb = NULL;
3038
3039	/* fcb is at the beginning if exists */
3040	fcb = (struct rxfcb *)skb->data;
3041
3042	/* Remove the FCB from the skb
3043	 * Remove the padded bytes, if there are any
3044	 */
3045	if (priv->uses_rxfcb)
3046		skb_pull(skb, GMAC_FCB_LEN);
3047
3048	/* Get receive timestamp from the skb */
3049	if (priv->hwts_rx_en) {
3050		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
3051		u64 *ns = (u64 *) skb->data;
3052
3053		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
3054		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
3055	}
3056
3057	if (priv->padding)
3058		skb_pull(skb, priv->padding);
3059
3060	/* Trim off the FCS */
3061	pskb_trim(skb, skb->len - ETH_FCS_LEN);
3062
3063	if (ndev->features & NETIF_F_RXCSUM)
3064		gfar_rx_checksum(skb, fcb);
3065
3066	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
3067	 * Even if vlan rx accel is disabled, on some chips
3068	 * RXFCB_VLN is pseudo randomly set.
3069	 */
3070	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
3071	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
3072		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3073				       be16_to_cpu(fcb->vlctl));
3074}
3075
3076/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
3077 * until the budget/quota has been reached. Returns the number
3078 * of frames handled
3079 */
3080int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
3081{
3082	struct net_device *ndev = rx_queue->ndev;
3083	struct gfar_private *priv = netdev_priv(ndev);
3084	struct rxbd8 *bdp;
3085	int i, howmany = 0;
3086	struct sk_buff *skb = rx_queue->skb;
3087	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
3088	unsigned int total_bytes = 0, total_pkts = 0;
3089
3090	/* Get the first full descriptor */
3091	i = rx_queue->next_to_clean;
3092
3093	while (rx_work_limit--) {
3094		u32 lstatus;
3095
3096		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
3097			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
3098			cleaned_cnt = 0;
3099		}
3100
3101		bdp = &rx_queue->rx_bd_base[i];
3102		lstatus = be32_to_cpu(bdp->lstatus);
3103		if (lstatus & BD_LFLAG(RXBD_EMPTY))
3104			break;
3105
3106		/* order rx buffer descriptor reads */
3107		rmb();
3108
3109		/* fetch next to clean buffer from the ring */
3110		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
3111		if (unlikely(!skb))
3112			break;
3113
3114		cleaned_cnt++;
3115		howmany++;
3116
3117		if (unlikely(++i == rx_queue->rx_ring_size))
3118			i = 0;
3119
3120		rx_queue->next_to_clean = i;
3121
3122		/* fetch next buffer if not the last in frame */
3123		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
3124			continue;
3125
3126		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
3127			count_errors(lstatus, ndev);
3128
3129			/* discard faulty buffer */
3130			dev_kfree_skb(skb);
3131			skb = NULL;
3132			rx_queue->stats.rx_dropped++;
3133			continue;
3134		}
3135
3136		gfar_process_frame(ndev, skb);
3137
3138		/* Increment the number of packets */
3139		total_pkts++;
3140		total_bytes += skb->len;
3141
3142		skb_record_rx_queue(skb, rx_queue->qindex);
3143
3144		skb->protocol = eth_type_trans(skb, ndev);
3145
3146		/* Send the packet up the stack */
3147		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
3148
3149		skb = NULL;
3150	}
3151
3152	/* Store incomplete frames for completion */
3153	rx_queue->skb = skb;
3154
3155	rx_queue->stats.rx_packets += total_pkts;
3156	rx_queue->stats.rx_bytes += total_bytes;
3157
3158	if (cleaned_cnt)
3159		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
3160
3161	/* Update Last Free RxBD pointer for LFC */
3162	if (unlikely(priv->tx_actual_en)) {
3163		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
3164
3165		gfar_write(rx_queue->rfbptr, bdp_dma);
3166	}
3167
3168	return howmany;
3169}
3170
3171static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
3172{
3173	struct gfar_priv_grp *gfargrp =
3174		container_of(napi, struct gfar_priv_grp, napi_rx);
3175	struct gfar __iomem *regs = gfargrp->regs;
3176	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
3177	int work_done = 0;
3178
3179	/* Clear IEVENT, so interrupts aren't called again
3180	 * because of the packets that have already arrived
3181	 */
3182	gfar_write(&regs->ievent, IEVENT_RX_MASK);
3183
3184	work_done = gfar_clean_rx_ring(rx_queue, budget);
3185
3186	if (work_done < budget) {
3187		u32 imask;
3188		napi_complete_done(napi, work_done);
3189		/* Clear the halt bit in RSTAT */
3190		gfar_write(&regs->rstat, gfargrp->rstat);
3191
3192		spin_lock_irq(&gfargrp->grplock);
3193		imask = gfar_read(&regs->imask);
3194		imask |= IMASK_RX_DEFAULT;
3195		gfar_write(&regs->imask, imask);
3196		spin_unlock_irq(&gfargrp->grplock);
3197	}
3198
3199	return work_done;
3200}
3201
3202static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
3203{
3204	struct gfar_priv_grp *gfargrp =
3205		container_of(napi, struct gfar_priv_grp, napi_tx);
3206	struct gfar __iomem *regs = gfargrp->regs;
3207	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
3208	u32 imask;
3209
3210	/* Clear IEVENT, so interrupts aren't called again
3211	 * because of the packets that have already arrived
3212	 */
3213	gfar_write(&regs->ievent, IEVENT_TX_MASK);
3214
3215	/* run Tx cleanup to completion */
3216	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
3217		gfar_clean_tx_ring(tx_queue);
3218
3219	napi_complete(napi);
3220
3221	spin_lock_irq(&gfargrp->grplock);
3222	imask = gfar_read(&regs->imask);
3223	imask |= IMASK_TX_DEFAULT;
3224	gfar_write(&regs->imask, imask);
3225	spin_unlock_irq(&gfargrp->grplock);
3226
3227	return 0;
3228}
3229
3230static int gfar_poll_rx(struct napi_struct *napi, int budget)
3231{
3232	struct gfar_priv_grp *gfargrp =
3233		container_of(napi, struct gfar_priv_grp, napi_rx);
3234	struct gfar_private *priv = gfargrp->priv;
3235	struct gfar __iomem *regs = gfargrp->regs;
3236	struct gfar_priv_rx_q *rx_queue = NULL;
3237	int work_done = 0, work_done_per_q = 0;
3238	int i, budget_per_q = 0;
3239	unsigned long rstat_rxf;
3240	int num_act_queues;
3241
3242	/* Clear IEVENT, so interrupts aren't called again
3243	 * because of the packets that have already arrived
3244	 */
3245	gfar_write(&regs->ievent, IEVENT_RX_MASK);
3246
3247	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
3248
3249	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
3250	if (num_act_queues)
3251		budget_per_q = budget/num_act_queues;
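
	/* e.g. (illustrative) a budget of 64 across 3 active queues
	 * gives budget_per_q = 21; integer division leaves one unit
	 * of the budget unused this pass.
	 */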
3252
3253	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
3254		/* skip queue if not active */
3255		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
3256			continue;
3257
3258		rx_queue = priv->rx_queue[i];
3259		work_done_per_q =
3260			gfar_clean_rx_ring(rx_queue, budget_per_q);
3261		work_done += work_done_per_q;
3262
3263		/* finished processing this queue */
3264		if (work_done_per_q < budget_per_q) {
3265			/* clear active queue hw indication */
3266			gfar_write(&regs->rstat,
3267				   RSTAT_CLEAR_RXF0 >> i);
3268			num_act_queues--;
3269
3270			if (!num_act_queues)
3271				break;
3272		}
3273	}
3274
3275	if (!num_act_queues) {
3276		u32 imask;
3277		napi_complete_done(napi, work_done);
3278
3279		/* Clear the halt bit in RSTAT */
3280		gfar_write(&regs->rstat, gfargrp->rstat);
3281
3282		spin_lock_irq(&gfargrp->grplock);
3283		imask = gfar_read(&regs->imask);
3284		imask |= IMASK_RX_DEFAULT;
3285		gfar_write(&regs->imask, imask);
3286		spin_unlock_irq(&gfargrp->grplock);
3287	}
3288
3289	return work_done;
3290}
3291
3292static int gfar_poll_tx(struct napi_struct *napi, int budget)
3293{
3294	struct gfar_priv_grp *gfargrp =
3295		container_of(napi, struct gfar_priv_grp, napi_tx);
3296	struct gfar_private *priv = gfargrp->priv;
3297	struct gfar __iomem *regs = gfargrp->regs;
3298	struct gfar_priv_tx_q *tx_queue = NULL;
3299	int has_tx_work = 0;
3300	int i;
3301
3302	/* Clear IEVENT, so interrupts aren't called again
3303	 * because of the packets that have already arrived
3304	 */
3305	gfar_write(&regs->ievent, IEVENT_TX_MASK);
3306
3307	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
3308		tx_queue = priv->tx_queue[i];
3309		/* run Tx cleanup to completion */
3310		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3311			gfar_clean_tx_ring(tx_queue);
3312			has_tx_work = 1;
3313		}
3314	}
3315
3316	if (!has_tx_work) {
3317		u32 imask;
3318		napi_complete(napi);
3319
3320		spin_lock_irq(&gfargrp->grplock);
3321		imask = gfar_read(&regs->imask);
3322		imask |= IMASK_TX_DEFAULT;
3323		gfar_write(&regs->imask, imask);
3324		spin_unlock_irq(&gfargrp->grplock);
3325	}
3326
3327	return 0;
3328}
3329
3330
3331#ifdef CONFIG_NET_POLL_CONTROLLER
3332/* Polling 'interrupt' - used by things like netconsole to send skbs
3333 * without having to re-enable interrupts. It's not called while
3334 * the interrupt routine is executing.
3335 */
3336static void gfar_netpoll(struct net_device *dev)
3337{
3338	struct gfar_private *priv = netdev_priv(dev);
3339	int i;
3340
3341	/* If the device has multiple interrupts, run tx/rx */
3342	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3343		for (i = 0; i < priv->num_grps; i++) {
3344			struct gfar_priv_grp *grp = &priv->gfargrp[i];
3345
3346			disable_irq(gfar_irq(grp, TX)->irq);
3347			disable_irq(gfar_irq(grp, RX)->irq);
3348			disable_irq(gfar_irq(grp, ER)->irq);
3349			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3350			enable_irq(gfar_irq(grp, ER)->irq);
3351			enable_irq(gfar_irq(grp, RX)->irq);
3352			enable_irq(gfar_irq(grp, TX)->irq);
3353		}
3354	} else {
3355		for (i = 0; i < priv->num_grps; i++) {
3356			struct gfar_priv_grp *grp = &priv->gfargrp[i];
3357
3358			disable_irq(gfar_irq(grp, TX)->irq);
3359			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3360			enable_irq(gfar_irq(grp, TX)->irq);
3361		}
3362	}
3363}
3364#endif
3365
3366/* The interrupt handler for devices with one interrupt */
3367static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3368{
3369	struct gfar_priv_grp *gfargrp = grp_id;
3370
3371	/* Save ievent for future reference */
3372	u32 events = gfar_read(&gfargrp->regs->ievent);
3373
3374	/* Check for reception */
3375	if (events & IEVENT_RX_MASK)
3376		gfar_receive(irq, grp_id);
3377
3378	/* Check for transmit completion */
3379	if (events & IEVENT_TX_MASK)
3380		gfar_transmit(irq, grp_id);
3381
3382	/* Check for errors */
3383	if (events & IEVENT_ERR_MASK)
3384		gfar_error(irq, grp_id);
3385
3386	return IRQ_HANDLED;
3387}
3388
3389/* Called every time the controller might need to be made
3390 * aware of new link state.  The PHY code conveys this
3391 * information through variables in the phydev structure, and this
3392 * function converts those variables into the appropriate
3393 * register values, and can bring down the device if needed.
3394 */
3395static void adjust_link(struct net_device *dev)
3396{
3397	struct gfar_private *priv = netdev_priv(dev);
3398	struct phy_device *phydev = dev->phydev;
3399
3400	if (unlikely(phydev->link != priv->oldlink ||
3401		     (phydev->link && (phydev->duplex != priv->oldduplex ||
3402				       phydev->speed != priv->oldspeed))))
3403		gfar_update_link_state(priv);
3404}
3405
3406/* Update the hash table based on the current list of multicast
3407 * addresses we subscribe to.  Also, change the promiscuity of
3408 * the device based on the flags (this function is called
3409 * whenever dev->flags is changed
3410 */
3411static void gfar_set_multi(struct net_device *dev)
3412{
3413	struct netdev_hw_addr *ha;
3414	struct gfar_private *priv = netdev_priv(dev);
3415	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3416	u32 tempval;
3417
3418	if (dev->flags & IFF_PROMISC) {
3419		/* Set RCTRL to PROM */
3420		tempval = gfar_read(&regs->rctrl);
3421		tempval |= RCTRL_PROM;
3422		gfar_write(&regs->rctrl, tempval);
3423	} else {
3424		/* Set RCTRL to not PROM */
3425		tempval = gfar_read(&regs->rctrl);
3426		tempval &= ~(RCTRL_PROM);
3427		gfar_write(&regs->rctrl, tempval);
3428	}
3429
3430	if (dev->flags & IFF_ALLMULTI) {
3431		/* Set the hash to rx all multicast frames */
3432		gfar_write(&regs->igaddr0, 0xffffffff);
3433		gfar_write(&regs->igaddr1, 0xffffffff);
3434		gfar_write(&regs->igaddr2, 0xffffffff);
3435		gfar_write(&regs->igaddr3, 0xffffffff);
3436		gfar_write(&regs->igaddr4, 0xffffffff);
3437		gfar_write(&regs->igaddr5, 0xffffffff);
3438		gfar_write(&regs->igaddr6, 0xffffffff);
3439		gfar_write(&regs->igaddr7, 0xffffffff);
3440		gfar_write(&regs->gaddr0, 0xffffffff);
3441		gfar_write(&regs->gaddr1, 0xffffffff);
3442		gfar_write(&regs->gaddr2, 0xffffffff);
3443		gfar_write(&regs->gaddr3, 0xffffffff);
3444		gfar_write(&regs->gaddr4, 0xffffffff);
3445		gfar_write(&regs->gaddr5, 0xffffffff);
3446		gfar_write(&regs->gaddr6, 0xffffffff);
3447		gfar_write(&regs->gaddr7, 0xffffffff);
3448	} else {
3449		int em_num;
3450		int idx;
3451
3452		/* zero out the hash */
3453		gfar_write(&regs->igaddr0, 0x0);
3454		gfar_write(&regs->igaddr1, 0x0);
3455		gfar_write(&regs->igaddr2, 0x0);
3456		gfar_write(&regs->igaddr3, 0x0);
3457		gfar_write(&regs->igaddr4, 0x0);
3458		gfar_write(&regs->igaddr5, 0x0);
3459		gfar_write(&regs->igaddr6, 0x0);
3460		gfar_write(&regs->igaddr7, 0x0);
3461		gfar_write(&regs->gaddr0, 0x0);
3462		gfar_write(&regs->gaddr1, 0x0);
3463		gfar_write(&regs->gaddr2, 0x0);
3464		gfar_write(&regs->gaddr3, 0x0);
3465		gfar_write(&regs->gaddr4, 0x0);
3466		gfar_write(&regs->gaddr5, 0x0);
3467		gfar_write(&regs->gaddr6, 0x0);
3468		gfar_write(&regs->gaddr7, 0x0);
3469
3470		/* If we have extended hash tables, we need to
3471		 * clear the exact match registers to prepare for
3472		 * setting them
3473		 */
3474		if (priv->extended_hash) {
3475			em_num = GFAR_EM_NUM + 1;
3476			gfar_clear_exact_match(dev);
3477			idx = 1;
3478		} else {
3479			idx = 0;
3480			em_num = 0;
3481		}
3482
3483		if (netdev_mc_empty(dev))
3484			return;
3485
3486		/* Parse the list, and set the appropriate bits */
3487		netdev_for_each_mc_addr(ha, dev) {
3488			if (idx < em_num) {
3489				gfar_set_mac_for_addr(dev, idx, ha->addr);
3490				idx++;
3491			} else
3492				gfar_set_hash_for_addr(dev, ha->addr);
3493		}
3494	}
3495}
3496
3497
3498/* Clears each of the exact match registers to zero, so they
3499 * don't interfere with normal reception
3500 */
3501static void gfar_clear_exact_match(struct net_device *dev)
3502{
3503	int idx;
3504	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3505
3506	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3507		gfar_set_mac_for_addr(dev, idx, zero_arr);
3508}
3509
3510/* Set the appropriate hash bit for the given addr */
3511/* The algorithm works like so:
3512 * 1) Take the Destination Address (ie the multicast address), and
3513 * do a CRC on it (little endian), and reverse the bits of the
3514 * result.
3515 * 2) Use the 8 most significant bits as a hash into a 256-entry
3516 * table.  The table is controlled through 8 32-bit registers:
3517 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3518 * entry 255.  This means that the 3 most significant bits of the
3519 * hash index select which gaddr register to use, and the 5 other bits
3520 * indicate which bit (assuming an IBM numbering scheme, which
3521 * for PowerPC (tm) is usually the case) in the register holds
3522 * the entry.
3523 */
3524static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3525{
3526	u32 tempval;
3527	struct gfar_private *priv = netdev_priv(dev);
3528	u32 result = ether_crc(ETH_ALEN, addr);
3529	int width = priv->hash_width;
3530	u8 whichbit = (result >> (32 - width)) & 0x1f;
3531	u8 whichreg = result >> (32 - width + 5);
3532	u32 value = (1 << (31-whichbit));
3533
3534	tempval = gfar_read(priv->hash_regs[whichreg]);
3535	tempval |= value;
3536	gfar_write(priv->hash_regs[whichreg], tempval);
3537}
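
/*
 * Worked example (illustrative values): with hash_width = 8 and a CRC
 * result of 0xA7000000, result >> 24 = 0xA7; the low 5 bits give
 * whichbit = 7 and result >> 29 gives whichreg = 5, so bit
 * 31 - 7 = 24 is set in hash_regs[5].
 */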
3538
3539
3540/* There are multiple MAC Address register pairs on some controllers
3541 * This function sets the numth pair to a given address
3542 */
3543static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3544				  const u8 *addr)
3545{
3546	struct gfar_private *priv = netdev_priv(dev);
3547	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3548	u32 tempval;
3549	u32 __iomem *macptr = &regs->macstnaddr1;
3550
3551	macptr += num*2;
3552
3553	/* For a station address of 0x12345678ABCD in transmission
3554	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3555	 * MACnADDR2 is set to 0x34120000.
3556	 */
3557	tempval = (addr[5] << 24) | (addr[4] << 16) |
3558		  (addr[3] << 8)  |  addr[2];
3559
3560	gfar_write(macptr, tempval);
3561
3562	tempval = (addr[1] << 24) | (addr[0] << 16);
3563
3564	gfar_write(macptr+1, tempval);
3565}
3566
3567/* GFAR error interrupt handler */
3568static irqreturn_t gfar_error(int irq, void *grp_id)
3569{
3570	struct gfar_priv_grp *gfargrp = grp_id;
3571	struct gfar __iomem *regs = gfargrp->regs;
3572	struct gfar_private *priv = gfargrp->priv;
3573	struct net_device *dev = priv->ndev;
3574
3575	/* Save ievent for future reference */
3576	u32 events = gfar_read(&regs->ievent);
3577
3578	/* Clear IEVENT */
3579	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3580
3581	/* Magic Packet is not an error. */
3582	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3583	    (events & IEVENT_MAG))
3584		events &= ~IEVENT_MAG;
3585
3586	/* Log the raw event/mask bits if error messaging is enabled */
3587	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3588		netdev_dbg(dev,
3589			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3590			   events, gfar_read(&regs->imask));
3591
3592	/* Update the error counters */
3593	if (events & IEVENT_TXE) {
3594		dev->stats.tx_errors++;
3595
3596		if (events & IEVENT_LC)
3597			dev->stats.tx_window_errors++;
3598		if (events & IEVENT_CRL)
3599			dev->stats.tx_aborted_errors++;
3600		if (events & IEVENT_XFUN) {
3601			netif_dbg(priv, tx_err, dev,
3602				  "TX FIFO underrun, packet dropped\n");
3603			dev->stats.tx_dropped++;
3604			atomic64_inc(&priv->extra_stats.tx_underrun);
3605
3606			schedule_work(&priv->reset_task);
3607		}
3608		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3609	}
3610	if (events & IEVENT_BSY) {
3611		dev->stats.rx_over_errors++;
3612		atomic64_inc(&priv->extra_stats.rx_bsy);
3613
3614		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3615			  gfar_read(&regs->rstat));
3616	}
3617	if (events & IEVENT_BABR) {
3618		dev->stats.rx_errors++;
3619		atomic64_inc(&priv->extra_stats.rx_babr);
3620
3621		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3622	}
3623	if (events & IEVENT_EBERR) {
3624		atomic64_inc(&priv->extra_stats.eberr);
3625		netif_dbg(priv, rx_err, dev, "bus error\n");
3626	}
3627	if (events & IEVENT_RXC)
3628		netif_dbg(priv, rx_status, dev, "control frame\n");
3629
3630	if (events & IEVENT_BABT) {
3631		atomic64_inc(&priv->extra_stats.tx_babt);
3632		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3633	}
3634	return IRQ_HANDLED;
3635}
3636
3637static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3638{
3639	struct net_device *ndev = priv->ndev;
3640	struct phy_device *phydev = ndev->phydev;
3641	u32 val = 0;
3642
3643	if (!phydev->duplex)
3644		return val;
3645
3646	if (!priv->pause_aneg_en) {
3647		if (priv->tx_pause_en)
3648			val |= MACCFG1_TX_FLOW;
3649		if (priv->rx_pause_en)
3650			val |= MACCFG1_RX_FLOW;
3651	} else {
3652		u16 lcl_adv, rmt_adv;
3653		u8 flowctrl;
3654		/* get link partner capabilities */
3655		rmt_adv = 0;
3656		if (phydev->pause)
3657			rmt_adv = LPA_PAUSE_CAP;
3658		if (phydev->asym_pause)
3659			rmt_adv |= LPA_PAUSE_ASYM;
3660
3661		lcl_adv = 0;
3662		if (phydev->advertising & ADVERTISED_Pause)
3663			lcl_adv |= ADVERTISE_PAUSE_CAP;
3664		if (phydev->advertising & ADVERTISED_Asym_Pause)
3665			lcl_adv |= ADVERTISE_PAUSE_ASYM;
3666
3667		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3668		if (flowctrl & FLOW_CTRL_TX)
3669			val |= MACCFG1_TX_FLOW;
3670		if (flowctrl & FLOW_CTRL_RX)
3671			val |= MACCFG1_RX_FLOW;
3672	}
3673
3674	return val;
3675}
3676
3677static noinline void gfar_update_link_state(struct gfar_private *priv)
3678{
3679	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3680	struct net_device *ndev = priv->ndev;
3681	struct phy_device *phydev = ndev->phydev;
3682	struct gfar_priv_rx_q *rx_queue = NULL;
3683	int i;
3684
3685	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3686		return;
3687
3688	if (phydev->link) {
3689		u32 tempval1 = gfar_read(&regs->maccfg1);
3690		u32 tempval = gfar_read(&regs->maccfg2);
3691		u32 ecntrl = gfar_read(&regs->ecntrl);
3692		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
3693
3694		if (phydev->duplex != priv->oldduplex) {
3695			if (!(phydev->duplex))
3696				tempval &= ~(MACCFG2_FULL_DUPLEX);
3697			else
3698				tempval |= MACCFG2_FULL_DUPLEX;
3699
3700			priv->oldduplex = phydev->duplex;
3701		}
3702
3703		if (phydev->speed != priv->oldspeed) {
3704			switch (phydev->speed) {
3705			case 1000:
3706				tempval =
3707				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3708
3709				ecntrl &= ~(ECNTRL_R100);
3710				break;
3711			case 100:
3712			case 10:
3713				tempval =
3714				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3715
3716				/* Reduced mode distinguishes
3717				 * between 10 and 100
3718				 */
3719				if (phydev->speed == SPEED_100)
3720					ecntrl |= ECNTRL_R100;
3721				else
3722					ecntrl &= ~(ECNTRL_R100);
3723				break;
3724			default:
3725				netif_warn(priv, link, priv->ndev,
3726					   "Ack!  Speed (%d) is not 10/100/1000!\n",
3727					   phydev->speed);
3728				break;
3729			}
3730
3731			priv->oldspeed = phydev->speed;
3732		}
3733
3734		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3735		tempval1 |= gfar_get_flowctrl_cfg(priv);
3736
3737		/* Turn last free buffer recording on */
3738		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
3739			for (i = 0; i < priv->num_rx_queues; i++) {
3740				u32 bdp_dma;
3741
3742				rx_queue = priv->rx_queue[i];
3743				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
3744				gfar_write(rx_queue->rfbptr, bdp_dma);
3745			}
3746
3747			priv->tx_actual_en = 1;
3748		}
3749
3750		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
3751			priv->tx_actual_en = 0;
3752
3753		gfar_write(&regs->maccfg1, tempval1);
3754		gfar_write(&regs->maccfg2, tempval);
3755		gfar_write(&regs->ecntrl, ecntrl);
3756
3757		if (!priv->oldlink)
3758			priv->oldlink = 1;
3759
3760	} else if (priv->oldlink) {
3761		priv->oldlink = 0;
3762		priv->oldspeed = 0;
3763		priv->oldduplex = -1;
3764	}
3765
3766	if (netif_msg_link(priv))
3767		phy_print_status(phydev);
3768}
3769
3770static const struct of_device_id gfar_match[] =
3771{
3772	{
3773		.type = "network",
3774		.compatible = "gianfar",
3775	},
3776	{
3777		.compatible = "fsl,etsec2",
3778	},
3779	{},
3780};
3781MODULE_DEVICE_TABLE(of, gfar_match);
3782
3783/* Structure for a device driver */
3784static struct platform_driver gfar_driver = {
3785	.driver = {
3786		.name = "fsl-gianfar",
3787		.pm = GFAR_PM_OPS,
3788		.of_match_table = gfar_match,
3789	},
3790	.probe = gfar_probe,
3791	.remove = gfar_remove,
3792};
3793
3794module_platform_driver(gfar_driver);