/*
 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
 *
 * 2005-2010 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
 * available in the GRLIB VHDL IP core library.
 *
 * Full documentation of both cores can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * The Gigabit version supports scatter/gather DMA, any alignment of
 * buffers and checksum offloading.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors: Kristoffer Glembo
 *               Daniel Hellstrom
 *               Marko Isomaki
 */

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/byteorder.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#include "greth.h"

#define GRETH_DEF_MSG_ENABLE      \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

static int greth_debug = -1;	/* -1 == use GRETH_DEF_MSG_ENABLE as value */
module_param(greth_debug, int, 0);
MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");

/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
static int macaddr[6];
module_param_array(macaddr, int, NULL, 0);
MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");

static int greth_edcl = 1;
module_param(greth_edcl, int, 0);
MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
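
/* Example (illustrative only, assuming the module is built as greth.ko):
 * a fixed MAC address and EDCL disabled could be requested at load time
 * with something like:
 *
 *   modprobe greth macaddr=0x08,0x00,0x20,0x30,0x40,0x50 greth_edcl=0
 */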

static int greth_open(struct net_device *dev);
static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
	   struct net_device *dev);
static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
	   struct net_device *dev);
static int greth_rx(struct net_device *dev, int limit);
static int greth_rx_gbit(struct net_device *dev, int limit);
static void greth_clean_tx(struct net_device *dev);
static void greth_clean_tx_gbit(struct net_device *dev);
static irqreturn_t greth_interrupt(int irq, void *dev_id);
static int greth_close(struct net_device *dev);
static int greth_set_mac_add(struct net_device *dev, void *p);
static void greth_set_multicast_list(struct net_device *dev);

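/* GRETH registers and buffer descriptors are big-endian; all accesses go
 * through __raw_readl()/__raw_writel() combined with explicit byte
 * swapping, so the driver also works on little-endian hosts.
 */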
#define GRETH_REGLOAD(a)	    (be32_to_cpu(__raw_readl(&(a))))
#define GRETH_REGSAVE(a, v)         (__raw_writel(cpu_to_be32(v), &(a)))
#define GRETH_REGORIN(a, v)         (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
#define GRETH_REGANDIN(a, v)        (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))

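/* Ring indices wrap via the descriptor-count masks, which assumes that
 * GRETH_TXBD_NUM and GRETH_RXBD_NUM are powers of two.
 */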
#define NEXT_TX(N)      (((N) + 1) & GRETH_TXBD_NUM_MASK)
#define SKIP_TX(N, C)   (((N) + C) & GRETH_TXBD_NUM_MASK)
#define NEXT_RX(N)      (((N) + 1) & GRETH_RXBD_NUM_MASK)

static void greth_print_rx_packet(void *addr, int len)
{
	print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
			addr, len, true);
}

static void greth_print_tx_packet(struct sk_buff *skb)
{
	int i;
	int length;

	if (skb_shinfo(skb)->nr_frags == 0)
		length = skb->len;
	else
		length = skb_headlen(skb);

	print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
			skb->data, length, true);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
			       skb_frag_address(&skb_shinfo(skb)->frags[i]),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]), true);
	}
}

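/* Control register helpers. The wmb() calls make sure descriptor updates
 * have reached memory before the control register write that starts DMA.
 */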
static inline void greth_enable_tx(struct greth_private *greth)
{
	wmb();
	GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}

static inline void greth_enable_tx_and_irq(struct greth_private *greth)
{
	wmb(); /* BDs must have been written to memory before enabling TX */
	GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
}

static inline void greth_disable_tx(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
}

static inline void greth_enable_rx(struct greth_private *greth)
{
	wmb();
	GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
}

static inline void greth_disable_rx(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
}

static inline void greth_enable_irqs(struct greth_private *greth)
{
	GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
}

static inline void greth_disable_irqs(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI | GRETH_TXI));
}

static inline void greth_write_bd(u32 *bd, u32 val)
{
	__raw_writel(cpu_to_be32(val), bd);
}

static inline u32 greth_read_bd(u32 *bd)
{
	return be32_to_cpu(__raw_readl(bd));
}

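/* Release all RX/TX buffers and their DMA mappings. The gigabit MAC walks
 * the outstanding SKBs; the 10/100 MAC simply frees its fixed bounce
 * buffers.
 */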
static void greth_clean_rings(struct greth_private *greth)
{
	int i;
	struct greth_bd *rx_bdp = greth->rx_bd_base;
	struct greth_bd *tx_bdp = greth->tx_bd_base;

	if (greth->gbit_mac) {
		/* Free and unmap RX buffers */
		for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
			if (greth->rx_skbuff[i] != NULL) {
				dev_kfree_skb(greth->rx_skbuff[i]);
				dma_unmap_single(greth->dev,
						 greth_read_bd(&rx_bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);
			}
		}

		/* TX buffers */
		while (greth->tx_free < GRETH_TXBD_NUM) {
			struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
			int nr_frags = skb_shinfo(skb)->nr_frags;

			tx_bdp = greth->tx_bd_base + greth->tx_last;
			greth->tx_last = NEXT_TX(greth->tx_last);

			dma_unmap_single(greth->dev,
					 greth_read_bd(&tx_bdp->addr),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);

			for (i = 0; i < nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
				tx_bdp = greth->tx_bd_base + greth->tx_last;

				dma_unmap_page(greth->dev,
					       greth_read_bd(&tx_bdp->addr),
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);

				greth->tx_last = NEXT_TX(greth->tx_last);
			}
			greth->tx_free += nr_frags + 1;
			dev_kfree_skb(skb);
		}
	} else { /* 10/100 Mbps MAC */
		for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
			kfree(greth->rx_bufs[i]);
			dma_unmap_single(greth->dev,
					 greth_read_bd(&rx_bdp->addr),
					 MAX_FRAME_SIZE,
					 DMA_FROM_DEVICE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
			kfree(greth->tx_bufs[i]);
			dma_unmap_single(greth->dev,
					 greth_read_bd(&tx_bdp->addr),
					 MAX_FRAME_SIZE,
					 DMA_TO_DEVICE);
		}
	}
}

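/* Allocate and DMA-map the RX/TX buffers and point the hardware descriptor
 * rings at them. Returns 0 on success, or -ENOMEM after unwinding through
 * greth_clean_rings().
 */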
static int greth_init_rings(struct greth_private *greth)
{
	struct sk_buff *skb;
	struct greth_bd *rx_bd, *tx_bd;
	u32 dma_addr;
	int i;

	rx_bd = greth->rx_bd_base;
	tx_bd = greth->tx_bd_base;

	/* Initialize descriptor rings and buffers */
	if (greth->gbit_mac) {
		for (i = 0; i < GRETH_RXBD_NUM; i++) {
			skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE + NET_IP_ALIGN);
			if (skb == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(greth->dev,
						  skb->data,
						  MAX_FRAME_SIZE + NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth->rx_skbuff[i] = skb;
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}
	} else {
		/* The 10/100 MAC uses a fixed set of buffers and copies to/from SKBs */
		for (i = 0; i < GRETH_RXBD_NUM; i++) {
			greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
			if (greth->rx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->rx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++) {
			greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
			if (greth->tx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->tx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_TO_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&tx_bd[i].addr, dma_addr);
			greth_write_bd(&tx_bd[i].stat, 0);
		}
	}
	greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
		       greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

	/* Initialize pointers. */
	greth->rx_cur = 0;
	greth->tx_next = 0;
	greth->tx_last = 0;
	greth->tx_free = GRETH_TXBD_NUM;

	/* Initialize descriptor base address */
	GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
	GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

	return 0;

cleanup:
	greth_clean_rings(greth);
	return -ENOMEM;
}

static int greth_open(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	int err;

	err = greth_init_rings(greth);
	if (err) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
		return err;
	}

	err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
	if (err) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "Could not allocate interrupt %d\n", greth->irq);
		greth_clean_rings(greth);
		return err;
	}

	if (netif_msg_ifup(greth))
		dev_dbg(&dev->dev, " starting queue\n");
	netif_start_queue(dev);

	GRETH_REGSAVE(greth->regs->status, 0xFF);

	napi_enable(&greth->napi);

	greth_enable_irqs(greth);
	greth_enable_tx(greth);
	greth_enable_rx(greth);
	return 0;
}

static int greth_close(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);

	napi_disable(&greth->napi);

	greth_disable_irqs(greth);
	greth_disable_tx(greth);
	greth_disable_rx(greth);

	netif_stop_queue(dev);

	free_irq(greth->irq, (void *) dev);

	greth_clean_rings(greth);

	return 0;
}

static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	int err = NETDEV_TX_OK;
	u32 status, dma_addr, ctrl;
	unsigned long flags;

	/* Clean TX ring */
	greth_clean_tx(greth->netdev);

	if (unlikely(greth->tx_free <= 0)) {
		spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
		ctrl = GRETH_REGLOAD(greth->regs->control);
		/* Enable TX IRQ only if not already in poll() routine */
		if (ctrl & GRETH_RXI)
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&greth->devlock, flags);
		return NETDEV_TX_BUSY;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	bdp = greth->tx_bd_base + greth->tx_next;
	dma_addr = greth_read_bd(&bdp->addr);

	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
	greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;

	/* Wrap around descriptor ring */
	if (greth->tx_next == GRETH_TXBD_NUM_MASK)
		status |= GRETH_BD_WR;

	greth->tx_next = NEXT_TX(greth->tx_next);
	greth->tx_free--;

	/* Write descriptor control word and enable transmission */
	greth_write_bd(&bdp->stat, status);
	spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
	greth_enable_tx(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

out:
	dev_kfree_skb(skb);
	return err;
}

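/* One descriptor is kept unused so that a completely full ring can be
 * told apart from an empty one.
 */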
static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
{
	if (tx_next < tx_last)
		return (tx_last - tx_next) - 1;
	else
		return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
}

static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	u32 status, dma_addr;
	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
	unsigned long flags;
	u16 tx_last;

	nr_frags = skb_shinfo(skb)->nr_frags;
	tx_last = greth->tx_last;
	rmb(); /* tx_last is updated by the poll task */

	if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
		netif_stop_queue(dev);
		err = NETDEV_TX_BUSY;
		goto out;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	/* Save skb pointer. */
	greth->tx_skbuff[greth->tx_next] = skb;

	/* Linear buf */
	if (nr_frags != 0)
		status = GRETH_TXBD_MORE;
	else
		status = GRETH_BD_IE;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= GRETH_TXBD_CSALL;
	status |= skb_headlen(skb) & GRETH_BD_LEN;
	if (greth->tx_next == GRETH_TXBD_NUM_MASK)
		status |= GRETH_BD_WR;

	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, status);
	dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
		goto map_error;

	greth_write_bd(&bdp->addr, dma_addr);

	curr_tx = NEXT_TX(greth->tx_next);

	/* Frags */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		greth->tx_skbuff[curr_tx] = NULL;
		bdp = greth->tx_bd_base + curr_tx;

		status = GRETH_BD_EN;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			status |= GRETH_TXBD_CSALL;
		status |= skb_frag_size(frag) & GRETH_BD_LEN;

		/* Wrap around descriptor ring */
		if (curr_tx == GRETH_TXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		/* More fragments left */
		if (i < nr_frags - 1)
			status |= GRETH_TXBD_MORE;
		else
			status |= GRETH_BD_IE; /* enable IRQ on last fragment */

		greth_write_bd(&bdp->stat, status);

		dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
					    DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
			goto frag_map_error;

		greth_write_bd(&bdp->addr, dma_addr);

		curr_tx = NEXT_TX(curr_tx);
	}

	wmb();

	/* Enable the descriptor chain by enabling the first descriptor */
	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat,
		       greth_read_bd(&bdp->stat) | GRETH_BD_EN);

	spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
	greth->tx_next = curr_tx;
	greth_enable_tx_and_irq(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

	return NETDEV_TX_OK;

frag_map_error:
	/* Unmap SKB mappings that succeeded and disable descriptor. Index
	 * via SKIP_TX() so the walk wraps correctly when the failed chain
	 * crossed the end of the ring.
	 */
	for (i = 0; SKIP_TX(greth->tx_next, i) != curr_tx; i++) {
		bdp = greth->tx_bd_base + SKIP_TX(greth->tx_next, i);
		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
				 DMA_TO_DEVICE);
		greth_write_bd(&bdp->stat, 0);
	}
map_error:
	if (net_ratelimit())
		dev_warn(greth->dev, "Could not create TX DMA mapping\n");
	dev_kfree_skb(skb);
out:
	return err;
}

static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct greth_private *greth;
	u32 status, ctrl;
	irqreturn_t retval = IRQ_NONE;

	greth = netdev_priv(dev);

	spin_lock(&greth->devlock);

	/* Get the interrupt events that caused us to be here. */
	status = GRETH_REGLOAD(greth->regs->status);

	/* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
	 * set regardless of whether IRQ is enabled or not. Especially
	 * important when shared IRQ.
	 */
	ctrl = GRETH_REGLOAD(greth->regs->control);

	/* Handle rx and tx interrupts through poll */
	if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
	    ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
		retval = IRQ_HANDLED;

		/* Disable interrupts and schedule poll() */
		greth_disable_irqs(greth);
		napi_schedule(&greth->napi);
	}

	mmiowb();
	spin_unlock(&greth->devlock);

	return retval;
}

static void greth_clean_tx(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	u32 stat;

	greth = netdev_priv(dev);

	while (1) {
		bdp = greth->tx_bd_base + greth->tx_last;
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
		mb();
		stat = greth_read_bd(&bdp->stat);

		if (unlikely(stat & GRETH_BD_EN))
			break;

		if (greth->tx_free == GRETH_TXBD_NUM)
			break;

		/* Check status for errors */
		if (unlikely(stat & GRETH_TXBD_STATUS)) {
			dev->stats.tx_errors++;
			if (stat & GRETH_TXBD_ERR_AL)
				dev->stats.tx_aborted_errors++;
			if (stat & GRETH_TXBD_ERR_UE)
				dev->stats.tx_fifo_errors++;
		}
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
		greth->tx_last = NEXT_TX(greth->tx_last);
		greth->tx_free++;
	}

	if (greth->tx_free > 0)
		netif_wake_queue(dev);
}

static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
{
	/* Check status for errors */
	if (unlikely(stat & GRETH_TXBD_STATUS)) {
		dev->stats.tx_errors++;
		if (stat & GRETH_TXBD_ERR_AL)
			dev->stats.tx_aborted_errors++;
		if (stat & GRETH_TXBD_ERR_UE)
			dev->stats.tx_fifo_errors++;
		if (stat & GRETH_TXBD_ERR_LC)
			dev->stats.tx_aborted_errors++;
	}
	dev->stats.tx_packets++;
}

static void greth_clean_tx_gbit(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp, *bdp_last_frag;
	struct sk_buff *skb = NULL;
	u32 stat;
	int nr_frags, i;
	u16 tx_last;

	greth = netdev_priv(dev);
	tx_last = greth->tx_last;

	while (tx_last != greth->tx_next) {
		skb = greth->tx_skbuff[tx_last];

		nr_frags = skb_shinfo(skb)->nr_frags;

		/* We only clean fully completed SKBs */
		bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);

		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
		mb();
		stat = greth_read_bd(&bdp_last_frag->stat);

		if (stat & GRETH_BD_EN)
			break;

		greth->tx_skbuff[tx_last] = NULL;

		greth_update_tx_stats(dev, stat);
		dev->stats.tx_bytes += skb->len;

		bdp = greth->tx_bd_base + tx_last;

		tx_last = NEXT_TX(tx_last);

		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 skb_headlen(skb),
				 DMA_TO_DEVICE);

		for (i = 0; i < nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			bdp = greth->tx_bd_base + tx_last;

			dma_unmap_page(greth->dev,
				       greth_read_bd(&bdp->addr),
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);

			tx_last = NEXT_TX(tx_last);
		}
		dev_kfree_skb(skb);
	}
	if (skb) { /* skb is set only if the above while loop was entered */
		wmb();
		greth->tx_last = tx_last;

		if (netif_queue_stopped(dev) &&
		    (greth_num_free_bds(tx_last, greth->tx_next) >
		    (MAX_SKB_FRAGS + 1)))
			netif_wake_queue(dev);
	}
}

static int greth_rx(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb;
	int pkt_len;
	int bad, count;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {
		bdp = greth->rx_bd_base + greth->rx_cur;
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);

		if (unlikely(status & GRETH_BD_EN))
			break;

		dma_addr = greth_read_bd(&bdp->addr);
		bad = 0;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {
			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			}
			if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			}
			if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}
		if (unlikely(bad)) {
			dev->stats.rx_errors++;
		} else {
			pkt_len = status & GRETH_BD_LEN;

			skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

			if (unlikely(skb == NULL)) {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - packet dropped\n");

				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, NET_IP_ALIGN);

				dma_sync_single_for_cpu(greth->dev,
							dma_addr,
							pkt_len,
							DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

				memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_bytes += pkt_len;
				dev->stats.rx_packets++;
				netif_receive_skb(skb);
			}
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		wmb();
		greth_write_bd(&bdp->stat, status);

		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}

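/* Returns nonzero when the hardware has successfully verified the IP and
 * TCP/UDP checksums; IP fragments are never reported as checksummed.
 */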
static inline int hw_checksummed(u32 status)
{
	if (status & GRETH_RXBD_IP_FRAG)
		return 0;

	if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
		return 0;

	if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
		return 0;

	if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
		return 0;

	return 1;
}

static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {
		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {
			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate new skb to replace current, not needed if the
		 * current skb can be reused */
		if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						  newskb->data,
						  MAX_FRAME_SIZE + NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

				skb_put(skb, pkt_len);

				if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				/* reusing current skb, so it is a drop */
				dev->stats.rx_dropped++;
			}
		} else if (bad) {
			/* Bad frame transfer, the skb is reused */
			dev->stats.rx_dropped++;
		} else {
			/* Failed to allocate a new skb. This is wasteful, but
			 * the current "filled" skb is reused, as if the
			 * transfer had failed. One could argue that RX
			 * descriptor table handling should be divided into
			 * cleaning and filling as the TX part of the driver
			 */
			if (net_ratelimit())
				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
			/* reusing current skb, so it is a drop */
			dev->stats.rx_dropped++;
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		wmb();
		greth_write_bd(&bdp->stat, status);
		spin_lock_irqsave(&greth->devlock, flags);
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);
		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}

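/* NAPI poll: clean TX and receive up to @budget frames. Interrupts are only
 * re-enabled after a final check of the status register, so events that
 * arrive in the window restart the poll loop instead of being lost.
 */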
static int greth_poll(struct napi_struct *napi, int budget)
{
	struct greth_private *greth;
	int work_done = 0;
	unsigned long flags;
	u32 mask, ctrl;

	greth = container_of(napi, struct greth_private, napi);

restart_txrx_poll:
	if (greth->gbit_mac) {
		greth_clean_tx_gbit(greth->netdev);
		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
	} else {
		if (netif_queue_stopped(greth->netdev))
			greth_clean_tx(greth->netdev);
		work_done += greth_rx(greth->netdev, budget - work_done);
	}

	if (work_done < budget) {
		spin_lock_irqsave(&greth->devlock, flags);

		ctrl = GRETH_REGLOAD(greth->regs->control);
		if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
		    (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
			GRETH_REGSAVE(greth->regs->control,
					ctrl | GRETH_TXI | GRETH_RXI);
			mask = GRETH_INT_RX | GRETH_INT_RE |
			       GRETH_INT_TX | GRETH_INT_TE;
		} else {
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
			mask = GRETH_INT_RX | GRETH_INT_RE;
		}

		if (GRETH_REGLOAD(greth->regs->status) & mask) {
			GRETH_REGSAVE(greth->regs->control, ctrl);
			spin_unlock_irqrestore(&greth->devlock, flags);
			goto restart_txrx_poll;
		} else {
			__napi_complete(napi);
			spin_unlock_irqrestore(&greth->devlock, flags);
		}
	}

	return work_done;
}

static int greth_set_mac_add(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct greth_private *greth;
	struct greth_regs *regs;

	greth = netdev_priv(dev);
	regs = greth->regs;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
		      dev->dev_addr[4] << 8 | dev->dev_addr[5]);

	return 0;
}

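/* The hardware multicast hash filter indexes a 64-bit table with the low
 * six bits of the Ethernet CRC of the destination address.
 */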
static u32 greth_hash_get_index(__u8 *addr)
{
	return (ether_crc(6, addr)) & 0x3F;
}

static void greth_set_hash_filter(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct greth_private *greth = netdev_priv(dev);
	struct greth_regs *regs = greth->regs;
	u32 mc_filter[2];
	unsigned int bitnr;

	mc_filter[0] = mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = greth_hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
	GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
}

static void greth_set_multicast_list(struct net_device *dev)
{
	int cfg;
	struct greth_private *greth = netdev_priv(dev);
	struct greth_regs *regs = greth->regs;

	cfg = GRETH_REGLOAD(regs->control);
	if (dev->flags & IFF_PROMISC)
		cfg |= GRETH_CTRL_PR;
	else
		cfg &= ~GRETH_CTRL_PR;

	if (greth->multicast) {
		if (dev->flags & IFF_ALLMULTI) {
			GRETH_REGSAVE(regs->hash_msb, -1);
			GRETH_REGSAVE(regs->hash_lsb, -1);
			cfg |= GRETH_CTRL_MCEN;
			GRETH_REGSAVE(regs->control, cfg);
			return;
		}

		if (netdev_mc_empty(dev)) {
			cfg &= ~GRETH_CTRL_MCEN;
			GRETH_REGSAVE(regs->control, cfg);
			return;
		}

		/* Setup multicast filter */
		greth_set_hash_filter(dev);
		cfg |= GRETH_CTRL_MCEN;
	}
	GRETH_REGSAVE(regs->control, cfg);
}

static u32 greth_get_msglevel(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);

	return greth->msg_enable;
}

static void greth_set_msglevel(struct net_device *dev, u32 value)
{
	struct greth_private *greth = netdev_priv(dev);

	greth->msg_enable = value;
}

static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phy = greth->phy;

	if (!phy)
		return -ENODEV;

	return phy_ethtool_gset(phy, cmd);
}

static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phy = greth->phy;

	if (!phy)
		return -ENODEV;

	return phy_ethtool_sset(phy, cmd);
}

static int greth_get_regs_len(struct net_device *dev)
{
	return sizeof(struct greth_regs);
}

static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct greth_private *greth = netdev_priv(dev);

	strlcpy(info->driver, dev_driver_string(greth->dev),
		sizeof(info->driver));
	strlcpy(info->version, "revision: 1.0", sizeof(info->version));
	strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
	int i;
	struct greth_private *greth = netdev_priv(dev);
	u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
	u32 *buff = p;

	for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
		buff[i] = greth_read_bd(&greth_regs[i]);
}

static const struct ethtool_ops greth_ethtool_ops = {
	.get_msglevel		= greth_get_msglevel,
	.set_msglevel		= greth_set_msglevel,
	.get_settings		= greth_get_settings,
	.set_settings		= greth_set_settings,
	.get_drvinfo		= greth_get_drvinfo,
	.get_regs_len		= greth_get_regs_len,
	.get_regs		= greth_get_regs,
	.get_link		= ethtool_op_get_link,
};

static struct net_device_ops greth_netdev_ops = {
	.ndo_open		= greth_open,
	.ndo_stop		= greth_close,
	.ndo_start_xmit		= greth_start_xmit,
	.ndo_set_mac_address	= greth_set_mac_add,
	.ndo_validate_addr	= eth_validate_addr,
};

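/* Busy-wait (at most roughly 40 ms) for the MDIO interface to go idle.
 * Returns 1 when idle, 0 on timeout.
 */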
static inline int wait_for_mdio(struct greth_private *greth)
{
	unsigned long timeout = jiffies + 4*HZ/100;

	while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
		if (time_after(jiffies, timeout))
			return 0;
	}
	return 1;
}

static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	struct greth_private *greth = bus->priv;
	int data;

	if (!wait_for_mdio(greth))
		return -EBUSY;

	GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);

	if (!wait_for_mdio(greth))
		return -EBUSY;

	if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
		data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
		return data;
	} else {
		return -1;
	}
}

static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	struct greth_private *greth = bus->priv;

	if (!wait_for_mdio(greth))
		return -EBUSY;

	GRETH_REGSAVE(greth->regs->mdio,
		      ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);

	if (!wait_for_mdio(greth))
		return -EBUSY;

	return 0;
}

static void greth_link_change(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phydev = greth->phy;
	unsigned long flags;
	int status_change = 0;
	u32 ctrl;

	spin_lock_irqsave(&greth->devlock, flags);

	if (phydev->link) {
		if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
			ctrl = GRETH_REGLOAD(greth->regs->control) &
			       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);

			if (phydev->duplex)
				ctrl |= GRETH_CTRL_FD;

			if (phydev->speed == SPEED_100)
				ctrl |= GRETH_CTRL_SP;
			else if (phydev->speed == SPEED_1000)
				ctrl |= GRETH_CTRL_GB;

			GRETH_REGSAVE(greth->regs->control, ctrl);
			greth->speed = phydev->speed;
			greth->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != greth->link) {
		if (!phydev->link) {
			greth->speed = 0;
			greth->duplex = -1;
		}
		greth->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&greth->devlock, flags);

	if (status_change) {
		if (phydev->link)
			pr_debug("%s: link up (%d/%s)\n",
				dev->name, phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			pr_debug("%s: link down\n", dev->name);
	}
}

static int greth_mdio_probe(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phy = NULL;
	int ret;

	/* Find the first PHY */
	phy = phy_find_first(greth->mdio);

	if (!phy) {
		if (netif_msg_probe(greth))
			dev_err(&dev->dev, "no PHY found\n");
		return -ENXIO;
	}

	ret = phy_connect_direct(dev, phy, &greth_link_change,
				 greth->gbit_mac ? PHY_INTERFACE_MODE_GMII : PHY_INTERFACE_MODE_MII);
	if (ret) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "could not attach to PHY\n");
		return ret;
	}

	if (greth->gbit_mac)
		phy->supported &= PHY_GBIT_FEATURES;
	else
		phy->supported &= PHY_BASIC_FEATURES;

	phy->advertising = phy->supported;

	greth->link = 0;
	greth->speed = 0;
	greth->duplex = -1;
	greth->phy = phy;

	return 0;
}

static inline int phy_aneg_done(struct phy_device *phydev)
{
	int retval;

	retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}

static int greth_mdio_init(struct greth_private *greth)
{
	int ret;
	unsigned long timeout;

	greth->mdio = mdiobus_alloc();
	if (!greth->mdio)
		return -ENOMEM;

	greth->mdio->name = "greth-mdio";
	snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
	greth->mdio->read = greth_mdio_read;
	greth->mdio->write = greth_mdio_write;
	greth->mdio->priv = greth;

	ret = mdiobus_register(greth->mdio);
	if (ret)
		goto error;

	ret = greth_mdio_probe(greth->netdev);
	if (ret) {
		if (netif_msg_probe(greth))
			dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
		goto unreg_mdio;
	}

	phy_start(greth->phy);

	/* If Ethernet debug link is used make autoneg happen right away */
	if (greth->edcl && greth_edcl == 1) {
		phy_start_aneg(greth->phy);
		timeout = jiffies + 6*HZ;
		while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
		}
		phy_read_status(greth->phy);
		greth_link_change(greth->netdev);
	}

	return 0;

unreg_mdio:
	mdiobus_unregister(greth->mdio);
error:
	mdiobus_free(greth->mdio);
	return ret;
}

/* Initialize the GRETH MAC */
static int greth_of_probe(struct platform_device *ofdev)
{
	struct net_device *dev;
	struct greth_private *greth;
	struct greth_regs *regs;

	int i;
	int err;
	int tmp;
	unsigned long timeout;

	dev = alloc_etherdev(sizeof(struct greth_private));

	if (dev == NULL)
		return -ENOMEM;

	greth = netdev_priv(dev);
	greth->netdev = dev;
	greth->dev = &ofdev->dev;

	if (greth_debug > 0)
		greth->msg_enable = greth_debug;
	else
		greth->msg_enable = GRETH_DEF_MSG_ENABLE;

	spin_lock_init(&greth->devlock);

	greth->regs = of_ioremap(&ofdev->resource[0], 0,
				 resource_size(&ofdev->resource[0]),
				 "grlib-greth regs");

	if (greth->regs == NULL) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "ioremap failure.\n");
		err = -EIO;
		goto error1;
	}

	regs = greth->regs;
	greth->irq = ofdev->archdata.irqs[0];

	dev_set_drvdata(greth->dev, dev);
	SET_NETDEV_DEV(dev, greth->dev);

	if (netif_msg_probe(greth))
		dev_dbg(greth->dev, "resetting controller.\n");

	/* Reset the controller. */
	GRETH_REGSAVE(regs->control, GRETH_RESET);

	/* Wait for MAC to reset itself */
	timeout = jiffies + HZ/100;
	while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
		if (time_after(jiffies, timeout)) {
			err = -EIO;
			if (netif_msg_probe(greth))
				dev_err(greth->dev, "timeout when waiting for reset.\n");
			goto error2;
		}
	}

	/* Get default PHY address */
	greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;

	/* Check if we have GBIT capable MAC */
	tmp = GRETH_REGLOAD(regs->control);
	greth->gbit_mac = (tmp >> 27) & 1;

	/* Check for multicast capability */
	greth->multicast = (tmp >> 25) & 1;

	greth->edcl = (tmp >> 31) & 1;

	/* If we have EDCL we disable the EDCL speed-duplex FSM so
	 * it doesn't interfere with the software */
	if (greth->edcl != 0)
		GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);

	/* Check if MAC can handle MDIO interrupts */
	greth->mdio_int_en = (tmp >> 26) & 1;

	err = greth_mdio_init(greth);
	if (err) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "failed to register MDIO bus\n");
		goto error2;
	}

	/* Allocate TX descriptor ring in coherent memory */
	greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
						&greth->tx_bd_base_phys,
						GFP_KERNEL);
	if (!greth->tx_bd_base) {
		err = -ENOMEM;
		goto error3;
	}

	/* Allocate RX descriptor ring in coherent memory */
	greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
						&greth->rx_bd_base_phys,
						GFP_KERNEL);
	if (!greth->rx_bd_base) {
		err = -ENOMEM;
		goto error4;
	}

	/* Get MAC address from: module param, OF property or ID prom */
	for (i = 0; i < 6; i++) {
		if (macaddr[i] != 0)
			break;
	}
	if (i == 6) {
		const unsigned char *addr;
		int len;

		addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
				       &len);
		if (addr != NULL && len == 6) {
			for (i = 0; i < 6; i++)
				macaddr[i] = (unsigned int) addr[i];
		} else {
#ifdef CONFIG_SPARC
			for (i = 0; i < 6; i++)
				macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
#endif
		}
	}

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = macaddr[i];

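	/* Bump the last octet so that a second GRETH instance probed with the
	 * same module parameters gets a distinct address (the module-wide
	 * macaddr[] array is reused across probes).
	 */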
	macaddr[5]++;

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "no valid ethernet address, aborting.\n");
		err = -EINVAL;
		goto error5;
	}

	GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
		      dev->dev_addr[4] << 8 | dev->dev_addr[5]);

	/* Clear all pending interrupts except PHY irq */
	GRETH_REGSAVE(regs->status, 0xFF);

	if (greth->gbit_mac) {
		dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		dev->features = dev->hw_features | NETIF_F_HIGHDMA;
		greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
	}

	if (greth->multicast) {
		greth_netdev_ops.ndo_set_rx_mode = greth_set_multicast_list;
		dev->flags |= IFF_MULTICAST;
	} else {
		dev->flags &= ~IFF_MULTICAST;
	}

	dev->netdev_ops = &greth_netdev_ops;
	dev->ethtool_ops = &greth_ethtool_ops;

	err = register_netdev(dev);
	if (err) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "netdevice registration failed.\n");
		goto error5;
	}

	/* setup NAPI */
	netif_napi_add(dev, &greth->napi, greth_poll, 64);

	return 0;

error5:
	dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
error4:
	dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
error3:
	mdiobus_unregister(greth->mdio);
error2:
	of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
error1:
	free_netdev(dev);
	return err;
}

static int greth_of_remove(struct platform_device *of_dev)
{
	struct net_device *ndev = platform_get_drvdata(of_dev);
	struct greth_private *greth = netdev_priv(ndev);

	/* Free descriptor areas */
	dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);

	dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);

	if (greth->phy)
		phy_stop(greth->phy);
	mdiobus_unregister(greth->mdio);

	unregister_netdev(ndev);
	free_netdev(ndev);

	of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));

	return 0;
}

static const struct of_device_id greth_of_match[] = {
	{
		.name = "GAISLER_ETHMAC",
	},
	{
		.name = "01_01d",
	},
	{},
};

MODULE_DEVICE_TABLE(of, greth_of_match);

static struct platform_driver greth_of_driver = {
	.driver = {
		.name = "grlib-greth",
		.of_match_table = greth_of_match,
	},
	.probe = greth_of_probe,
	.remove = greth_of_remove,
};

module_platform_driver(greth_of_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
MODULE_LICENSE("GPL");