v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Altera Triple-Speed Ethernet MAC driver
   3 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
   4 *
   5 * Contributors:
   6 *   Dalon Westergreen
   7 *   Thomas Chou
   8 *   Ian Abbott
   9 *   Yuriy Kozlov
  10 *   Tobias Klauser
  11 *   Andriy Smolskyy
  12 *   Roman Bulgakov
  13 *   Dmytro Mytarchuk
  14 *   Matthew Gerlach
  15 *
  16 * Original driver contributed by SLS.
  17 * Major updates contributed by GlobalLogic
  18 */
  19
  20#include <linux/atomic.h>
  21#include <linux/delay.h>
  22#include <linux/etherdevice.h>
  23#include <linux/if_vlan.h>
  24#include <linux/init.h>
  25#include <linux/interrupt.h>
  26#include <linux/io.h>
  27#include <linux/kernel.h>
  28#include <linux/module.h>
  29#include <linux/mii.h>
  30#include <linux/mdio/mdio-regmap.h>
  31#include <linux/netdevice.h>
  32#include <linux/of.h>
  33#include <linux/of_mdio.h>
  34#include <linux/of_net.h>
  35#include <linux/pcs-lynx.h>
  36#include <linux/phy.h>
  37#include <linux/platform_device.h>
  38#include <linux/property.h>
  39#include <linux/regmap.h>
  40#include <linux/skbuff.h>
  41#include <asm/cacheflush.h>
  42
  43#include "altera_utils.h"
  44#include "altera_tse.h"
  45#include "altera_sgdma.h"
  46#include "altera_msgdma.h"
  47
  48static atomic_t instance_count = ATOMIC_INIT(~0);
  49/* Module parameters */
  50static int debug = -1;
  51module_param(debug, int, 0644);
  52MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
  53
  54static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
  55					NETIF_MSG_LINK | NETIF_MSG_IFUP |
  56					NETIF_MSG_IFDOWN);
  57
  58#define RX_DESCRIPTORS 64
  59static int dma_rx_num = RX_DESCRIPTORS;
  60module_param(dma_rx_num, int, 0644);
  61MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");
  62
  63#define TX_DESCRIPTORS 64
  64static int dma_tx_num = TX_DESCRIPTORS;
  65module_param(dma_tx_num, int, 0644);
  66MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
  67
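/* Example (illustrative only): both ring sizes and the message level can
 * be overridden when the module is loaded, e.g.
 *   modprobe altera_tse dma_rx_num=128 dma_tx_num=128 debug=16
 */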
  68
  69#define POLL_PHY (-1)
  70
  71/* Make sure DMA buffer size is larger than the max frame size
  72 * plus some alignment offset and a VLAN header. If the max frame size is
   73 * 1518, a VLAN header adds another 4 bytes and the alignment
   74 * headroom adds 2 bytes, so 2048 is just fine.
  75 */
  76#define ALTERA_RXDMABUFFER_SIZE	2048
  77
  78/* Allow network stack to resume queuing packets after we've
  79 * finished transmitting at least 1/4 of the packets in the queue.
  80 */
  81#define TSE_TX_THRESH(x)	(x->tx_ring_size / 4)
  82
  83#define TXQUEUESTOP_THRESHHOLD	2
  84
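/* Number of free descriptors in the TX ring, computed from the
 * producer/consumer indices (one slot is always kept unused).
 */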
  85static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  86{
  87	return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
  88}
  89
  90/* MDIO specific functions
  91 */
  92static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  93{
  94	struct net_device *ndev = bus->priv;
  95	struct altera_tse_private *priv = netdev_priv(ndev);
  96
  97	/* set MDIO address */
  98	csrwr32((mii_id & 0x1f), priv->mac_dev,
  99		tse_csroffs(mdio_phy1_addr));
 100
 101	/* get the data */
 102	return csrrd32(priv->mac_dev,
 103		       tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
 104}
 105
 106static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 107				 u16 value)
 108{
 109	struct net_device *ndev = bus->priv;
 110	struct altera_tse_private *priv = netdev_priv(ndev);
 111
 112	/* set MDIO address */
 113	csrwr32((mii_id & 0x1f), priv->mac_dev,
 114		tse_csroffs(mdio_phy1_addr));
 115
 116	/* write the data */
 117	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
 118	return 0;
 119}
 120
 121static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
 122{
 123	struct altera_tse_private *priv = netdev_priv(dev);
 124	struct device_node *mdio_node = NULL;
 125	struct device_node *child_node = NULL;
 126	struct mii_bus *mdio = NULL;
 127	int ret;
 128
 129	for_each_child_of_node(priv->device->of_node, child_node) {
 130		if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
 131			mdio_node = child_node;
 132			break;
 133		}
 134	}
 135
 136	if (mdio_node) {
 137		netdev_dbg(dev, "FOUND MDIO subnode\n");
 138	} else {
 139		netdev_dbg(dev, "NO MDIO subnode\n");
 140		return 0;
 141	}
 142
 143	mdio = mdiobus_alloc();
 144	if (mdio == NULL) {
 145		netdev_err(dev, "Error allocating MDIO bus\n");
 146		ret = -ENOMEM;
 147		goto put_node;
 148	}
 149
 150	mdio->name = ALTERA_TSE_RESOURCE_NAME;
 151	mdio->read = &altera_tse_mdio_read;
 152	mdio->write = &altera_tse_mdio_write;
 153	snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);
 154
 155	mdio->priv = dev;
 156	mdio->parent = priv->device;
 157
 158	ret = of_mdiobus_register(mdio, mdio_node);
 159	if (ret != 0) {
 160		netdev_err(dev, "Cannot register MDIO bus %s\n",
 161			   mdio->id);
 162		goto out_free_mdio;
 163	}
 164	of_node_put(mdio_node);
 165
 166	if (netif_msg_drv(priv))
 167		netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
 168
 169	priv->mdio = mdio;
 170	return 0;
 171out_free_mdio:
 172	mdiobus_free(mdio);
 173	mdio = NULL;
 174put_node:
 175	of_node_put(mdio_node);
 176	return ret;
 177}
 178
 179static void altera_tse_mdio_destroy(struct net_device *dev)
 180{
 181	struct altera_tse_private *priv = netdev_priv(dev);
 182
 183	if (priv->mdio == NULL)
 184		return;
 185
 186	if (netif_msg_drv(priv))
 187		netdev_info(dev, "MDIO bus %s: removed\n",
 188			    priv->mdio->id);
 189
 190	mdiobus_unregister(priv->mdio);
 191	mdiobus_free(priv->mdio);
 192	priv->mdio = NULL;
 193}
 194
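/* Allocate an IP-aligned receive skb and map it for DMA from the device
 */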
 195static int tse_init_rx_buffer(struct altera_tse_private *priv,
 196			      struct tse_buffer *rxbuffer, int len)
 197{
 198	rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
 199	if (!rxbuffer->skb)
 200		return -ENOMEM;
 201
 202	rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
 203						len,
 204						DMA_FROM_DEVICE);
 205
 206	if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
 207		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
 208		dev_kfree_skb_any(rxbuffer->skb);
 209		return -EINVAL;
 210	}
 211	rxbuffer->dma_addr &= (dma_addr_t)~3;
 212	rxbuffer->len = len;
 213	return 0;
 214}
 215
 216static void tse_free_rx_buffer(struct altera_tse_private *priv,
 217			       struct tse_buffer *rxbuffer)
 218{
 219	dma_addr_t dma_addr = rxbuffer->dma_addr;
 220	struct sk_buff *skb = rxbuffer->skb;
 221
 222	if (skb != NULL) {
 223		if (dma_addr)
 224			dma_unmap_single(priv->device, dma_addr,
 225					 rxbuffer->len,
 226					 DMA_FROM_DEVICE);
 227		dev_kfree_skb_any(skb);
 228		rxbuffer->skb = NULL;
 229		rxbuffer->dma_addr = 0;
 230	}
 231}
 232
 233/* Unmap and free Tx buffer resources
 234 */
 235static void tse_free_tx_buffer(struct altera_tse_private *priv,
 236			       struct tse_buffer *buffer)
 237{
 238	if (buffer->dma_addr) {
 239		if (buffer->mapped_as_page)
 240			dma_unmap_page(priv->device, buffer->dma_addr,
 241				       buffer->len, DMA_TO_DEVICE);
 242		else
 243			dma_unmap_single(priv->device, buffer->dma_addr,
 244					 buffer->len, DMA_TO_DEVICE);
 245		buffer->dma_addr = 0;
 246	}
 247	if (buffer->skb) {
 248		dev_kfree_skb_any(buffer->skb);
 249		buffer->skb = NULL;
 250	}
 251}
 252
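/* Allocate the RX and TX buffer-tracking arrays and pre-fill the RX ring
 * with mapped receive buffers
 */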
 253static int alloc_init_skbufs(struct altera_tse_private *priv)
 254{
 255	unsigned int rx_descs = priv->rx_ring_size;
 256	unsigned int tx_descs = priv->tx_ring_size;
 257	int ret = -ENOMEM;
 258	int i;
 259
 260	/* Create Rx ring buffer */
 261	priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
 262				GFP_KERNEL);
 263	if (!priv->rx_ring)
 264		goto err_rx_ring;
 265
 266	/* Create Tx ring buffer */
 267	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
 268				GFP_KERNEL);
 269	if (!priv->tx_ring)
 270		goto err_tx_ring;
 271
 272	priv->tx_cons = 0;
 273	priv->tx_prod = 0;
 274
 275	/* Init Rx ring */
 276	for (i = 0; i < rx_descs; i++) {
 277		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
 278					 priv->rx_dma_buf_sz);
 279		if (ret)
 280			goto err_init_rx_buffers;
 281	}
 282
 283	priv->rx_cons = 0;
 284	priv->rx_prod = 0;
 285
 286	return 0;
 287err_init_rx_buffers:
 288	while (--i >= 0)
 289		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
 290	kfree(priv->tx_ring);
 291err_tx_ring:
 292	kfree(priv->rx_ring);
 293err_rx_ring:
 294	return ret;
 295}
 296
 297static void free_skbufs(struct net_device *dev)
 298{
 299	struct altera_tse_private *priv = netdev_priv(dev);
 300	unsigned int rx_descs = priv->rx_ring_size;
 301	unsigned int tx_descs = priv->tx_ring_size;
 302	int i;
 303
 304	/* Release the DMA TX/RX socket buffers */
 305	for (i = 0; i < rx_descs; i++)
 306		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
 307	for (i = 0; i < tx_descs; i++)
 308		tse_free_tx_buffer(priv, &priv->tx_ring[i]);
 309
 310
 311	kfree(priv->tx_ring);
 312}
 313
 314/* Reallocate the skb for the reception process
 315 */
 316static inline void tse_rx_refill(struct altera_tse_private *priv)
 317{
 318	unsigned int rxsize = priv->rx_ring_size;
 319	unsigned int entry;
 320	int ret;
 321
 322	for (; priv->rx_cons - priv->rx_prod > 0;
 323			priv->rx_prod++) {
 324		entry = priv->rx_prod % rxsize;
 325		if (likely(priv->rx_ring[entry].skb == NULL)) {
 326			ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
 327				priv->rx_dma_buf_sz);
 328			if (unlikely(ret != 0))
 329				break;
 330			priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
 331		}
 332	}
 333}
 334
 335/* Pull out the VLAN tag and fix up the packet
 336 */
 337static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
 338{
 339	struct ethhdr *eth_hdr;
 340	u16 vid;
 341
 342	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 343	    !__vlan_get_tag(skb, &vid)) {
 344		eth_hdr = (struct ethhdr *)skb->data;
 345		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
 346		skb_pull(skb, VLAN_HLEN);
 347		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 348	}
 349}
 350
 351/* Receive a packet: retrieve and pass over to upper levels
 352 */
 353static int tse_rx(struct altera_tse_private *priv, int limit)
 354{
 355	unsigned int entry = priv->rx_cons % priv->rx_ring_size;
 356	unsigned int next_entry;
 357	unsigned int count = 0;
 358	struct sk_buff *skb;
 359	u32 rxstatus;
 360	u16 pktlength;
 361	u16 pktstatus;
 362
  363	/* Check for count < limit first, as get_rx_status() changes
  364	 * the response FIFO, so we must process the next packet
  365	 * after calling get_rx_status() if a response is pending.
  366	 * (Reading the last byte of the response pops the value from the FIFO.)
  367	 */
 368	while ((count < limit) &&
 369	       ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
 370		pktstatus = rxstatus >> 16;
 371		pktlength = rxstatus & 0xffff;
 372
 373		if ((pktstatus & 0xFF) || (pktlength == 0))
 374			netdev_err(priv->dev,
 375				   "RCV pktstatus %08X pktlength %08X\n",
 376				   pktstatus, pktlength);
 377
 378		/* DMA transfer from TSE starts with 2 additional bytes for
 379		 * IP payload alignment. Status returned by get_rx_status()
 380		 * contains DMA transfer length. Packet is 2 bytes shorter.
 381		 */
 382		pktlength -= 2;
 383
 384		count++;
 385		next_entry = (++priv->rx_cons) % priv->rx_ring_size;
 386
 387		skb = priv->rx_ring[entry].skb;
 388		if (unlikely(!skb)) {
 389			netdev_err(priv->dev,
 390				   "%s: Inconsistent Rx descriptor chain\n",
 391				   __func__);
 392			priv->dev->stats.rx_dropped++;
 393			break;
 394		}
 395		priv->rx_ring[entry].skb = NULL;
 396
 397		skb_put(skb, pktlength);
 398
 399		dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
 400				 priv->rx_ring[entry].len, DMA_FROM_DEVICE);
 401
 402		if (netif_msg_pktdata(priv)) {
 403			netdev_info(priv->dev, "frame received %d bytes\n",
 404				    pktlength);
 405			print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
 406				       16, 1, skb->data, pktlength, true);
 407		}
 408
 409		tse_rx_vlan(priv->dev, skb);
 410
 411		skb->protocol = eth_type_trans(skb, priv->dev);
 412		skb_checksum_none_assert(skb);
 413
 414		napi_gro_receive(&priv->napi, skb);
 415
 416		priv->dev->stats.rx_packets++;
 417		priv->dev->stats.rx_bytes += pktlength;
 418
 419		entry = next_entry;
 420
 421		tse_rx_refill(priv);
 422	}
 423
 424	return count;
 425}
 426
 427/* Reclaim resources after transmission completes
 428 */
 429static int tse_tx_complete(struct altera_tse_private *priv)
 430{
 431	unsigned int txsize = priv->tx_ring_size;
 432	struct tse_buffer *tx_buff;
 433	unsigned int entry;
 434	int txcomplete = 0;
 435	u32 ready;
 436
 437	spin_lock(&priv->tx_lock);
 438
 439	ready = priv->dmaops->tx_completions(priv);
 440
 441	/* Free sent buffers */
 442	while (ready && (priv->tx_cons != priv->tx_prod)) {
 443		entry = priv->tx_cons % txsize;
 444		tx_buff = &priv->tx_ring[entry];
 445
 446		if (netif_msg_tx_done(priv))
 447			netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
 448				   __func__, priv->tx_prod, priv->tx_cons);
 449
 450		if (likely(tx_buff->skb))
 451			priv->dev->stats.tx_packets++;
 452
 453		tse_free_tx_buffer(priv, tx_buff);
 454		priv->tx_cons++;
 455
 456		txcomplete++;
 457		ready--;
 458	}
 459
 460	if (unlikely(netif_queue_stopped(priv->dev) &&
 461		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
 462		if (netif_queue_stopped(priv->dev) &&
 463		    tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
 464			if (netif_msg_tx_done(priv))
 465				netdev_dbg(priv->dev, "%s: restart transmit\n",
 466					   __func__);
 467			netif_wake_queue(priv->dev);
 468		}
 469	}
 470
 471	spin_unlock(&priv->tx_lock);
 472	return txcomplete;
 473}
 474
 475/* NAPI polling function
 476 */
 477static int tse_poll(struct napi_struct *napi, int budget)
 478{
 479	struct altera_tse_private *priv =
 480			container_of(napi, struct altera_tse_private, napi);
 481	unsigned long int flags;
 482	int rxcomplete = 0;
 483
 484	tse_tx_complete(priv);
 485
 486	rxcomplete = tse_rx(priv, budget);
 487
 488	if (rxcomplete < budget) {
 489
 490		napi_complete_done(napi, rxcomplete);
 491
 492		netdev_dbg(priv->dev,
 493			   "NAPI Complete, did %d packets with budget %d\n",
 494			   rxcomplete, budget);
 495
 496		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
 497		priv->dmaops->enable_rxirq(priv);
 498		priv->dmaops->enable_txirq(priv);
 499		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 500	}
 501	return rxcomplete;
 502}
 503
  504/* DMA TX & RX FIFO interrupt routine
 505 */
 506static irqreturn_t altera_isr(int irq, void *dev_id)
 507{
 508	struct net_device *dev = dev_id;
 509	struct altera_tse_private *priv;
 510
 511	if (unlikely(!dev)) {
 512		pr_err("%s: invalid dev pointer\n", __func__);
 513		return IRQ_NONE;
 514	}
 515	priv = netdev_priv(dev);
 516
 517	spin_lock(&priv->rxdma_irq_lock);
 518	/* reset IRQs */
 519	priv->dmaops->clear_rxirq(priv);
 520	priv->dmaops->clear_txirq(priv);
 521	spin_unlock(&priv->rxdma_irq_lock);
 522
 523	if (likely(napi_schedule_prep(&priv->napi))) {
 524		spin_lock(&priv->rxdma_irq_lock);
 525		priv->dmaops->disable_rxirq(priv);
 526		priv->dmaops->disable_txirq(priv);
 527		spin_unlock(&priv->rxdma_irq_lock);
 528		__napi_schedule(&priv->napi);
 529	}
 530
 531
 532	return IRQ_HANDLED;
 533}
 534
  535/* Transmit a packet (called by the kernel). Dispatches
  536 * either the SGDMA or the MSGDMA transmit method.
  537 * Scatter/gather is not supported, so the frame is
  538 * assumed to be a single physically contiguous
  539 * fragment starting at skb->data, with a length of
  540 * skb_headlen(skb).
 541 */
 542static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
 543{
 544	struct altera_tse_private *priv = netdev_priv(dev);
 545	unsigned int nopaged_len = skb_headlen(skb);
 546	unsigned int txsize = priv->tx_ring_size;
 547	int nfrags = skb_shinfo(skb)->nr_frags;
 548	struct tse_buffer *buffer = NULL;
 549	netdev_tx_t ret = NETDEV_TX_OK;
 550	dma_addr_t dma_addr;
 551	unsigned int entry;
 552
 553	spin_lock_bh(&priv->tx_lock);
 554
 555	if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
 556		if (!netif_queue_stopped(dev)) {
 557			netif_stop_queue(dev);
 558			/* This is a hard error, log it. */
 559			netdev_err(priv->dev,
 560				   "%s: Tx list full when queue awake\n",
 561				   __func__);
 562		}
 563		ret = NETDEV_TX_BUSY;
 564		goto out;
 565	}
 566
 567	/* Map the first skb fragment */
 568	entry = priv->tx_prod % txsize;
 569	buffer = &priv->tx_ring[entry];
 570
 571	dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
 572				  DMA_TO_DEVICE);
 573	if (dma_mapping_error(priv->device, dma_addr)) {
 574		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
 575		ret = NETDEV_TX_OK;
 576		goto out;
 577	}
 578
 579	buffer->skb = skb;
 580	buffer->dma_addr = dma_addr;
 581	buffer->len = nopaged_len;
 582
 583	priv->dmaops->tx_buffer(priv, buffer);
 584
 585	skb_tx_timestamp(skb);
 586
 587	priv->tx_prod++;
 588	dev->stats.tx_bytes += skb->len;
 589
 590	if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) {
 591		if (netif_msg_hw(priv))
 592			netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
 593				   __func__);
 594		netif_stop_queue(dev);
 595	}
 596
 597out:
 598	spin_unlock_bh(&priv->tx_lock);
 599
 600	return ret;
 601}
 602
 603static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
 604{
 605	struct altera_tse_private *priv = netdev_priv(dev);
 606	struct device_node *np = priv->device->of_node;
 607	int ret;
 608
 609	ret = of_get_phy_mode(np, &priv->phy_iface);
 610
  611	/* Skip PHY address lookup and MDIO bus creation if no PHY is present */
 612	if (ret)
 613		return 0;
 614
 615	/* try to get PHY address from device tree, use PHY autodetection if
 616	 * no valid address is given
 617	 */
 618
 619	if (of_property_read_u32(priv->device->of_node, "phy-addr",
 620			 &priv->phy_addr)) {
 621		priv->phy_addr = POLL_PHY;
 622	}
 623
 624	if (!((priv->phy_addr == POLL_PHY) ||
 625		  ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
 626		netdev_err(dev, "invalid phy-addr specified %d\n",
 627			priv->phy_addr);
 628		return -ENODEV;
 629	}
 630
 631	/* Create/attach to MDIO bus */
 632	ret = altera_tse_mdio_create(dev,
 633					 atomic_add_return(1, &instance_count));
 634
 635	if (ret)
 636		return -ENODEV;
 637
 638	return 0;
 639}
 640
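/* Program the primary MAC address: octets 0-3 go into mac_addr_0 and
 * octets 4-5 into the low half of mac_addr_1
 */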
 641static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
 642{
 643	u32 msb;
 644	u32 lsb;
 645
 646	msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
 647	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 648
 649	/* Set primary MAC address */
 650	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
 651	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 652}
 653
 654/* MAC software reset.
 655 * When reset is triggered, the MAC function completes the current
 656 * transmission or reception, and subsequently disables the transmit and
 657 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 658 * counters.
 659 */
 660static int reset_mac(struct altera_tse_private *priv)
 661{
 662	int counter;
 663	u32 dat;
 664
 665	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 666	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 667	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
 668	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 669
 670	counter = 0;
 671	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
 672		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
 673				     MAC_CMDCFG_SW_RESET))
 674			break;
 675		udelay(1);
 676	}
 677
 678	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
 679		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 680		dat &= ~MAC_CMDCFG_SW_RESET;
 681		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 682		return -1;
 683	}
 684	return 0;
 685}
 686
 687/* Initialize MAC core registers
  688 */
 689static int init_mac(struct altera_tse_private *priv)
 690{
 691	unsigned int cmd = 0;
 692	u32 frm_length;
 693
 694	/* Setup Rx FIFO */
 695	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
 696		priv->mac_dev, tse_csroffs(rx_section_empty));
 697
 698	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
 699		tse_csroffs(rx_section_full));
 700
 701	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
 702		tse_csroffs(rx_almost_empty));
 703
 704	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
 705		tse_csroffs(rx_almost_full));
 706
 707	/* Setup Tx FIFO */
 708	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
 709		priv->mac_dev, tse_csroffs(tx_section_empty));
 710
 711	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
 712		tse_csroffs(tx_section_full));
 713
 714	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
 715		tse_csroffs(tx_almost_empty));
 716
 717	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
 718		tse_csroffs(tx_almost_full));
 719
 720	/* MAC Address Configuration */
 721	tse_update_mac_addr(priv, priv->dev->dev_addr);
 722
 723	/* MAC Function Configuration */
 724	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
 725	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
 726
 727	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
 728		tse_csroffs(tx_ipg_length));
 729
  730	/* Enable RX shift 16 so received frames are aligned on a 16-bit start
  731	 * address; disable TX shift 16 and CRC omission on transmit
  732	 */
 733	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
 734		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
 735
 736	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
 737		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
 738		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 739
 740	/* Set the MAC options */
 741	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 742	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
 743	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
 744	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
 745					 * with CRC errors
 746					 */
 747	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
 748	cmd &= ~MAC_CMDCFG_TX_ENA;
 749	cmd &= ~MAC_CMDCFG_RX_ENA;
 750
 751	/* Default speed and duplex setting, full/100 */
 752	cmd &= ~MAC_CMDCFG_HD_ENA;
 753	cmd &= ~MAC_CMDCFG_ETH_SPEED;
 754	cmd &= ~MAC_CMDCFG_ENA_10;
 755
 756	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
 757
 758	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
 759		tse_csroffs(pause_quanta));
 760
 761	if (netif_msg_hw(priv))
 762		dev_dbg(priv->device,
 763			"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);
 764
 765	return 0;
 766}
 767
 768/* Start/stop MAC transmission logic
 769 */
 770static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 771{
 772	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 773
 774	if (enable)
 775		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
 776	else
 777		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 778
 779	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 780}
 781
 782/* Change the MTU
 783 */
 784static int tse_change_mtu(struct net_device *dev, int new_mtu)
 785{
 786	if (netif_running(dev)) {
 787		netdev_err(dev, "must be stopped to change its MTU\n");
 788		return -EBUSY;
 789	}
 790
 791	dev->mtu = new_mtu;
 792	netdev_update_features(dev);
 793
 794	return 0;
 795}
 796
 797static void altera_tse_set_mcfilter(struct net_device *dev)
 798{
 799	struct altera_tse_private *priv = netdev_priv(dev);
 800	struct netdev_hw_addr *ha;
 801	int i;
 802
 803	/* clear the hash filter */
 804	for (i = 0; i < 64; i++)
 805		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 806
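	/* 6-bit hash: each bit is the parity (XOR of all eight bits) of one
	 * address octet, with octet 5 providing the most significant bit
	 */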
 807	netdev_for_each_mc_addr(ha, dev) {
 808		unsigned int hash = 0;
 809		int mac_octet;
 810
 811		for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
 812			unsigned char xor_bit = 0;
 813			unsigned char octet = ha->addr[mac_octet];
 814			unsigned int bitshift;
 815
 816			for (bitshift = 0; bitshift < 8; bitshift++)
 817				xor_bit ^= ((octet >> bitshift) & 0x01);
 818
 819			hash = (hash << 1) | xor_bit;
 820		}
 821		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
 822	}
 823}
 824
 825
 826static void altera_tse_set_mcfilterall(struct net_device *dev)
 827{
 828	struct altera_tse_private *priv = netdev_priv(dev);
 829	int i;
 830
 831	/* set the hash filter */
 832	for (i = 0; i < 64; i++)
 833		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 834}
 835
 836/* Set or clear the multicast filter for this adapter
 837 */
 838static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 839{
 840	struct altera_tse_private *priv = netdev_priv(dev);
 841
 842	spin_lock(&priv->mac_cfg_lock);
 843
 844	if (dev->flags & IFF_PROMISC)
 845		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
 846			    MAC_CMDCFG_PROMIS_EN);
 847
 848	if (dev->flags & IFF_ALLMULTI)
 849		altera_tse_set_mcfilterall(dev);
 850	else
 851		altera_tse_set_mcfilter(dev);
 852
 853	spin_unlock(&priv->mac_cfg_lock);
 854}
 855
 856/* Set or clear the multicast filter for this adapter
 857 */
 858static void tse_set_rx_mode(struct net_device *dev)
 859{
 860	struct altera_tse_private *priv = netdev_priv(dev);
 861
 862	spin_lock(&priv->mac_cfg_lock);
 863
 864	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
 865	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
 866		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
 867			    MAC_CMDCFG_PROMIS_EN);
 868	else
 869		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
 870			      MAC_CMDCFG_PROMIS_EN);
 871
 872	spin_unlock(&priv->mac_cfg_lock);
 873}
 874
 875/* Open and initialize the interface
 876 */
 877static int tse_open(struct net_device *dev)
 878{
 879	struct altera_tse_private *priv = netdev_priv(dev);
 880	unsigned long flags;
 881	int ret = 0;
 882	int i;
 883
 884	/* Reset and configure TSE MAC and probe associated PHY */
 885	ret = priv->dmaops->init_dma(priv);
 886	if (ret != 0) {
 887		netdev_err(dev, "Cannot initialize DMA\n");
 888		goto phy_error;
 889	}
 890
 891	if (netif_msg_ifup(priv))
 892		netdev_warn(dev, "device MAC address %pM\n",
 893			    dev->dev_addr);
 894
 895	if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
 896		netdev_warn(dev, "TSE revision %x\n", priv->revision);
 897
 898	spin_lock(&priv->mac_cfg_lock);
 899
 900	ret = reset_mac(priv);
 901	/* Note that reset_mac will fail if the clocks are gated by the PHY
 902	 * due to the PHY being put into isolation or power down mode.
 903	 * This is not an error if reset fails due to no clock.
 904	 */
 905	if (ret)
 906		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
 907
 908	ret = init_mac(priv);
 909	spin_unlock(&priv->mac_cfg_lock);
 910	if (ret) {
 911		netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
 912		goto alloc_skbuf_error;
 913	}
 914
 915	priv->dmaops->reset_dma(priv);
 916
 917	/* Create and initialize the TX/RX descriptors chains. */
 918	priv->rx_ring_size = dma_rx_num;
 919	priv->tx_ring_size = dma_tx_num;
 920	ret = alloc_init_skbufs(priv);
 921	if (ret) {
 922		netdev_err(dev, "DMA descriptors initialization failed\n");
 923		goto alloc_skbuf_error;
 924	}
 925
 926
 927	/* Register RX interrupt */
 928	ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
 929			  dev->name, dev);
 930	if (ret) {
 931		netdev_err(dev, "Unable to register RX interrupt %d\n",
 932			   priv->rx_irq);
 933		goto init_error;
 934	}
 935
 936	/* Register TX interrupt */
 937	ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
 938			  dev->name, dev);
 939	if (ret) {
 940		netdev_err(dev, "Unable to register TX interrupt %d\n",
 941			   priv->tx_irq);
 942		goto tx_request_irq_error;
 943	}
 944
 945	/* Enable DMA interrupts */
 946	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
 947	priv->dmaops->enable_rxirq(priv);
 948	priv->dmaops->enable_txirq(priv);
 949
 950	/* Setup RX descriptor chain */
 951	for (i = 0; i < priv->rx_ring_size; i++)
 952		priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);
 953
 954	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 955
 956	ret = phylink_of_phy_connect(priv->phylink, priv->device->of_node, 0);
 957	if (ret) {
 958		netdev_err(dev, "could not connect phylink (%d)\n", ret);
 959		goto tx_request_irq_error;
 960	}
 961	phylink_start(priv->phylink);
 962
 963	napi_enable(&priv->napi);
 964	netif_start_queue(dev);
 965
 966	priv->dmaops->start_rxdma(priv);
 967
 968	/* Start MAC Rx/Tx */
 969	spin_lock(&priv->mac_cfg_lock);
 970	tse_set_mac(priv, true);
 971	spin_unlock(&priv->mac_cfg_lock);
 972
 973	return 0;
 974
 975tx_request_irq_error:
 976	free_irq(priv->rx_irq, dev);
 977init_error:
 978	free_skbufs(dev);
 979alloc_skbuf_error:
 980phy_error:
 981	return ret;
 982}
 983
 984/* Stop TSE MAC interface and put the device in an inactive state
 985 */
 986static int tse_shutdown(struct net_device *dev)
 987{
 988	struct altera_tse_private *priv = netdev_priv(dev);
 989	unsigned long int flags;
 990	int ret;
 991
 992	phylink_stop(priv->phylink);
 993	phylink_disconnect_phy(priv->phylink);
 994	netif_stop_queue(dev);
 995	napi_disable(&priv->napi);
 996
 997	/* Disable DMA interrupts */
 998	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
 999	priv->dmaops->disable_rxirq(priv);
1000	priv->dmaops->disable_txirq(priv);
1001	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1002
1003	/* Free the IRQ lines */
1004	free_irq(priv->rx_irq, dev);
1005	free_irq(priv->tx_irq, dev);
1006
 1007	/* Disable and reset the MAC; this empties the FIFOs */
1008	spin_lock(&priv->mac_cfg_lock);
1009	spin_lock(&priv->tx_lock);
1010
1011	ret = reset_mac(priv);
1012	/* Note that reset_mac will fail if the clocks are gated by the PHY
1013	 * due to the PHY being put into isolation or power down mode.
1014	 * This is not an error if reset fails due to no clock.
1015	 */
1016	if (ret)
1017		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
1018	priv->dmaops->reset_dma(priv);
1019	free_skbufs(dev);
1020
1021	spin_unlock(&priv->tx_lock);
1022	spin_unlock(&priv->mac_cfg_lock);
1023
1024	priv->dmaops->uninit_dma(priv);
1025
1026	return 0;
1027}
1028
1029static struct net_device_ops altera_tse_netdev_ops = {
1030	.ndo_open		= tse_open,
1031	.ndo_stop		= tse_shutdown,
1032	.ndo_start_xmit		= tse_start_xmit,
1033	.ndo_set_mac_address	= eth_mac_addr,
1034	.ndo_set_rx_mode	= tse_set_rx_mode,
1035	.ndo_change_mtu		= tse_change_mtu,
1036	.ndo_validate_addr	= eth_validate_addr,
1037};
1038
1039static void alt_tse_mac_config(struct phylink_config *config, unsigned int mode,
1040			       const struct phylink_link_state *state)
1041{
1042	struct net_device *ndev = to_net_dev(config->dev);
1043	struct altera_tse_private *priv = netdev_priv(ndev);
1044
1045	spin_lock(&priv->mac_cfg_lock);
1046	reset_mac(priv);
1047	tse_set_mac(priv, true);
1048	spin_unlock(&priv->mac_cfg_lock);
1049}
1050
1051static void alt_tse_mac_link_down(struct phylink_config *config,
1052				  unsigned int mode, phy_interface_t interface)
1053{
1054}
1055
1056static void alt_tse_mac_link_up(struct phylink_config *config,
1057				struct phy_device *phy, unsigned int mode,
1058				phy_interface_t interface, int speed,
1059				int duplex, bool tx_pause, bool rx_pause)
1060{
1061	struct net_device *ndev = to_net_dev(config->dev);
1062	struct altera_tse_private *priv = netdev_priv(ndev);
1063	u32 ctrl;
1064
1065	ctrl = csrrd32(priv->mac_dev, tse_csroffs(command_config));
1066	ctrl &= ~(MAC_CMDCFG_ENA_10 | MAC_CMDCFG_ETH_SPEED | MAC_CMDCFG_HD_ENA);
1067
1068	if (duplex == DUPLEX_HALF)
1069		ctrl |= MAC_CMDCFG_HD_ENA;
1070
1071	if (speed == SPEED_1000)
1072		ctrl |= MAC_CMDCFG_ETH_SPEED;
1073	else if (speed == SPEED_10)
1074		ctrl |= MAC_CMDCFG_ENA_10;
1075
1076	spin_lock(&priv->mac_cfg_lock);
1077	csrwr32(ctrl, priv->mac_dev, tse_csroffs(command_config));
1078	spin_unlock(&priv->mac_cfg_lock);
1079}
1080
1081static struct phylink_pcs *alt_tse_select_pcs(struct phylink_config *config,
1082					      phy_interface_t interface)
1083{
1084	struct net_device *ndev = to_net_dev(config->dev);
1085	struct altera_tse_private *priv = netdev_priv(ndev);
1086
1087	if (interface == PHY_INTERFACE_MODE_SGMII ||
1088	    interface == PHY_INTERFACE_MODE_1000BASEX)
1089		return priv->pcs;
1090	else
1091		return NULL;
1092}
1093
1094static const struct phylink_mac_ops alt_tse_phylink_ops = {
1095	.mac_config = alt_tse_mac_config,
1096	.mac_link_down = alt_tse_mac_link_down,
1097	.mac_link_up = alt_tse_mac_link_up,
1098	.mac_select_pcs = alt_tse_select_pcs,
1099};
1100
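/* Look up a named memory resource on the platform device, reserve the
 * region and ioremap it
 */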
1101static int request_and_map(struct platform_device *pdev, const char *name,
1102			   struct resource **res, void __iomem **ptr)
1103{
1104	struct device *device = &pdev->dev;
1105	struct resource *region;
1106
1107	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
1108	if (*res == NULL) {
1109		dev_err(device, "resource %s not defined\n", name);
1110		return -ENODEV;
1111	}
1112
1113	region = devm_request_mem_region(device, (*res)->start,
1114					 resource_size(*res), dev_name(device));
1115	if (region == NULL) {
1116		dev_err(device, "unable to request %s\n", name);
1117		return -EBUSY;
1118	}
1119
1120	*ptr = devm_ioremap(device, region->start,
1121				    resource_size(region));
1122	if (*ptr == NULL) {
1123		dev_err(device, "ioremap of %s failed!", name);
1124		return -ENOMEM;
1125	}
1126
1127	return 0;
1128}
1129
1130/* Probe Altera TSE MAC device
1131 */
1132static int altera_tse_probe(struct platform_device *pdev)
1133{
1134	struct regmap_config pcs_regmap_cfg;
1135	struct altera_tse_private *priv;
1136	struct mdio_regmap_config mrc;
1137	struct resource *control_port;
1138	struct regmap *pcs_regmap;
1139	struct resource *dma_res;
1140	struct resource *pcs_res;
1141	struct mii_bus *pcs_bus;
1142	struct net_device *ndev;
1143	void __iomem *descmap;
1144	int ret = -ENODEV;
1145
1146	ndev = alloc_etherdev(sizeof(struct altera_tse_private));
1147	if (!ndev) {
1148		dev_err(&pdev->dev, "Could not allocate network device\n");
1149		return -ENODEV;
1150	}
1151
1152	SET_NETDEV_DEV(ndev, &pdev->dev);
1153
1154	priv = netdev_priv(ndev);
1155	priv->device = &pdev->dev;
1156	priv->dev = ndev;
1157	priv->msg_enable = netif_msg_init(debug, default_msg_level);
1158
1159	priv->dmaops = device_get_match_data(&pdev->dev);
1160
1161	if (priv->dmaops &&
1162	    priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
1163		/* Get the mapped address to the SGDMA descriptor memory */
1164		ret = request_and_map(pdev, "s1", &dma_res, &descmap);
1165		if (ret)
1166			goto err_free_netdev;
1167
1168		/* Start of that memory is for transmit descriptors */
1169		priv->tx_dma_desc = descmap;
1170
 1171		/* First half is for tx descriptors, other half for rx */
1172		priv->txdescmem = resource_size(dma_res)/2;
1173
1174		priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;
1175
1176		priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
1177						     priv->txdescmem));
1178		priv->rxdescmem = resource_size(dma_res)/2;
1179		priv->rxdescmem_busaddr = dma_res->start;
1180		priv->rxdescmem_busaddr += priv->txdescmem;
1181
1182		if (upper_32_bits(priv->rxdescmem_busaddr)) {
1183			dev_dbg(priv->device,
1184				"SGDMA bus addresses greater than 32-bits\n");
1185			ret = -EINVAL;
1186			goto err_free_netdev;
1187		}
1188		if (upper_32_bits(priv->txdescmem_busaddr)) {
1189			dev_dbg(priv->device,
1190				"SGDMA bus addresses greater than 32-bits\n");
1191			ret = -EINVAL;
1192			goto err_free_netdev;
1193		}
1194	} else if (priv->dmaops &&
1195		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
1196		ret = request_and_map(pdev, "rx_resp", &dma_res,
1197				      &priv->rx_dma_resp);
1198		if (ret)
1199			goto err_free_netdev;
1200
1201		ret = request_and_map(pdev, "tx_desc", &dma_res,
1202				      &priv->tx_dma_desc);
1203		if (ret)
1204			goto err_free_netdev;
1205
1206		priv->txdescmem = resource_size(dma_res);
1207		priv->txdescmem_busaddr = dma_res->start;
1208
1209		ret = request_and_map(pdev, "rx_desc", &dma_res,
1210				      &priv->rx_dma_desc);
1211		if (ret)
1212			goto err_free_netdev;
1213
1214		priv->rxdescmem = resource_size(dma_res);
1215		priv->rxdescmem_busaddr = dma_res->start;
1216
1217	} else {
1218		ret = -ENODEV;
1219		goto err_free_netdev;
1220	}
1221
1222	if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
1223		dma_set_coherent_mask(priv->device,
1224				      DMA_BIT_MASK(priv->dmaops->dmamask));
1225	} else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
1226		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
1227	} else {
1228		ret = -EIO;
1229		goto err_free_netdev;
1230	}
1231
1232	/* MAC address space */
1233	ret = request_and_map(pdev, "control_port", &control_port,
1234			      (void __iomem **)&priv->mac_dev);
1235	if (ret)
1236		goto err_free_netdev;
1237
1238	/* xSGDMA Rx Dispatcher address space */
1239	ret = request_and_map(pdev, "rx_csr", &dma_res,
1240			      &priv->rx_dma_csr);
1241	if (ret)
1242		goto err_free_netdev;
1243
1244
1245	/* xSGDMA Tx Dispatcher address space */
1246	ret = request_and_map(pdev, "tx_csr", &dma_res,
1247			      &priv->tx_dma_csr);
1248	if (ret)
1249		goto err_free_netdev;
1250
1251	memset(&pcs_regmap_cfg, 0, sizeof(pcs_regmap_cfg));
1252	memset(&mrc, 0, sizeof(mrc));
 1253	/* SGMII PCS address space. The location can vary depending on how the
 1254	 * IP is integrated. A dedicated resource may describe its address space;
 1255	 * if not, we fall back to the mdio_phy0 registers within the MAC's
 1256	 * address space.
 1257	 */
1258	ret = request_and_map(pdev, "pcs", &pcs_res, &priv->pcs_base);
1259	if (ret) {
 1260		/* If we can't find a dedicated resource for the PCS, fall back
 1261		 * to the internal PCS, which has a different address stride.
 1262		 */
1263		priv->pcs_base = priv->mac_dev + tse_csroffs(mdio_phy0);
1264		pcs_regmap_cfg.reg_bits = 32;
1265		/* Values are MDIO-like values, on 16 bits */
1266		pcs_regmap_cfg.val_bits = 16;
1267		pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(2);
1268	} else {
1269		pcs_regmap_cfg.reg_bits = 16;
1270		pcs_regmap_cfg.val_bits = 16;
1271		pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(1);
1272	}
1273
1274	/* Create a regmap for the PCS so that it can be used by the PCS driver */
1275	pcs_regmap = devm_regmap_init_mmio(&pdev->dev, priv->pcs_base,
1276					   &pcs_regmap_cfg);
1277	if (IS_ERR(pcs_regmap)) {
1278		ret = PTR_ERR(pcs_regmap);
1279		goto err_free_netdev;
1280	}
1281	mrc.regmap = pcs_regmap;
1282	mrc.parent = &pdev->dev;
1283	mrc.valid_addr = 0x0;
1284	mrc.autoscan = false;
1285
1286	/* Rx IRQ */
1287	priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
1288	if (priv->rx_irq == -ENXIO) {
1289		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
1290		ret = -ENXIO;
1291		goto err_free_netdev;
1292	}
1293
1294	/* Tx IRQ */
1295	priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
1296	if (priv->tx_irq == -ENXIO) {
1297		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
1298		ret = -ENXIO;
1299		goto err_free_netdev;
1300	}
1301
1302	/* get FIFO depths from device tree */
1303	if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1304				 &priv->rx_fifo_depth)) {
1305		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
1306		ret = -ENXIO;
1307		goto err_free_netdev;
1308	}
1309
1310	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1311				 &priv->tx_fifo_depth)) {
1312		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1313		ret = -ENXIO;
1314		goto err_free_netdev;
1315	}
1316
1317	/* get hash filter settings for this instance */
1318	priv->hash_filter =
1319		of_property_read_bool(pdev->dev.of_node,
1320				      "altr,has-hash-multicast-filter");
1321
 1322	/* Force the hash filter off for now, until the
 1323	 * multicast filter receive issue is debugged.
 1324	 */
1325	priv->hash_filter = 0;
1326
1327	/* get supplemental address settings for this instance */
1328	priv->added_unicast =
1329		of_property_read_bool(pdev->dev.of_node,
1330				      "altr,has-supplementary-unicast");
1331
1332	priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
1333	/* Max MTU is 1500, ETH_DATA_LEN */
1334	priv->dev->max_mtu = ETH_DATA_LEN;
1335
1336	/* Get the max mtu from the device tree. Note that the
1337	 * "max-frame-size" parameter is actually max mtu. Definition
1338	 * in the ePAPR v1.1 spec and usage differ, so go with usage.
1339	 */
1340	of_property_read_u32(pdev->dev.of_node, "max-frame-size",
1341			     &priv->dev->max_mtu);
1342
1343	/* The DMA buffer size already accounts for an alignment bias
 1344	 * to avoid unaligned access exceptions for the NIOS processor.
1345	 */
1346	priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
1347
1348	/* get default MAC address from device tree */
1349	ret = of_get_ethdev_address(pdev->dev.of_node, ndev);
1350	if (ret)
1351		eth_hw_addr_random(ndev);
1352
1353	/* get phy addr and create mdio */
1354	ret = altera_tse_phy_get_addr_mdio_create(ndev);
1355
1356	if (ret)
1357		goto err_free_netdev;
1358
1359	/* initialize netdev */
1360	ndev->mem_start = control_port->start;
1361	ndev->mem_end = control_port->end;
1362	ndev->netdev_ops = &altera_tse_netdev_ops;
1363	altera_tse_set_ethtool_ops(ndev);
1364
1365	altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
1366
1367	if (priv->hash_filter)
1368		altera_tse_netdev_ops.ndo_set_rx_mode =
1369			tse_set_rx_mode_hashfilter;
1370
1371	/* Scatter/gather IO is not supported,
1372	 * so it is turned off
1373	 */
1374	ndev->hw_features &= ~NETIF_F_SG;
1375	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
1376
1377	/* VLAN offloading of tagging, stripping and filtering is not
 1378	 * supported by hardware, but the driver will accommodate the
1379	 * extra 4-byte VLAN tag for processing by upper layers
1380	 */
1381	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1382
1383	/* setup NAPI interface */
1384	netif_napi_add(ndev, &priv->napi, tse_poll);
1385
1386	spin_lock_init(&priv->mac_cfg_lock);
1387	spin_lock_init(&priv->tx_lock);
1388	spin_lock_init(&priv->rxdma_irq_lock);
1389
1390	netif_carrier_off(ndev);
1391	ret = register_netdev(ndev);
1392	if (ret) {
1393		dev_err(&pdev->dev, "failed to register TSE net device\n");
1394		goto err_register_netdev;
1395	}
1396
1397	platform_set_drvdata(pdev, ndev);
1398
1399	priv->revision = ioread32(&priv->mac_dev->megacore_revision);
1400
1401	if (netif_msg_probe(priv))
1402		dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
1403			 (priv->revision >> 8) & 0xff,
1404			 priv->revision & 0xff,
1405			 (unsigned long) control_port->start, priv->rx_irq,
1406			 priv->tx_irq);
1407
1408	snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", ndev->name);
1409	pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc);
1410	if (IS_ERR(pcs_bus)) {
1411		ret = PTR_ERR(pcs_bus);
1412		goto err_init_pcs;
1413	}
1414
1415	priv->pcs = lynx_pcs_create_mdiodev(pcs_bus, 0);
1416	if (IS_ERR(priv->pcs)) {
1417		ret = PTR_ERR(priv->pcs);
1418		goto err_init_pcs;
1419	}
1420
1421	priv->phylink_config.dev = &ndev->dev;
1422	priv->phylink_config.type = PHYLINK_NETDEV;
1423	priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
1424						MAC_100 | MAC_1000FD;
1425
1426	phy_interface_set_rgmii(priv->phylink_config.supported_interfaces);
1427	__set_bit(PHY_INTERFACE_MODE_MII,
1428		  priv->phylink_config.supported_interfaces);
1429	__set_bit(PHY_INTERFACE_MODE_GMII,
1430		  priv->phylink_config.supported_interfaces);
1431	__set_bit(PHY_INTERFACE_MODE_SGMII,
1432		  priv->phylink_config.supported_interfaces);
1433	__set_bit(PHY_INTERFACE_MODE_1000BASEX,
1434		  priv->phylink_config.supported_interfaces);
1435
1436	priv->phylink = phylink_create(&priv->phylink_config,
1437				       of_fwnode_handle(priv->device->of_node),
1438				       priv->phy_iface, &alt_tse_phylink_ops);
1439	if (IS_ERR(priv->phylink)) {
1440		dev_err(&pdev->dev, "failed to create phylink\n");
1441		ret = PTR_ERR(priv->phylink);
1442		goto err_init_phylink;
1443	}
1444
1445	return 0;
1446err_init_phylink:
1447	lynx_pcs_destroy(priv->pcs);
1448err_init_pcs:
1449	unregister_netdev(ndev);
1450err_register_netdev:
1451	netif_napi_del(&priv->napi);
1452	altera_tse_mdio_destroy(ndev);
1453err_free_netdev:
1454	free_netdev(ndev);
1455	return ret;
1456}
1457
1458/* Remove Altera TSE MAC device
1459 */
1460static void altera_tse_remove(struct platform_device *pdev)
1461{
1462	struct net_device *ndev = platform_get_drvdata(pdev);
1463	struct altera_tse_private *priv = netdev_priv(ndev);
1464
1465	platform_set_drvdata(pdev, NULL);
1466	altera_tse_mdio_destroy(ndev);
1467	unregister_netdev(ndev);
1468	phylink_destroy(priv->phylink);
1469	lynx_pcs_destroy(priv->pcs);
1470
1471	free_netdev(ndev);
1472}
1473
1474static const struct altera_dmaops altera_dtype_sgdma = {
1475	.altera_dtype = ALTERA_DTYPE_SGDMA,
1476	.dmamask = 32,
1477	.reset_dma = sgdma_reset,
1478	.enable_txirq = sgdma_enable_txirq,
1479	.enable_rxirq = sgdma_enable_rxirq,
1480	.disable_txirq = sgdma_disable_txirq,
1481	.disable_rxirq = sgdma_disable_rxirq,
1482	.clear_txirq = sgdma_clear_txirq,
1483	.clear_rxirq = sgdma_clear_rxirq,
1484	.tx_buffer = sgdma_tx_buffer,
1485	.tx_completions = sgdma_tx_completions,
1486	.add_rx_desc = sgdma_add_rx_desc,
1487	.get_rx_status = sgdma_rx_status,
1488	.init_dma = sgdma_initialize,
1489	.uninit_dma = sgdma_uninitialize,
1490	.start_rxdma = sgdma_start_rxdma,
1491};
1492
1493static const struct altera_dmaops altera_dtype_msgdma = {
1494	.altera_dtype = ALTERA_DTYPE_MSGDMA,
1495	.dmamask = 64,
1496	.reset_dma = msgdma_reset,
1497	.enable_txirq = msgdma_enable_txirq,
1498	.enable_rxirq = msgdma_enable_rxirq,
1499	.disable_txirq = msgdma_disable_txirq,
1500	.disable_rxirq = msgdma_disable_rxirq,
1501	.clear_txirq = msgdma_clear_txirq,
1502	.clear_rxirq = msgdma_clear_rxirq,
1503	.tx_buffer = msgdma_tx_buffer,
1504	.tx_completions = msgdma_tx_completions,
1505	.add_rx_desc = msgdma_add_rx_desc,
1506	.get_rx_status = msgdma_rx_status,
1507	.init_dma = msgdma_initialize,
1508	.uninit_dma = msgdma_uninitialize,
1509	.start_rxdma = msgdma_start_rxdma,
1510};
1511
1512static const struct of_device_id altera_tse_ids[] = {
1513	{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
1514	{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
1515	{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
1516	{},
1517};
1518MODULE_DEVICE_TABLE(of, altera_tse_ids);
1519
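/* Illustrative (hypothetical) device tree node for the mSGDMA variant;
 * the resource, IRQ and property names below match what the probe
 * function requests, but the values are examples only:
 *
 *	tse: ethernet@... {
 *		compatible = "altr,tse-msgdma-1.0";
 *		reg = <...>, <...>, <...>, <...>, <...>, <...>;
 *		reg-names = "control_port", "rx_csr", "rx_desc", "rx_resp",
 *			    "tx_csr", "tx_desc";
 *		interrupts = <...>, <...>;
 *		interrupt-names = "rx_irq", "tx_irq";
 *		rx-fifo-depth = <2048>;
 *		tx-fifo-depth = <2048>;
 *		phy-mode = "sgmii";
 *		phy-handle = <&phy0>;
 *	};
 */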
1520static struct platform_driver altera_tse_driver = {
1521	.probe		= altera_tse_probe,
1522	.remove_new	= altera_tse_remove,
1523	.suspend	= NULL,
1524	.resume		= NULL,
1525	.driver		= {
1526		.name	= ALTERA_TSE_RESOURCE_NAME,
1527		.of_match_table = altera_tse_ids,
1528	},
1529};
1530
1531module_platform_driver(altera_tse_driver);
1532
1533MODULE_AUTHOR("Altera Corporation");
1534MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
1535MODULE_LICENSE("GPL v2");
v3.15
 
   1/* Altera Triple-Speed Ethernet MAC driver
   2 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
   3 *
   4 * Contributors:
   5 *   Dalon Westergreen
   6 *   Thomas Chou
   7 *   Ian Abbott
   8 *   Yuriy Kozlov
   9 *   Tobias Klauser
  10 *   Andriy Smolskyy
  11 *   Roman Bulgakov
  12 *   Dmytro Mytarchuk
  13 *   Matthew Gerlach
  14 *
  15 * Original driver contributed by SLS.
  16 * Major updates contributed by GlobalLogic
  17 *
  18 * This program is free software; you can redistribute it and/or modify it
  19 * under the terms and conditions of the GNU General Public License,
  20 * version 2, as published by the Free Software Foundation.
  21 *
  22 * This program is distributed in the hope it will be useful, but WITHOUT
  23 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  24 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  25 * more details.
  26 *
  27 * You should have received a copy of the GNU General Public License along with
  28 * this program.  If not, see <http://www.gnu.org/licenses/>.
  29 */
  30
  31#include <linux/atomic.h>
  32#include <linux/delay.h>
  33#include <linux/etherdevice.h>
  34#include <linux/if_vlan.h>
  35#include <linux/init.h>
  36#include <linux/interrupt.h>
  37#include <linux/io.h>
  38#include <linux/kernel.h>
  39#include <linux/module.h>
 
 
  40#include <linux/netdevice.h>
  41#include <linux/of_device.h>
  42#include <linux/of_mdio.h>
  43#include <linux/of_net.h>
  44#include <linux/of_platform.h>
  45#include <linux/phy.h>
  46#include <linux/platform_device.h>
 
 
  47#include <linux/skbuff.h>
  48#include <asm/cacheflush.h>
  49
  50#include "altera_utils.h"
  51#include "altera_tse.h"
  52#include "altera_sgdma.h"
  53#include "altera_msgdma.h"
  54
  55static atomic_t instance_count = ATOMIC_INIT(~0);
  56/* Module parameters */
  57static int debug = -1;
  58module_param(debug, int, S_IRUGO | S_IWUSR);
  59MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
  60
  61static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
  62					NETIF_MSG_LINK | NETIF_MSG_IFUP |
  63					NETIF_MSG_IFDOWN);
  64
  65#define RX_DESCRIPTORS 64
  66static int dma_rx_num = RX_DESCRIPTORS;
  67module_param(dma_rx_num, int, S_IRUGO | S_IWUSR);
  68MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");
  69
  70#define TX_DESCRIPTORS 64
  71static int dma_tx_num = TX_DESCRIPTORS;
  72module_param(dma_tx_num, int, S_IRUGO | S_IWUSR);
  73MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
  74
  75
  76#define POLL_PHY (-1)
  77
  78/* Make sure DMA buffer size is larger than the max frame size
  79 * plus some alignment offset and a VLAN header. If the max frame size is
  80 * 1518, a VLAN header would be additional 4 bytes and additional
  81 * headroom for alignment is 2 bytes, 2048 is just fine.
  82 */
  83#define ALTERA_RXDMABUFFER_SIZE	2048
  84
  85/* Allow network stack to resume queueing packets after we've
  86 * finished transmitting at least 1/4 of the packets in the queue.
  87 */
  88#define TSE_TX_THRESH(x)	(x->tx_ring_size / 4)
  89
  90#define TXQUEUESTOP_THRESHHOLD	2
  91
  92static struct of_device_id altera_tse_ids[];
  93
  94static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  95{
  96	return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
  97}
  98
  99/* MDIO specific functions
 100 */
 101static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 102{
 103	struct net_device *ndev = bus->priv;
 104	struct altera_tse_private *priv = netdev_priv(ndev);
 105
 106	/* set MDIO address */
 107	csrwr32((mii_id & 0x1f), priv->mac_dev,
 108		tse_csroffs(mdio_phy0_addr));
 109
 110	/* get the data */
 111	return csrrd32(priv->mac_dev,
 112		       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
 113}
 114
 115static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 116				 u16 value)
 117{
 118	struct net_device *ndev = bus->priv;
 119	struct altera_tse_private *priv = netdev_priv(ndev);
 120
 121	/* set MDIO address */
 122	csrwr32((mii_id & 0x1f), priv->mac_dev,
 123		tse_csroffs(mdio_phy0_addr));
 124
 125	/* write the data */
 126	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
 127	return 0;
 128}
 129
 130static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
 131{
 132	struct altera_tse_private *priv = netdev_priv(dev);
 133	int ret;
 134	int i;
 135	struct device_node *mdio_node = NULL;
 
 136	struct mii_bus *mdio = NULL;
 137	struct device_node *child_node = NULL;
 138
 139	for_each_child_of_node(priv->device->of_node, child_node) {
 140		if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
 141			mdio_node = child_node;
 142			break;
 143		}
 144	}
 145
 146	if (mdio_node) {
 147		netdev_dbg(dev, "FOUND MDIO subnode\n");
 148	} else {
 149		netdev_dbg(dev, "NO MDIO subnode\n");
 150		return 0;
 151	}
 152
 153	mdio = mdiobus_alloc();
 154	if (mdio == NULL) {
 155		netdev_err(dev, "Error allocating MDIO bus\n");
 156		return -ENOMEM;
 
 157	}
 158
 159	mdio->name = ALTERA_TSE_RESOURCE_NAME;
 160	mdio->read = &altera_tse_mdio_read;
 161	mdio->write = &altera_tse_mdio_write;
 162	snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);
 163
 164	mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
 165	if (mdio->irq == NULL) {
 166		ret = -ENOMEM;
 167		goto out_free_mdio;
 168	}
 169	for (i = 0; i < PHY_MAX_ADDR; i++)
 170		mdio->irq[i] = PHY_POLL;
 171
 172	mdio->priv = dev;
 173	mdio->parent = priv->device;
 174
 175	ret = of_mdiobus_register(mdio, mdio_node);
 176	if (ret != 0) {
 177		netdev_err(dev, "Cannot register MDIO bus %s\n",
 178			   mdio->id);
 179		goto out_free_mdio_irq;
 180	}
 
 181
 182	if (netif_msg_drv(priv))
 183		netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
 184
 185	priv->mdio = mdio;
 186	return 0;
 187out_free_mdio_irq:
 188	kfree(mdio->irq);
 189out_free_mdio:
 190	mdiobus_free(mdio);
 191	mdio = NULL;
 
 
 192	return ret;
 193}
 194
 195static void altera_tse_mdio_destroy(struct net_device *dev)
 196{
 197	struct altera_tse_private *priv = netdev_priv(dev);
 198
 199	if (priv->mdio == NULL)
 200		return;
 201
 202	if (netif_msg_drv(priv))
 203		netdev_info(dev, "MDIO bus %s: removed\n",
 204			    priv->mdio->id);
 205
 206	mdiobus_unregister(priv->mdio);
 207	kfree(priv->mdio->irq);
 208	mdiobus_free(priv->mdio);
 209	priv->mdio = NULL;
 210}
 211
 212static int tse_init_rx_buffer(struct altera_tse_private *priv,
 213			      struct tse_buffer *rxbuffer, int len)
 214{
 215	rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
 216	if (!rxbuffer->skb)
 217		return -ENOMEM;
 218
 219	rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
 220						len,
 221						DMA_FROM_DEVICE);
 222
 223	if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
 224		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
 225		dev_kfree_skb_any(rxbuffer->skb);
 226		return -EINVAL;
 227	}
 228	rxbuffer->dma_addr &= (dma_addr_t)~3;
 229	rxbuffer->len = len;
 230	return 0;
 231}
 232
 233static void tse_free_rx_buffer(struct altera_tse_private *priv,
 234			       struct tse_buffer *rxbuffer)
 235{
 
 236	struct sk_buff *skb = rxbuffer->skb;
 237	dma_addr_t dma_addr = rxbuffer->dma_addr;
 238
 239	if (skb != NULL) {
 240		if (dma_addr)
 241			dma_unmap_single(priv->device, dma_addr,
 242					 rxbuffer->len,
 243					 DMA_FROM_DEVICE);
 244		dev_kfree_skb_any(skb);
 245		rxbuffer->skb = NULL;
 246		rxbuffer->dma_addr = 0;
 247	}
 248}
 249
 250/* Unmap and free Tx buffer resources
 251 */
 252static void tse_free_tx_buffer(struct altera_tse_private *priv,
 253			       struct tse_buffer *buffer)
 254{
 255	if (buffer->dma_addr) {
 256		if (buffer->mapped_as_page)
 257			dma_unmap_page(priv->device, buffer->dma_addr,
 258				       buffer->len, DMA_TO_DEVICE);
 259		else
 260			dma_unmap_single(priv->device, buffer->dma_addr,
 261					 buffer->len, DMA_TO_DEVICE);
 262		buffer->dma_addr = 0;
 263	}
 264	if (buffer->skb) {
 265		dev_kfree_skb_any(buffer->skb);
 266		buffer->skb = NULL;
 267	}
 268}
 269
 270static int alloc_init_skbufs(struct altera_tse_private *priv)
 271{
 272	unsigned int rx_descs = priv->rx_ring_size;
 273	unsigned int tx_descs = priv->tx_ring_size;
 274	int ret = -ENOMEM;
 275	int i;
 276
 277	/* Create Rx ring buffer */
 278	priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
 279				GFP_KERNEL);
 280	if (!priv->rx_ring)
 281		goto err_rx_ring;
 282
 283	/* Create Tx ring buffer */
 284	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
 285				GFP_KERNEL);
 286	if (!priv->tx_ring)
 287		goto err_tx_ring;
 288
 289	priv->tx_cons = 0;
 290	priv->tx_prod = 0;
 291
 292	/* Init Rx ring */
 293	for (i = 0; i < rx_descs; i++) {
 294		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
 295					 priv->rx_dma_buf_sz);
 296		if (ret)
 297			goto err_init_rx_buffers;
 298	}
 299
 300	priv->rx_cons = 0;
 301	priv->rx_prod = 0;
 302
 303	return 0;
 304err_init_rx_buffers:
 305	while (--i >= 0)
 306		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
 307	kfree(priv->tx_ring);
 308err_tx_ring:
 309	kfree(priv->rx_ring);
 310err_rx_ring:
 311	return ret;
 312}
 313
 314static void free_skbufs(struct net_device *dev)
 315{
 316	struct altera_tse_private *priv = netdev_priv(dev);
 317	unsigned int rx_descs = priv->rx_ring_size;
 318	unsigned int tx_descs = priv->tx_ring_size;
 319	int i;
 320
 321	/* Release the DMA TX/RX socket buffers */
 322	for (i = 0; i < rx_descs; i++)
 323		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
 324	for (i = 0; i < tx_descs; i++)
 325		tse_free_tx_buffer(priv, &priv->tx_ring[i]);
 326
 327
 328	kfree(priv->tx_ring);
 329}
 330
 331/* Reallocate the skb for the reception process
 332 */
 333static inline void tse_rx_refill(struct altera_tse_private *priv)
 334{
 335	unsigned int rxsize = priv->rx_ring_size;
 336	unsigned int entry;
 337	int ret;
 338
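	/* rx_cons counts descriptors retired by tse_rx(); re-arm one
	 * descriptor per retired entry so rx_prod catches up with
	 * rx_cons and the ring stays fully populated.
	 */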
 339	for (; priv->rx_cons - priv->rx_prod > 0;
 340			priv->rx_prod++) {
 341		entry = priv->rx_prod % rxsize;
 342		if (likely(priv->rx_ring[entry].skb == NULL)) {
 343			ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
 344				priv->rx_dma_buf_sz);
 345			if (unlikely(ret != 0))
 346				break;
 347			priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
 348		}
 349	}
 350}
 351
 352/* Pull out the VLAN tag and fix up the packet
 353 */
 354static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
 355{
 356	struct ethhdr *eth_hdr;
 357	u16 vid;
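	/* For a tagged frame, move the MAC addresses forward by
	 * VLAN_HLEN over the tag, strip the 4 tag bytes from the head
	 * of the frame and hand the VLAN ID to the stack through the
	 * hwaccel field.
	 */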
 358	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 359	    !__vlan_get_tag(skb, &vid)) {
 360		eth_hdr = (struct ethhdr *)skb->data;
 361		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
 362		skb_pull(skb, VLAN_HLEN);
 363		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 364	}
 365}
 366
 367/* Receive a packet: retrieve and pass over to upper levels
 368 */
 369static int tse_rx(struct altera_tse_private *priv, int limit)
 370{
 371	unsigned int count = 0;
 372	unsigned int next_entry;
 373	struct sk_buff *skb;
 374	unsigned int entry = priv->rx_cons % priv->rx_ring_size;
 375	u32 rxstatus;
 376	u16 pktlength;
 377	u16 pktstatus;
 378
 379	while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
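		/* get_rx_status() packs the completion word as
		 * [31:16] status/error flags and [15:0] frame length
		 * in bytes, split out below.
		 */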
 380		pktstatus = rxstatus >> 16;
 381		pktlength = rxstatus & 0xffff;
 382
 383		if ((pktstatus & 0xFF) || (pktlength == 0))
 384			netdev_err(priv->dev,
 385				   "RCV pktstatus %08X pktlength %08X\n",
 386				   pktstatus, pktlength);
 387
 388		count++;
 389		next_entry = (++priv->rx_cons) % priv->rx_ring_size;
 390
 391		skb = priv->rx_ring[entry].skb;
 392		if (unlikely(!skb)) {
 393			netdev_err(priv->dev,
 394				   "%s: Inconsistent Rx descriptor chain\n",
 395				   __func__);
 396			priv->dev->stats.rx_dropped++;
 397			break;
 398		}
 399		priv->rx_ring[entry].skb = NULL;
 400
 401		skb_put(skb, pktlength);
 402
 403		/* make cache consistent with receive packet buffer */
 404		dma_sync_single_for_cpu(priv->device,
 405					priv->rx_ring[entry].dma_addr,
 406					priv->rx_ring[entry].len,
 407					DMA_FROM_DEVICE);
 408
 409		dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
 410				 priv->rx_ring[entry].len, DMA_FROM_DEVICE);
 411
 412		if (netif_msg_pktdata(priv)) {
 413			netdev_info(priv->dev, "frame received %d bytes\n",
 414				    pktlength);
 415			print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
 416				       16, 1, skb->data, pktlength, true);
 417		}
 418
 419		tse_rx_vlan(priv->dev, skb);
 420
 421		skb->protocol = eth_type_trans(skb, priv->dev);
 422		skb_checksum_none_assert(skb);
 423
 424		napi_gro_receive(&priv->napi, skb);
 425
 426		priv->dev->stats.rx_packets++;
 427		priv->dev->stats.rx_bytes += pktlength;
 428
 429		entry = next_entry;
 430
 431		tse_rx_refill(priv);
 432	}
 433
 434	return count;
 435}
 436
 437/* Reclaim resources after transmission completes
 438 */
 439static int tse_tx_complete(struct altera_tse_private *priv)
 440{
 441	unsigned int txsize = priv->tx_ring_size;
 442	u32 ready;
 443	unsigned int entry;
 444	struct tse_buffer *tx_buff;
 445	int txcomplete = 0;
 446
 447	spin_lock(&priv->tx_lock);
 448
 449	ready = priv->dmaops->tx_completions(priv);
 450
 451	/* Free sent buffers */
 452	while (ready && (priv->tx_cons != priv->tx_prod)) {
 453		entry = priv->tx_cons % txsize;
 454		tx_buff = &priv->tx_ring[entry];
 455
 456		if (netif_msg_tx_done(priv))
 457			netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
 458				   __func__, priv->tx_prod, priv->tx_cons);
 459
 460		if (likely(tx_buff->skb))
 461			priv->dev->stats.tx_packets++;
 462
 463		tse_free_tx_buffer(priv, tx_buff);
 464		priv->tx_cons++;
 465
 466		txcomplete++;
 467		ready--;
 468	}
 469
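	/* Wake the queue only once at least a quarter of the ring
	 * (TSE_TX_THRESH) is free again, re-checking under
	 * netif_tx_lock to avoid racing with tse_start_xmit().
	 */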
 470	if (unlikely(netif_queue_stopped(priv->dev) &&
 471		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
 472		netif_tx_lock(priv->dev);
 473		if (netif_queue_stopped(priv->dev) &&
 474		    tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
 475			if (netif_msg_tx_done(priv))
 476				netdev_dbg(priv->dev, "%s: restart transmit\n",
 477					   __func__);
 478			netif_wake_queue(priv->dev);
 479		}
 480		netif_tx_unlock(priv->dev);
 481	}
 482
 483	spin_unlock(&priv->tx_lock);
 484	return txcomplete;
 485}
 486
 487/* NAPI polling function
 488 */
 489static int tse_poll(struct napi_struct *napi, int budget)
 490{
 491	struct altera_tse_private *priv =
 492			container_of(napi, struct altera_tse_private, napi);
 493	int rxcomplete = 0;
 494	int txcomplete = 0;
 495	unsigned long int flags;
 496
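	/* RX/TX interrupts were masked in altera_isr(); they are only
	 * re-enabled once all pending work fits within the budget and
	 * NAPI is completed below.
	 */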
 497	txcomplete = tse_tx_complete(priv);
 498
 499	rxcomplete = tse_rx(priv, budget);
 500
 501	if (rxcomplete >= budget || txcomplete > 0)
 502		return rxcomplete;
 503
 504	napi_gro_flush(napi, false);
 505	__napi_complete(napi);
 506
 507	netdev_dbg(priv->dev,
 508		   "NAPI Complete, did %d packets with budget %d\n",
 509		   txcomplete+rxcomplete, budget);
 510
 511	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
 512	priv->dmaops->enable_rxirq(priv);
 513	priv->dmaops->enable_txirq(priv);
 514	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 515	return rxcomplete + txcomplete;
 516}
 517
 518/* DMA TX & RX FIFO interrupt routing
 519 */
 520static irqreturn_t altera_isr(int irq, void *dev_id)
 521{
 522	struct net_device *dev = dev_id;
 523	struct altera_tse_private *priv;
 524	unsigned long int flags;
 525
 526	if (unlikely(!dev)) {
 527		pr_err("%s: invalid dev pointer\n", __func__);
 528		return IRQ_NONE;
 529	}
 530	priv = netdev_priv(dev);
 531
 532	/* turn off desc irqs and enable napi rx */
 533	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
 534
 535	if (likely(napi_schedule_prep(&priv->napi))) {
 536		priv->dmaops->disable_rxirq(priv);
 537		priv->dmaops->disable_txirq(priv);
 538		__napi_schedule(&priv->napi);
 539	}
 540
 541	/* reset IRQs */
 542	priv->dmaops->clear_rxirq(priv);
 543	priv->dmaops->clear_txirq(priv);
 544
 545	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 546
 547	return IRQ_HANDLED;
 548}
 549
 550/* Transmit a packet (called by the kernel). Dispatches to
 551 * either the SGDMA or the MSGDMA transmit method.
 552 * Scatter/gather is not supported, so the frame is assumed
 553 * to be a single physically contiguous fragment starting
 554 * at skb->data, with a length of skb_headlen(skb). Returns
 555 * NETDEV_TX_BUSY if the ring cannot take the frame.
 556 */
 557static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
 558{
 559	struct altera_tse_private *priv = netdev_priv(dev);
 560	unsigned int txsize = priv->tx_ring_size;
 561	unsigned int entry;
 562	struct tse_buffer *buffer = NULL;
 563	int nfrags = skb_shinfo(skb)->nr_frags;
 564	unsigned int nopaged_len = skb_headlen(skb);
 565	enum netdev_tx ret = NETDEV_TX_OK;
 566	dma_addr_t dma_addr;
 567
 568	spin_lock_bh(&priv->tx_lock);
 569
 570	if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
 571		if (!netif_queue_stopped(dev)) {
 572			netif_stop_queue(dev);
 573			/* This is a hard error, log it. */
 574			netdev_err(priv->dev,
 575				   "%s: Tx list full when queue awake\n",
 576				   __func__);
 577		}
 578		ret = NETDEV_TX_BUSY;
 579		goto out;
 580	}
 581
 582	/* Map the first skb fragment */
 583	entry = priv->tx_prod % txsize;
 584	buffer = &priv->tx_ring[entry];
 585
 586	dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
 587				  DMA_TO_DEVICE);
 588	if (dma_mapping_error(priv->device, dma_addr)) {
 589		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
 590		ret = NETDEV_TX_OK;
 591		goto out;
 592	}
 593
 594	buffer->skb = skb;
 595	buffer->dma_addr = dma_addr;
 596	buffer->len = nopaged_len;
 597
 598	/* Push data out of the cache hierarchy into main memory */
 599	dma_sync_single_for_device(priv->device, buffer->dma_addr,
 600				   buffer->len, DMA_TO_DEVICE);
 601
 602	priv->dmaops->tx_buffer(priv, buffer);
 603
 604	skb_tx_timestamp(skb);
 605
 606	priv->tx_prod++;
 607	dev->stats.tx_bytes += skb->len;
 608
 609	if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) {
 610		if (netif_msg_hw(priv))
 611			netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
 612				   __func__);
 613		netif_stop_queue(dev);
 614	}
 615
 616out:
 617	spin_unlock_bh(&priv->tx_lock);
 618
 619	return ret;
 620}
 621
 622/* Called every time the controller might need to be made
 623 * aware of new link state.  The PHY code conveys this
 624 * information through variables in the phydev structure, and this
 625 * function converts those variables into the appropriate
 626 * register values, and can bring down the device if needed.
 627 */
 628static void altera_tse_adjust_link(struct net_device *dev)
 629{
 630	struct altera_tse_private *priv = netdev_priv(dev);
 631	struct phy_device *phydev = priv->phydev;
 632	int new_state = 0;
 633
 634	/* only change config if there is a link */
 635	spin_lock(&priv->mac_cfg_lock);
 636	if (phydev->link) {
 637		/* Read old config */
 638		u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
 639
 640		/* Check duplex */
 641		if (phydev->duplex != priv->oldduplex) {
 642			new_state = 1;
 643			if (!(phydev->duplex))
 644				cfg_reg |= MAC_CMDCFG_HD_ENA;
 645			else
 646				cfg_reg &= ~MAC_CMDCFG_HD_ENA;
 647
 648			netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
 649				   dev->name, phydev->duplex);
 650
 651			priv->oldduplex = phydev->duplex;
 652		}
 653
 654		/* Check speed */
 655		if (phydev->speed != priv->oldspeed) {
 656			new_state = 1;
 657			switch (phydev->speed) {
 658			case 1000:
 659				cfg_reg |= MAC_CMDCFG_ETH_SPEED;
 660				cfg_reg &= ~MAC_CMDCFG_ENA_10;
 661				break;
 662			case 100:
 663				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
 664				cfg_reg &= ~MAC_CMDCFG_ENA_10;
 665				break;
 666			case 10:
 667				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
 668				cfg_reg |= MAC_CMDCFG_ENA_10;
 669				break;
 670			default:
 671				if (netif_msg_link(priv))
 672					netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
 673						    phydev->speed);
 674				break;
 675			}
 676			priv->oldspeed = phydev->speed;
 677		}
 678		iowrite32(cfg_reg, &priv->mac_dev->command_config);
 679
 680		if (!priv->oldlink) {
 681			new_state = 1;
 682			priv->oldlink = 1;
 683		}
 684	} else if (priv->oldlink) {
 685		new_state = 1;
 686		priv->oldlink = 0;
 687		priv->oldspeed = 0;
 688		priv->oldduplex = -1;
 689	}
 690
 691	if (new_state && netif_msg_link(priv))
 692		phy_print_status(phydev);
 693
 694	spin_unlock(&priv->mac_cfg_lock);
 695}
 696static struct phy_device *connect_local_phy(struct net_device *dev)
 697{
 698	struct altera_tse_private *priv = netdev_priv(dev);
 699	struct phy_device *phydev = NULL;
 700	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
 701
 702	if (priv->phy_addr != POLL_PHY) {
 703		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
 704			 priv->mdio->id, priv->phy_addr);
 705
 706		netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
 707
 708		phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
 709				     priv->phy_iface);
 710		if (IS_ERR(phydev))
 711			netdev_err(dev, "Could not attach to PHY\n");
 712
 713	} else {
 714		int ret;
 715		phydev = phy_find_first(priv->mdio);
 716		if (phydev == NULL) {
 717			netdev_err(dev, "No PHY found\n");
 718			return phydev;
 719		}
 720
 721		ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
 722				priv->phy_iface);
 723		if (ret != 0) {
 724			netdev_err(dev, "Could not attach to PHY\n");
 725			phydev = NULL;
 726		}
 727	}
 728	return phydev;
 729}
 730
 731/* Initialize driver's PHY state, and attach to the PHY
 732 */
 733static int init_phy(struct net_device *dev)
 734{
 735	struct altera_tse_private *priv = netdev_priv(dev);
 736	struct phy_device *phydev;
 737	struct device_node *phynode;
 738
 739	priv->oldlink = 0;
 740	priv->oldspeed = 0;
 741	priv->oldduplex = -1;
 742
 743	phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
 744
 745	if (!phynode) {
 746		netdev_dbg(dev, "no phy-handle found\n");
 747		if (!priv->mdio) {
 748			netdev_err(dev,
 749				   "No phy-handle nor local mdio specified\n");
 750			return -ENODEV;
 751		}
 752		phydev = connect_local_phy(dev);
 753	} else {
 754		netdev_dbg(dev, "phy-handle found\n");
 755		phydev = of_phy_connect(dev, phynode,
 756			&altera_tse_adjust_link, 0, priv->phy_iface);
 757	}
 758
 759	if (!phydev) {
 760		netdev_err(dev, "Could not find the PHY\n");
 761		return -ENODEV;
 762	}
 763
 764	/* Stop Advertising 1000BASE Capability if interface is not GMII
 765	 * Note: Checkpatch throws CHECKs for the camel case defines below,
 766	 * it's ok to ignore.
 767	 */
 768	if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
 769	    (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
 770		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
 771					 SUPPORTED_1000baseT_Full);
 772
 773	/* Broken HW is sometimes missing the pull-up resistor on the
 774	 * MDIO line, which results in reads to non-existent devices returning
 775	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
 776	 * device as well.
 777	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
 778	 */
 779	if (phydev->phy_id == 0) {
 780		netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
 781		phy_disconnect(phydev);
 782		return -ENODEV;
 783	}
 784
 785	netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
 786		   phydev->addr, phydev->phy_id, phydev->link);
 787
 788	priv->phydev = phydev;
 789	return 0;
 790}
 791
 792static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 793{
 794	u32 msb;
 795	u32 lsb;
 796
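	/* mac_addr_0 takes octets 0-3 (addr[0] in the least significant
	 * byte), mac_addr_1 takes octets 4-5 in its low 16 bits. For
	 * example, 52:54:00:12:34:56 becomes msb 0x12005452 and
	 * lsb 0x5634.
	 */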
 797	msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
 798	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 799
 800	/* Set primary MAC address */
 801	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
 802	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 803}
 804
 805/* MAC software reset.
 806 * When reset is triggered, the MAC function completes the current
 807 * transmission or reception, and subsequently disables the transmit and
 808 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 809 * counters.
 810 */
 811static int reset_mac(struct altera_tse_private *priv)
 812{
 813	int counter;
 814	u32 dat;
 815
 816	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 817	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 818	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
 819	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 820
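	/* Busy-wait for the core to clear SW_RESET, bounded to
	 * ALTERA_TSE_SW_RESET_WATCHDOG_CNTR iterations of roughly 1us
	 * each; force the bit clear and report failure if it never
	 * self-clears.
	 */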
 821	counter = 0;
 822	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
 823		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
 824				     MAC_CMDCFG_SW_RESET))
 825			break;
 826		udelay(1);
 827	}
 828
 829	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
 830		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 831		dat &= ~MAC_CMDCFG_SW_RESET;
 832		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 833		return -1;
 834	}
 835	return 0;
 836}
 837
 838/* Initialize MAC core registers
 839 */
 840static int init_mac(struct altera_tse_private *priv)
 841{
 842	unsigned int cmd = 0;
 843	u32 frm_length;
 844
 845	/* Setup Rx FIFO */
 846	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
 847		priv->mac_dev, tse_csroffs(rx_section_empty));
 848
 849	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
 850		tse_csroffs(rx_section_full));
 851
 852	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
 853		tse_csroffs(rx_almost_empty));
 854
 855	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
 856		tse_csroffs(rx_almost_full));
 857
 858	/* Setup Tx FIFO */
 859	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
 860		priv->mac_dev, tse_csroffs(tx_section_empty));
 861
 862	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
 863		tse_csroffs(tx_section_full));
 864
 865	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
 866		tse_csroffs(tx_almost_empty));
 867
 868	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
 869		tse_csroffs(tx_almost_full));
 870
 871	/* MAC Address Configuration */
 872	tse_update_mac_addr(priv, priv->dev->dev_addr);
 873
 874	/* MAC Function Configuration */
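	/* e.g. with the default 1500-byte MTU the maximum frame length
	 * programmed below is 14 (ETH_HLEN) + 1500 + 4 (ETH_FCS_LEN) =
	 * 1518 bytes
	 */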
 875	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
 876	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
 877
 878	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
 879		tse_csroffs(tx_ipg_length));
 880
 881	/* Enable RX shift 16 so all received frames are aligned on a 16-bit
 882	 * start address; TX shift 16 and CRC omission remain disabled
 883	 */
 884	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
 885		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
 886
 887	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
 888		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
 889		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 890
 891	/* Set the MAC options */
 892	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 893	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
 894	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
 895	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
 896					 * with CRC errors
 897					 */
 898	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
 899	cmd &= ~MAC_CMDCFG_TX_ENA;
 900	cmd &= ~MAC_CMDCFG_RX_ENA;
 901
 902	/* Default speed and duplex setting, full/100 */
 903	cmd &= ~MAC_CMDCFG_HD_ENA;
 904	cmd &= ~MAC_CMDCFG_ETH_SPEED;
 905	cmd &= ~MAC_CMDCFG_ENA_10;
 906
 907	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
 908
 909	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
 910		tse_csroffs(pause_quanta));
 911
 912	if (netif_msg_hw(priv))
 913		dev_dbg(priv->device,
 914			"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);
 915
 916	return 0;
 917}
 918
 919/* Start/stop MAC transmission logic
 920 */
 921static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 922{
 923	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 924
 925	if (enable)
 926		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
 927	else
 928		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 929
 930	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 931}
 932
 933/* Change the MTU
 934 */
 935static int tse_change_mtu(struct net_device *dev, int new_mtu)
 936{
 937	struct altera_tse_private *priv = netdev_priv(dev);
 938	unsigned int max_mtu = priv->max_mtu;
 939	unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN;
 940
 941	if (netif_running(dev)) {
 942		netdev_err(dev, "must be stopped to change its MTU\n");
 943		return -EBUSY;
 944	}
 945
 946	if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) {
 947		netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu);
 948		return -EINVAL;
 949	}
 950
 951	dev->mtu = new_mtu;
 952	netdev_update_features(dev);
 953
 954	return 0;
 955}
 956
 957static void altera_tse_set_mcfilter(struct net_device *dev)
 958{
 959	struct altera_tse_private *priv = netdev_priv(dev);
 960	int i;
 961	struct netdev_hw_addr *ha;
 962
 963	/* clear the hash filter */
 964	for (i = 0; i < 64; i++)
 965		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 966
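	/* Each multicast address selects one of the 64 hash-table
	 * entries: bit n of the 6-bit index is the XOR (parity) of the
	 * bits of addr[n], with addr[5] giving the most significant
	 * bit. As an illustration, 01:00:5e:00:00:01 yields parity
	 * bits 1,0,0,1,0,1 (addr[5]..addr[0]) -> index 0b100101 = 37.
	 */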
 967	netdev_for_each_mc_addr(ha, dev) {
 968		unsigned int hash = 0;
 969		int mac_octet;
 970
 971		for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
 972			unsigned char xor_bit = 0;
 973			unsigned char octet = ha->addr[mac_octet];
 974			unsigned int bitshift;
 975
 976			for (bitshift = 0; bitshift < 8; bitshift++)
 977				xor_bit ^= ((octet >> bitshift) & 0x01);
 978
 979			hash = (hash << 1) | xor_bit;
 980		}
 981		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
 982	}
 983}
 984
 985
 986static void altera_tse_set_mcfilterall(struct net_device *dev)
 987{
 988	struct altera_tse_private *priv = netdev_priv(dev);
 989	int i;
 990
 991	/* set the hash filter */
 992	for (i = 0; i < 64; i++)
 993		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 994}
 995
 996/* Set or clear the multicast filter for this adaptor
 997 */
 998static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 999{
1000	struct altera_tse_private *priv = netdev_priv(dev);
1001
1002	spin_lock(&priv->mac_cfg_lock);
1003
1004	if (dev->flags & IFF_PROMISC)
1005		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1006			    MAC_CMDCFG_PROMIS_EN);
1007
1008	if (dev->flags & IFF_ALLMULTI)
1009		altera_tse_set_mcfilterall(dev);
1010	else
1011		altera_tse_set_mcfilter(dev);
1012
1013	spin_unlock(&priv->mac_cfg_lock);
1014}
1015
1016/* Set or clear the multicast filter for this adaptor
1017 */
1018static void tse_set_rx_mode(struct net_device *dev)
1019{
1020	struct altera_tse_private *priv = netdev_priv(dev);
1021
1022	spin_lock(&priv->mac_cfg_lock);
1023
1024	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
1025	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
1026		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1027			    MAC_CMDCFG_PROMIS_EN);
1028	else
1029		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
1030			      MAC_CMDCFG_PROMIS_EN);
1031
1032	spin_unlock(&priv->mac_cfg_lock);
1033}
1034
1035/* Open and initialize the interface
1036 */
1037static int tse_open(struct net_device *dev)
1038{
1039	struct altera_tse_private *priv = netdev_priv(dev);
1040	int ret = 0;
1041	int i;
1042	unsigned long int flags;
1043
1044	/* Reset and configure TSE MAC and probe associated PHY */
1045	ret = priv->dmaops->init_dma(priv);
1046	if (ret != 0) {
1047		netdev_err(dev, "Cannot initialize DMA\n");
1048		goto phy_error;
1049	}
1050
1051	if (netif_msg_ifup(priv))
1052		netdev_warn(dev, "device MAC address %pM\n",
1053			    dev->dev_addr);
1054
1055	if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
1056		netdev_warn(dev, "TSE revision %x\n", priv->revision);
1057
1058	spin_lock(&priv->mac_cfg_lock);
1059	ret = reset_mac(priv);
1060	if (ret)
1061		netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
1062
1063	ret = init_mac(priv);
1064	spin_unlock(&priv->mac_cfg_lock);
1065	if (ret) {
1066		netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
1067		goto alloc_skbuf_error;
1068	}
1069
1070	priv->dmaops->reset_dma(priv);
1071
1072	/* Create and initialize the TX/RX descriptors chains. */
1073	priv->rx_ring_size = dma_rx_num;
1074	priv->tx_ring_size = dma_tx_num;
1075	ret = alloc_init_skbufs(priv);
1076	if (ret) {
1077		netdev_err(dev, "DMA descriptors initialization failed\n");
1078		goto alloc_skbuf_error;
1079	}
1080
1081
1082	/* Register RX interrupt */
1083	ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
1084			  dev->name, dev);
1085	if (ret) {
1086		netdev_err(dev, "Unable to register RX interrupt %d\n",
1087			   priv->rx_irq);
1088		goto init_error;
1089	}
1090
1091	/* Register TX interrupt */
1092	ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
1093			  dev->name, dev);
1094	if (ret) {
1095		netdev_err(dev, "Unable to register TX interrupt %d\n",
1096			   priv->tx_irq);
1097		goto tx_request_irq_error;
1098	}
1099
1100	/* Enable DMA interrupts */
1101	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
1102	priv->dmaops->enable_rxirq(priv);
1103	priv->dmaops->enable_txirq(priv);
1104
1105	/* Setup RX descriptor chain */
1106	for (i = 0; i < priv->rx_ring_size; i++)
1107		priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);
1108
1109	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1110
1111	if (priv->phydev)
1112		phy_start(priv->phydev);
1113
1114	napi_enable(&priv->napi);
1115	netif_start_queue(dev);
1116
1117	priv->dmaops->start_rxdma(priv);
1118
1119	/* Start MAC Rx/Tx */
1120	spin_lock(&priv->mac_cfg_lock);
1121	tse_set_mac(priv, true);
1122	spin_unlock(&priv->mac_cfg_lock);
1123
1124	return 0;
1125
1126tx_request_irq_error:
1127	free_irq(priv->rx_irq, dev);
1128init_error:
1129	free_skbufs(dev);
1130alloc_skbuf_error:
1131	if (priv->phydev) {
1132		phy_disconnect(priv->phydev);
1133		priv->phydev = NULL;
1134	}
1135phy_error:
1136	return ret;
1137}
1138
1139/* Stop TSE MAC interface and put the device in an inactive state
1140 */
1141static int tse_shutdown(struct net_device *dev)
1142{
1143	struct altera_tse_private *priv = netdev_priv(dev);
1144	int ret;
1145	unsigned long int flags;
1146
1147	/* Stop and disconnect the PHY */
1148	if (priv->phydev) {
1149		phy_stop(priv->phydev);
1150		phy_disconnect(priv->phydev);
1151		priv->phydev = NULL;
1152	}
1153
1154	netif_stop_queue(dev);
1155	napi_disable(&priv->napi);
1156
1157	/* Disable DMA interrupts */
1158	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
1159	priv->dmaops->disable_rxirq(priv);
1160	priv->dmaops->disable_txirq(priv);
1161	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1162
1163	/* Free the IRQ lines */
1164	free_irq(priv->rx_irq, dev);
1165	free_irq(priv->tx_irq, dev);
1166
1167	/* disable and reset the MAC, empties fifo */
1168	spin_lock(&priv->mac_cfg_lock);
1169	spin_lock(&priv->tx_lock);
1170
1171	ret = reset_mac(priv);
1172	if (ret)
1173		netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
1174	priv->dmaops->reset_dma(priv);
1175	free_skbufs(dev);
1176
1177	spin_unlock(&priv->tx_lock);
1178	spin_unlock(&priv->mac_cfg_lock);
1179
1180	priv->dmaops->uninit_dma(priv);
1181
1182	return 0;
1183}
1184
1185static struct net_device_ops altera_tse_netdev_ops = {
1186	.ndo_open		= tse_open,
1187	.ndo_stop		= tse_shutdown,
1188	.ndo_start_xmit		= tse_start_xmit,
1189	.ndo_set_mac_address	= eth_mac_addr,
1190	.ndo_set_rx_mode	= tse_set_rx_mode,
1191	.ndo_change_mtu		= tse_change_mtu,
1192	.ndo_validate_addr	= eth_validate_addr,
1193};
1194
1195static int request_and_map(struct platform_device *pdev, const char *name,
1196			   struct resource **res, void __iomem **ptr)
1197{
1198	struct resource *region;
1199	struct device *device = &pdev->dev;
1200
1201	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
1202	if (*res == NULL) {
1203		dev_err(device, "resource %s not defined\n", name);
1204		return -ENODEV;
1205	}
1206
1207	region = devm_request_mem_region(device, (*res)->start,
1208					 resource_size(*res), dev_name(device));
1209	if (region == NULL) {
1210		dev_err(device, "unable to request %s\n", name);
1211		return -EBUSY;
1212	}
1213
1214	*ptr = devm_ioremap_nocache(device, region->start,
1215				    resource_size(region));
1216	if (*ptr == NULL) {
1217		dev_err(device, "ioremap_nocache of %s failed!", name);
1218		return -ENOMEM;
1219	}
1220
1221	return 0;
1222}
1223
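/* Illustrative device-tree node (not taken from this file) showing the
 * resources and properties the probe routine below looks up by name:
 * "control_port", "rx_csr", "tx_csr" (plus the per-DMA descriptor
 * regions), "rx_irq"/"tx_irq", "rx-fifo-depth", "tx-fifo-depth",
 * "phy-addr" or "phy-handle", and "max-frame-size". The values and the
 * phy0 label are examples only; reg and interrupts cells are omitted
 * for brevity:
 *
 *	ethernet {
 *		compatible = "altr,tse-msgdma-1.0";
 *		reg-names = "control_port", "rx_csr", "rx_desc",
 *			    "rx_resp", "tx_csr", "tx_desc";
 *		interrupt-names = "rx_irq", "tx_irq";
 *		rx-fifo-depth = <2048>;
 *		tx-fifo-depth = <2048>;
 *		max-frame-size = <1500>;
 *		phy-handle = <&phy0>;
 *	};
 */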
1224/* Probe Altera TSE MAC device
1225 */
1226static int altera_tse_probe(struct platform_device *pdev)
1227{
1228	struct net_device *ndev;
1229	int ret = -ENODEV;
1230	struct resource *control_port;
1231	struct resource *dma_res;
1232	struct altera_tse_private *priv;
1233	const unsigned char *macaddr;
1234	struct device_node *np = pdev->dev.of_node;
1235	void __iomem *descmap;
1236	const struct of_device_id *of_id = NULL;
1237
1238	ndev = alloc_etherdev(sizeof(struct altera_tse_private));
1239	if (!ndev) {
1240		dev_err(&pdev->dev, "Could not allocate network device\n");
1241		return -ENODEV;
1242	}
1243
1244	SET_NETDEV_DEV(ndev, &pdev->dev);
1245
1246	priv = netdev_priv(ndev);
1247	priv->device = &pdev->dev;
1248	priv->dev = ndev;
1249	priv->msg_enable = netif_msg_init(debug, default_msg_level);
1250
1251	of_id = of_match_device(altera_tse_ids, &pdev->dev);
1252
1253	if (of_id)
1254		priv->dmaops = (struct altera_dmaops *)of_id->data;
1255
1256
1257	if (priv->dmaops &&
1258	    priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
1259		/* Get the mapped address to the SGDMA descriptor memory */
1260		ret = request_and_map(pdev, "s1", &dma_res, &descmap);
1261		if (ret)
1262			goto err_free_netdev;
1263
1264		/* Start of that memory is for transmit descriptors */
1265		priv->tx_dma_desc = descmap;
1266
1267		/* First half is for tx descriptors, other half for rx */
1268		priv->txdescmem = resource_size(dma_res)/2;
1269
1270		priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;
1271
1272		priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
1273						     priv->txdescmem));
1274		priv->rxdescmem = resource_size(dma_res)/2;
1275		priv->rxdescmem_busaddr = dma_res->start;
1276		priv->rxdescmem_busaddr += priv->txdescmem;
1277
1278		if (upper_32_bits(priv->rxdescmem_busaddr)) {
1279			dev_dbg(priv->device,
1280				"SGDMA bus addresses greater than 32-bits\n");
1281			goto err_free_netdev;
1282		}
1283		if (upper_32_bits(priv->txdescmem_busaddr)) {
1284			dev_dbg(priv->device,
1285				"SGDMA bus addresses greater than 32-bits\n");
1286			goto err_free_netdev;
1287		}
1288	} else if (priv->dmaops &&
1289		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
1290		ret = request_and_map(pdev, "rx_resp", &dma_res,
1291				      &priv->rx_dma_resp);
1292		if (ret)
1293			goto err_free_netdev;
1294
1295		ret = request_and_map(pdev, "tx_desc", &dma_res,
1296				      &priv->tx_dma_desc);
1297		if (ret)
1298			goto err_free_netdev;
1299
1300		priv->txdescmem = resource_size(dma_res);
1301		priv->txdescmem_busaddr = dma_res->start;
1302
1303		ret = request_and_map(pdev, "rx_desc", &dma_res,
1304				      &priv->rx_dma_desc);
1305		if (ret)
1306			goto err_free_netdev;
1307
1308		priv->rxdescmem = resource_size(dma_res);
1309		priv->rxdescmem_busaddr = dma_res->start;
1310
1311	} else {
1312		goto err_free_netdev;
1313	}
1314
1315	if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
1316		dma_set_coherent_mask(priv->device,
1317				      DMA_BIT_MASK(priv->dmaops->dmamask));
1318	else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
1319		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
1320	else
1321		goto err_free_netdev;
1322
1323	/* MAC address space */
1324	ret = request_and_map(pdev, "control_port", &control_port,
1325			      (void __iomem **)&priv->mac_dev);
1326	if (ret)
1327		goto err_free_netdev;
1328
1329	/* xSGDMA Rx Dispatcher address space */
1330	ret = request_and_map(pdev, "rx_csr", &dma_res,
1331			      &priv->rx_dma_csr);
1332	if (ret)
1333		goto err_free_netdev;
1334
1335
1336	/* xSGDMA Tx Dispatcher address space */
1337	ret = request_and_map(pdev, "tx_csr", &dma_res,
1338			      &priv->tx_dma_csr);
1339	if (ret)
1340		goto err_free_netdev;
1341
1342
1343	/* Rx IRQ */
1344	priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
1345	if (priv->rx_irq == -ENXIO) {
1346		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
1347		ret = -ENXIO;
1348		goto err_free_netdev;
1349	}
1350
1351	/* Tx IRQ */
1352	priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
1353	if (priv->tx_irq == -ENXIO) {
1354		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
1355		ret = -ENXIO;
1356		goto err_free_netdev;
1357	}
1358
1359	/* get FIFO depths from device tree */
1360	if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1361				 &priv->rx_fifo_depth)) {
1362		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
1363		ret = -ENXIO;
1364		goto err_free_netdev;
1365	}
1366
1367	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1368				 &priv->tx_fifo_depth)) {
1369		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1370		ret = -ENXIO;
1371		goto err_free_netdev;
1372	}
1373
1374	/* get hash filter settings for this instance */
1375	priv->hash_filter =
1376		of_property_read_bool(pdev->dev.of_node,
1377				      "altr,has-hash-multicast-filter");
1378
1379	/* Force the hash filter off for now, until the
1380	 * multicast filter receive issue is debugged
1381	 */
1382	priv->hash_filter = 0;
1383
1384	/* get supplemental address settings for this instance */
1385	priv->added_unicast =
1386		of_property_read_bool(pdev->dev.of_node,
1387				      "altr,has-supplementary-unicast");
1388
1389	/* Max MTU is 1500, ETH_DATA_LEN */
1390	priv->max_mtu = ETH_DATA_LEN;
1391
1392	/* Get the max mtu from the device tree. Note that the
1393	 * "max-frame-size" parameter is actually max mtu. Definition
1394	 * in the ePAPR v1.1 spec and usage differ, so go with usage.
1395	 */
1396	of_property_read_u32(pdev->dev.of_node, "max-frame-size",
1397			     &priv->max_mtu);
1398
1399	/* The DMA buffer size already accounts for an alignment bias
1400	 * to avoid unaligned access exceptions for the NIOS processor.
1401	 */
1402	priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
1403
1404	/* get default MAC address from device tree */
1405	macaddr = of_get_mac_address(pdev->dev.of_node);
1406	if (macaddr)
1407		ether_addr_copy(ndev->dev_addr, macaddr);
1408	else
1409		eth_hw_addr_random(ndev);
1410
1411	priv->phy_iface = of_get_phy_mode(np);
1412
1413	/* try to get PHY address from device tree, use PHY autodetection if
1414	 * no valid address is given
1415	 */
1416	if (of_property_read_u32(pdev->dev.of_node, "phy-addr",
1417				 &priv->phy_addr)) {
1418		priv->phy_addr = POLL_PHY;
1419	}
1420
1421	if (!((priv->phy_addr == POLL_PHY) ||
1422	      ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
1423		dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
1424			priv->phy_addr);
1425		goto err_free_netdev;
1426	}
1427
1428	/* Create/attach to MDIO bus */
1429	ret = altera_tse_mdio_create(ndev,
1430				     atomic_add_return(1, &instance_count));
1431
1432	if (ret)
1433		goto err_free_netdev;
1434
1435	/* initialize netdev */
1436	ether_setup(ndev);
1437	ndev->mem_start = control_port->start;
1438	ndev->mem_end = control_port->end;
1439	ndev->netdev_ops = &altera_tse_netdev_ops;
1440	altera_tse_set_ethtool_ops(ndev);
1441
1442	altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
1443
1444	if (priv->hash_filter)
1445		altera_tse_netdev_ops.ndo_set_rx_mode =
1446			tse_set_rx_mode_hashfilter;
1447
1448	/* Scatter/gather IO is not supported,
1449	 * so it is turned off
1450	 */
1451	ndev->hw_features &= ~NETIF_F_SG;
1452	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
1453
1454	/* VLAN offloading of tagging, stripping and filtering is not
1455	 * supported by hardware, but driver will accommodate the
1456	 * extra 4-byte VLAN tag for processing by upper layers
1457	 */
1458	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1459
1460	/* setup NAPI interface */
1461	netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
1462
1463	spin_lock_init(&priv->mac_cfg_lock);
1464	spin_lock_init(&priv->tx_lock);
1465	spin_lock_init(&priv->rxdma_irq_lock);
1466
1467	ret = register_netdev(ndev);
1468	if (ret) {
1469		dev_err(&pdev->dev, "failed to register TSE net device\n");
1470		goto err_register_netdev;
1471	}
1472
1473	platform_set_drvdata(pdev, ndev);
1474
1475	priv->revision = ioread32(&priv->mac_dev->megacore_revision);
1476
1477	if (netif_msg_probe(priv))
1478		dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
1479			 (priv->revision >> 8) & 0xff,
1480			 priv->revision & 0xff,
1481			 (unsigned long) control_port->start, priv->rx_irq,
1482			 priv->tx_irq);
1483
1484	ret = init_phy(ndev);
1485	if (ret != 0) {
1486		netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
1487		goto err_init_phy;
1488	}
1489	return 0;
1490
1491err_init_phy:
1492	unregister_netdev(ndev);
1493err_register_netdev:
1494	netif_napi_del(&priv->napi);
1495	altera_tse_mdio_destroy(ndev);
1496err_free_netdev:
1497	free_netdev(ndev);
1498	return ret;
1499}
1500
1501/* Remove Altera TSE MAC device
1502 */
1503static int altera_tse_remove(struct platform_device *pdev)
1504{
1505	struct net_device *ndev = platform_get_drvdata(pdev);
1506
1507	platform_set_drvdata(pdev, NULL);
1508	altera_tse_mdio_destroy(ndev);
1509	unregister_netdev(ndev);
1510	free_netdev(ndev);
1511
1512	return 0;
1513}
1514
1515static const struct altera_dmaops altera_dtype_sgdma = {
1516	.altera_dtype = ALTERA_DTYPE_SGDMA,
1517	.dmamask = 32,
1518	.reset_dma = sgdma_reset,
1519	.enable_txirq = sgdma_enable_txirq,
1520	.enable_rxirq = sgdma_enable_rxirq,
1521	.disable_txirq = sgdma_disable_txirq,
1522	.disable_rxirq = sgdma_disable_rxirq,
1523	.clear_txirq = sgdma_clear_txirq,
1524	.clear_rxirq = sgdma_clear_rxirq,
1525	.tx_buffer = sgdma_tx_buffer,
1526	.tx_completions = sgdma_tx_completions,
1527	.add_rx_desc = sgdma_add_rx_desc,
1528	.get_rx_status = sgdma_rx_status,
1529	.init_dma = sgdma_initialize,
1530	.uninit_dma = sgdma_uninitialize,
1531	.start_rxdma = sgdma_start_rxdma,
1532};
1533
1534static const struct altera_dmaops altera_dtype_msgdma = {
1535	.altera_dtype = ALTERA_DTYPE_MSGDMA,
1536	.dmamask = 64,
1537	.reset_dma = msgdma_reset,
1538	.enable_txirq = msgdma_enable_txirq,
1539	.enable_rxirq = msgdma_enable_rxirq,
1540	.disable_txirq = msgdma_disable_txirq,
1541	.disable_rxirq = msgdma_disable_rxirq,
1542	.clear_txirq = msgdma_clear_txirq,
1543	.clear_rxirq = msgdma_clear_rxirq,
1544	.tx_buffer = msgdma_tx_buffer,
1545	.tx_completions = msgdma_tx_completions,
1546	.add_rx_desc = msgdma_add_rx_desc,
1547	.get_rx_status = msgdma_rx_status,
1548	.init_dma = msgdma_initialize,
1549	.uninit_dma = msgdma_uninitialize,
1550	.start_rxdma = msgdma_start_rxdma,
1551};
1552
1553static struct of_device_id altera_tse_ids[] = {
1554	{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
1555	{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
1556	{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
1557	{},
1558};
1559MODULE_DEVICE_TABLE(of, altera_tse_ids);
1560
1561static struct platform_driver altera_tse_driver = {
1562	.probe		= altera_tse_probe,
1563	.remove		= altera_tse_remove,
1564	.suspend	= NULL,
1565	.resume		= NULL,
1566	.driver		= {
1567		.name	= ALTERA_TSE_RESOURCE_NAME,
1568		.owner	= THIS_MODULE,
1569		.of_match_table = altera_tse_ids,
1570	},
1571};
1572
1573module_platform_driver(altera_tse_driver);
1574
1575MODULE_AUTHOR("Altera Corporation");
1576MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
1577MODULE_LICENSE("GPL v2");