// SPDX-License-Identifier: GPL-2.0-only
/* Altera Triple-Speed Ethernet MAC driver
 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
 *
 * Contributors:
 *   Dalon Westergreen
 *   Thomas Chou
 *   Ian Abbott
 *   Yuriy Kozlov
 *   Tobias Klauser
 *   Andriy Smolskyy
 *   Roman Bulgakov
 *   Dmytro Mytarchuk
 *   Matthew Gerlach
 *
 * Original driver contributed by SLS.
 * Major updates contributed by GlobalLogic
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/mdio/mdio-regmap.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pcs-lynx.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <asm/cacheflush.h>

#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdma.h"
#include "altera_msgdma.h"

static atomic_t instance_count = ATOMIC_INIT(~0);
/* Module parameters */
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
					NETIF_MSG_LINK | NETIF_MSG_IFUP |
					NETIF_MSG_IFDOWN);

#define RX_DESCRIPTORS 64
static int dma_rx_num = RX_DESCRIPTORS;
module_param(dma_rx_num, int, 0644);
MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");

#define TX_DESCRIPTORS 64
static int dma_tx_num = TX_DESCRIPTORS;
module_param(dma_tx_num, int, 0644);
MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");

#define POLL_PHY (-1)

/* Make sure DMA buffer size is larger than the max frame size
 * plus some alignment offset and a VLAN header. If the max frame size is
 * 1518, a VLAN header would be additional 4 bytes and additional
 * headroom for alignment is 2 bytes, 2048 is just fine.
 */
#define ALTERA_RXDMABUFFER_SIZE	2048

/* Allow network stack to resume queuing packets after we've
 * finished transmitting at least 1/4 of the packets in the queue.
 */
#define TSE_TX_THRESH(x)	(x->tx_ring_size / 4)
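/* With the default 64-entry TX ring this evaluates to 16, i.e. a stopped
 * queue is woken once more than 16 descriptor slots are free again.
 */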

#define TXQUEUESTOP_THRESHOLD	2

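/* Count free TX descriptors. One ring slot is always left unused so that
 * tx_prod == tx_cons unambiguously means "empty": e.g. with a 64-entry
 * ring, tx_prod = 10 and tx_cons = 5 leaves 58 usable slots.
 */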
static inline u32 tse_tx_avail(struct altera_tse_private *priv)
{
	return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}

/* MDIO specific functions
 */
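/* The MAC exposes MDIO through a register window: writing the PHY address
 * to mdio_phy1_addr selects the target PHY, after which each of the 32 MII
 * registers appears as its own 32-bit CSR at mdio_phy1 + regnum * 4.
 */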
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct net_device *ndev = bus->priv;
	struct altera_tse_private *priv = netdev_priv(ndev);

	/* set MDIO address */
	csrwr32((mii_id & 0x1f), priv->mac_dev,
		tse_csroffs(mdio_phy1_addr));

	/* get the data */
	return csrrd32(priv->mac_dev,
		       tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
}

static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 value)
{
	struct net_device *ndev = bus->priv;
	struct altera_tse_private *priv = netdev_priv(ndev);

	/* set MDIO address */
	csrwr32((mii_id & 0x1f), priv->mac_dev,
		tse_csroffs(mdio_phy1_addr));

	/* write the data */
	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
	return 0;
}

static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct device_node *mdio_node = NULL;
	struct device_node *child_node = NULL;
	struct mii_bus *mdio = NULL;
	int ret;

	for_each_child_of_node(priv->device->of_node, child_node) {
		if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
			mdio_node = child_node;
			break;
		}
	}

	if (mdio_node) {
		netdev_dbg(dev, "FOUND MDIO subnode\n");
	} else {
		netdev_dbg(dev, "NO MDIO subnode\n");
		return 0;
	}

	mdio = mdiobus_alloc();
	if (mdio == NULL) {
		netdev_err(dev, "Error allocating MDIO bus\n");
		ret = -ENOMEM;
		goto put_node;
	}

	mdio->name = ALTERA_TSE_RESOURCE_NAME;
	mdio->read = &altera_tse_mdio_read;
	mdio->write = &altera_tse_mdio_write;
	snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);

	mdio->priv = dev;
	mdio->parent = priv->device;

	ret = of_mdiobus_register(mdio, mdio_node);
	if (ret != 0) {
		netdev_err(dev, "Cannot register MDIO bus %s\n",
			   mdio->id);
		goto out_free_mdio;
	}
	of_node_put(mdio_node);

	if (netif_msg_drv(priv))
		netdev_info(dev, "MDIO bus %s: created\n", mdio->id);

	priv->mdio = mdio;
	return 0;
out_free_mdio:
	mdiobus_free(mdio);
	mdio = NULL;
put_node:
	of_node_put(mdio_node);
	return ret;
}

static void altera_tse_mdio_destroy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	if (priv->mdio == NULL)
		return;

	if (netif_msg_drv(priv))
		netdev_info(dev, "MDIO bus %s: removed\n",
			    priv->mdio->id);

	mdiobus_unregister(priv->mdio);
	mdiobus_free(priv->mdio);
	priv->mdio = NULL;
}

static int tse_init_rx_buffer(struct altera_tse_private *priv,
			      struct tse_buffer *rxbuffer, int len)
{
	rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
	if (!rxbuffer->skb)
		return -ENOMEM;

	rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
					    len,
					    DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(rxbuffer->skb);
		return -EINVAL;
	}
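	/* netdev_alloc_skb_ip_align() offsets skb->data by NET_IP_ALIGN (2);
	 * clear the low bits so the DMA engine gets a 32-bit-aligned start
	 * address (tse_rx() later drops the 2 leading pad bytes).
	 */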
	rxbuffer->dma_addr &= (dma_addr_t)~3;
	rxbuffer->len = len;
	return 0;
}

static void tse_free_rx_buffer(struct altera_tse_private *priv,
			       struct tse_buffer *rxbuffer)
{
	dma_addr_t dma_addr = rxbuffer->dma_addr;
	struct sk_buff *skb = rxbuffer->skb;

	if (skb != NULL) {
		if (dma_addr)
			dma_unmap_single(priv->device, dma_addr,
					 rxbuffer->len,
					 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		rxbuffer->skb = NULL;
		rxbuffer->dma_addr = 0;
	}
}

/* Unmap and free Tx buffer resources
 */
static void tse_free_tx_buffer(struct altera_tse_private *priv,
			       struct tse_buffer *buffer)
{
	if (buffer->dma_addr) {
		if (buffer->mapped_as_page)
			dma_unmap_page(priv->device, buffer->dma_addr,
				       buffer->len, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device, buffer->dma_addr,
					 buffer->len, DMA_TO_DEVICE);
		buffer->dma_addr = 0;
	}
	if (buffer->skb) {
		dev_kfree_skb_any(buffer->skb);
		buffer->skb = NULL;
	}
}

static int alloc_init_skbufs(struct altera_tse_private *priv)
{
	unsigned int rx_descs = priv->rx_ring_size;
	unsigned int tx_descs = priv->tx_ring_size;
	int ret = -ENOMEM;
	int i;

	/* Create Rx ring buffer */
	priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto err_rx_ring;

	/* Create Tx ring buffer */
	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->tx_ring)
		goto err_tx_ring;

	priv->tx_cons = 0;
	priv->tx_prod = 0;

	/* Init Rx ring */
	for (i = 0; i < rx_descs; i++) {
		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
					 priv->rx_dma_buf_sz);
		if (ret)
			goto err_init_rx_buffers;
	}

	priv->rx_cons = 0;
	priv->rx_prod = 0;

	return 0;
err_init_rx_buffers:
	while (--i >= 0)
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	kfree(priv->tx_ring);
err_tx_ring:
	kfree(priv->rx_ring);
err_rx_ring:
	return ret;
}

static void free_skbufs(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned int rx_descs = priv->rx_ring_size;
	unsigned int tx_descs = priv->tx_ring_size;
	int i;

	/* Release the DMA TX/RX socket buffers */
	for (i = 0; i < rx_descs; i++)
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	for (i = 0; i < tx_descs; i++)
		tse_free_tx_buffer(priv, &priv->tx_ring[i]);

	kfree(priv->tx_ring);
}

/* Reallocate the skb for the reception process
 */
static inline void tse_rx_refill(struct altera_tse_private *priv)
{
	unsigned int rxsize = priv->rx_ring_size;
	unsigned int entry;
	int ret;

	for (; priv->rx_cons - priv->rx_prod > 0;
			priv->rx_prod++) {
		entry = priv->rx_prod % rxsize;
		if (likely(priv->rx_ring[entry].skb == NULL)) {
			ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
				priv->rx_dma_buf_sz);
			if (unlikely(ret != 0))
				break;
			priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
		}
	}
}

/* Pull out the VLAN tag and fix up the packet
 */
static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	u16 vid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    !__vlan_get_tag(skb, &vid)) {
		eth_hdr = (struct ethhdr *)skb->data;
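		/* Shift the 12 bytes of dst/src MAC address forward over the
		 * 4-byte tag, advance skb->data past it, and hand the tag to
		 * the stack via the hwaccel field instead.
		 */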
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}

/* Receive a packet: retrieve and pass over to upper levels
 */
static int tse_rx(struct altera_tse_private *priv, int limit)
{
	unsigned int entry = priv->rx_cons % priv->rx_ring_size;
	unsigned int next_entry;
	unsigned int count = 0;
	struct sk_buff *skb;
	u32 rxstatus;
	u16 pktlength;
	u16 pktstatus;

	/* Check for count < limit first, as get_rx_status() changes the
	 * response FIFO, so we must process the next packet after calling
	 * get_rx_status() if a response is pending.
	 * (Reading the last byte of the response pops the value from the FIFO.)
	 */
	while ((count < limit) &&
	       ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
		pktstatus = rxstatus >> 16;
		pktlength = rxstatus & 0xffff;

		if ((pktstatus & 0xFF) || (pktlength == 0))
			netdev_err(priv->dev,
				   "RCV pktstatus %08X pktlength %08X\n",
				   pktstatus, pktlength);

		/* DMA transfer from TSE starts with 2 additional bytes for
		 * IP payload alignment. Status returned by get_rx_status()
		 * contains DMA transfer length. Packet is 2 bytes shorter.
		 */
		pktlength -= 2;

		count++;
		next_entry = (++priv->rx_cons) % priv->rx_ring_size;

		skb = priv->rx_ring[entry].skb;
		if (unlikely(!skb)) {
			netdev_err(priv->dev,
				   "%s: Inconsistent Rx descriptor chain\n",
				   __func__);
			priv->dev->stats.rx_dropped++;
			break;
		}
		priv->rx_ring[entry].skb = NULL;

		skb_put(skb, pktlength);

		dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
				 priv->rx_ring[entry].len, DMA_FROM_DEVICE);

		if (netif_msg_pktdata(priv)) {
			netdev_info(priv->dev, "frame received %d bytes\n",
				    pktlength);
			print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
				       16, 1, skb->data, pktlength, true);
		}

		tse_rx_vlan(priv->dev, skb);

		skb->protocol = eth_type_trans(skb, priv->dev);
		skb_checksum_none_assert(skb);

		napi_gro_receive(&priv->napi, skb);

		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += pktlength;

		entry = next_entry;

		tse_rx_refill(priv);
	}

	return count;
}

/* Reclaim resources after transmission completes
 */
static int tse_tx_complete(struct altera_tse_private *priv)
{
	unsigned int txsize = priv->tx_ring_size;
	struct tse_buffer *tx_buff;
	unsigned int entry;
	int txcomplete = 0;
	u32 ready;

	spin_lock(&priv->tx_lock);

	ready = priv->dmaops->tx_completions(priv);

	/* Free sent buffers */
	while (ready && (priv->tx_cons != priv->tx_prod)) {
		entry = priv->tx_cons % txsize;
		tx_buff = &priv->tx_ring[entry];

		if (netif_msg_tx_done(priv))
			netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
				   __func__, priv->tx_prod, priv->tx_cons);

		if (likely(tx_buff->skb))
			priv->dev->stats.tx_packets++;

		tse_free_tx_buffer(priv, tx_buff);
		priv->tx_cons++;

		txcomplete++;
		ready--;
	}

	if (unlikely(netif_queue_stopped(priv->dev) &&
		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
		if (netif_msg_tx_done(priv))
			netdev_dbg(priv->dev, "%s: restart transmit\n",
				   __func__);
		netif_wake_queue(priv->dev);
	}

	spin_unlock(&priv->tx_lock);
	return txcomplete;
}

/* NAPI polling function
 */
static int tse_poll(struct napi_struct *napi, int budget)
{
	struct altera_tse_private *priv =
			container_of(napi, struct altera_tse_private, napi);
	unsigned long flags;
	int rxcomplete = 0;

	tse_tx_complete(priv);

	rxcomplete = tse_rx(priv, budget);

	if (rxcomplete < budget) {

		napi_complete_done(napi, rxcomplete);

		netdev_dbg(priv->dev,
			   "NAPI Complete, did %d packets with budget %d\n",
			   rxcomplete, budget);

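		/* Work is done for this round: re-arm RX/TX interrupts under
		 * the IRQ lock. Had the budget been exhausted, NAPI would
		 * simply poll us again instead.
		 */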
		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
		priv->dmaops->enable_rxirq(priv);
		priv->dmaops->enable_txirq(priv);
		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
	}
	return rxcomplete;
}

/* DMA TX & RX FIFO interrupt routing
 */
static irqreturn_t altera_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct altera_tse_private *priv;

	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}
	priv = netdev_priv(dev);

	spin_lock(&priv->rxdma_irq_lock);
	/* reset IRQs */
	priv->dmaops->clear_rxirq(priv);
	priv->dmaops->clear_txirq(priv);
	spin_unlock(&priv->rxdma_irq_lock);

	if (likely(napi_schedule_prep(&priv->napi))) {
		spin_lock(&priv->rxdma_irq_lock);
		priv->dmaops->disable_rxirq(priv);
		priv->dmaops->disable_txirq(priv);
		spin_unlock(&priv->rxdma_irq_lock);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

/* Transmit a packet (called by the kernel). Dispatches either the SGDMA
 * or the mSGDMA transmit method. Assumes no scatter/gather support, i.e.
 * a single physically contiguous fragment starting at skb->data, of
 * length skb_headlen(skb).
 */
static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	unsigned int txsize = priv->tx_ring_size;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct tse_buffer *buffer = NULL;
	netdev_tx_t ret = NETDEV_TX_OK;
	dma_addr_t dma_addr;
	unsigned int entry;

	spin_lock_bh(&priv->tx_lock);

	if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx list full when queue awake\n",
				   __func__);
		}
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Map the first skb fragment */
	entry = priv->tx_prod % txsize;
	buffer = &priv->tx_ring[entry];

	dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		ret = NETDEV_TX_OK;
		goto out;
	}

	buffer->skb = skb;
	buffer->dma_addr = dma_addr;
	buffer->len = nopaged_len;

	priv->dmaops->tx_buffer(priv, buffer);

	skb_tx_timestamp(skb);

	priv->tx_prod++;
	dev->stats.tx_bytes += skb->len;

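	/* Stop the queue early while a small slack of free descriptors
	 * remains, so the next xmit never finds the ring completely full.
	 */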
	if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHOLD)) {
		if (netif_msg_hw(priv))
			netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
				   __func__);
		netif_stop_queue(dev);
	}

out:
	spin_unlock_bh(&priv->tx_lock);

	return ret;
}

static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct device_node *np = priv->device->of_node;
	int ret;

	ret = of_get_phy_mode(np, &priv->phy_iface);

	/* Skip getting the PHY address and creating the MDIO bus if no
	 * PHY is present
	 */
	if (ret)
		return 0;

	/* try to get PHY address from device tree, use PHY autodetection if
	 * no valid address is given
	 */

	if (of_property_read_u32(priv->device->of_node, "phy-addr",
				 &priv->phy_addr)) {
		priv->phy_addr = POLL_PHY;
	}

	if (!((priv->phy_addr == POLL_PHY) ||
	      ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
		netdev_err(dev, "invalid phy-addr specified %d\n",
			   priv->phy_addr);
		return -ENODEV;
	}

	/* Create/attach to MDIO bus */
	ret = altera_tse_mdio_create(dev,
				     atomic_add_return(1, &instance_count));

	if (ret)
		return -ENODEV;

	return 0;
}

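/* The first four address octets pack into mac_addr_0 (addr[0] in the least
 * significant byte) and the remaining two into the low half of mac_addr_1.
 */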
static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
	u32 msb;
	u32 lsb;

	msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;

	/* Set primary MAC address */
	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
}

/* MAC software reset.
 * When reset is triggered, the MAC function completes the current
 * transmission or reception, and subsequently disables the transmit and
 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 * counters.
 */
static int reset_mac(struct altera_tse_private *priv)
{
	int counter;
	u32 dat;

	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));

	counter = 0;
	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
				     MAC_CMDCFG_SW_RESET))
			break;
		udelay(1);
	}

	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
		dat &= ~MAC_CMDCFG_SW_RESET;
		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
		return -1;
	}
	return 0;
}

/* Initialize MAC core registers
 */
static int init_mac(struct altera_tse_private *priv)
{
	unsigned int cmd = 0;
	u32 frm_length;

	/* Setup Rx FIFO */
	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
		priv->mac_dev, tse_csroffs(rx_section_empty));

	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
		tse_csroffs(rx_section_full));

	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
		tse_csroffs(rx_almost_empty));

	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
		tse_csroffs(rx_almost_full));

	/* Setup Tx FIFO */
	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
		priv->mac_dev, tse_csroffs(tx_section_empty));

	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
		tse_csroffs(tx_section_full));

	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
		tse_csroffs(tx_almost_empty));

	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
		tse_csroffs(tx_almost_full));

	/* MAC Address Configuration */
	tse_update_mac_addr(priv, priv->dev->dev_addr);

	/* MAC Function Configuration */
	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));

	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
		tse_csroffs(tx_ipg_length));

	/* Enable RX shift 16 so all received frames are aligned on a 16-bit
	 * start address; disable TX shift 16 and CRC omission.
	 */
	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);

	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);

	/* Set the MAC options */
	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
					 * with CRC errors
					 */
	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
	cmd &= ~MAC_CMDCFG_TX_ENA;
	cmd &= ~MAC_CMDCFG_RX_ENA;

	/* Default speed and duplex setting, full/100 */
	cmd &= ~MAC_CMDCFG_HD_ENA;
	cmd &= ~MAC_CMDCFG_ETH_SPEED;
	cmd &= ~MAC_CMDCFG_ENA_10;

	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));

	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
		tse_csroffs(pause_quanta));

	if (netif_msg_hw(priv))
		dev_dbg(priv->device,
			"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);

	return 0;
}

/* Start/stop MAC transmission logic
 */
static void tse_set_mac(struct altera_tse_private *priv, bool enable)
{
	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));

	if (enable)
		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
	else
		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);

	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
}

/* Change the MTU
 */
static int tse_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev)) {
		netdev_err(dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	dev->mtu = new_mtu;
	netdev_update_features(dev);

	return 0;
}

static void altera_tse_set_mcfilter(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i;

	/* clear the hash filter */
	for (i = 0; i < 64; i++)
		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);

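	/* 6-bit hash: one bit per address octet (the XOR of that octet's
	 * eight bits), with addr[5]'s parity as the MSB; each multicast
	 * address enables a single word in the 64-entry hash table.
	 */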
	netdev_for_each_mc_addr(ha, dev) {
		unsigned int hash = 0;
		int mac_octet;

		for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
			unsigned char xor_bit = 0;
			unsigned char octet = ha->addr[mac_octet];
			unsigned int bitshift;

			for (bitshift = 0; bitshift < 8; bitshift++)
				xor_bit ^= ((octet >> bitshift) & 0x01);

			hash = (hash << 1) | xor_bit;
		}
		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
	}
}

static void altera_tse_set_mcfilterall(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int i;

	/* set the hash filter */
	for (i = 0; i < 64; i++)
		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}

/* Set or clear the multicast filter for this adapter
 */
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	spin_lock(&priv->mac_cfg_lock);

	if (dev->flags & IFF_PROMISC)
		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
			    MAC_CMDCFG_PROMIS_EN);

	if (dev->flags & IFF_ALLMULTI)
		altera_tse_set_mcfilterall(dev);
	else
		altera_tse_set_mcfilter(dev);

	spin_unlock(&priv->mac_cfg_lock);
}

/* Set or clear the multicast filter for this adapter
 */
static void tse_set_rx_mode(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	spin_lock(&priv->mac_cfg_lock);

	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
			    MAC_CMDCFG_PROMIS_EN);
	else
		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
			      MAC_CMDCFG_PROMIS_EN);

	spin_unlock(&priv->mac_cfg_lock);
}

/* Open and initialize the interface
 */
static int tse_open(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned long flags;
	int ret = 0;
	int i;

	/* Reset and configure TSE MAC and probe associated PHY */
	ret = priv->dmaops->init_dma(priv);
	if (ret != 0) {
		netdev_err(dev, "Cannot initialize DMA\n");
		goto phy_error;
	}

	if (netif_msg_ifup(priv))
		netdev_warn(dev, "device MAC address %pM\n",
			    dev->dev_addr);

	if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
		netdev_warn(dev, "TSE revision %x\n", priv->revision);

	spin_lock(&priv->mac_cfg_lock);

	ret = reset_mac(priv);
	/* Note that reset_mac will fail if the clocks are gated by the PHY
	 * due to the PHY being put into isolation or power down mode.
	 * This is not an error if reset fails due to no clock.
	 */
	if (ret)
		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);

	ret = init_mac(priv);
	spin_unlock(&priv->mac_cfg_lock);
	if (ret) {
		netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
		goto alloc_skbuf_error;
	}

	priv->dmaops->reset_dma(priv);

	/* Create and initialize the TX/RX descriptors chains. */
	priv->rx_ring_size = dma_rx_num;
	priv->tx_ring_size = dma_tx_num;
	ret = alloc_init_skbufs(priv);
	if (ret) {
		netdev_err(dev, "DMA descriptors initialization failed\n");
		goto alloc_skbuf_error;
	}

	/* Register RX interrupt */
	ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		netdev_err(dev, "Unable to register RX interrupt %d\n",
			   priv->rx_irq);
		goto init_error;
	}

	/* Register TX interrupt */
	ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		netdev_err(dev, "Unable to register TX interrupt %d\n",
			   priv->tx_irq);
		goto tx_request_irq_error;
	}

	/* Enable DMA interrupts */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
	priv->dmaops->enable_rxirq(priv);
	priv->dmaops->enable_txirq(priv);

	/* Setup RX descriptor chain */
	for (i = 0; i < priv->rx_ring_size; i++)
		priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);

	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	ret = phylink_of_phy_connect(priv->phylink, priv->device->of_node, 0);
	if (ret) {
		netdev_err(dev, "could not connect phylink (%d)\n", ret);
		goto tx_request_irq_error;
	}
	phylink_start(priv->phylink);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	priv->dmaops->start_rxdma(priv);

	/* Start MAC Rx/Tx */
	spin_lock(&priv->mac_cfg_lock);
	tse_set_mac(priv, true);
	spin_unlock(&priv->mac_cfg_lock);

	return 0;

tx_request_irq_error:
	free_irq(priv->rx_irq, dev);
init_error:
	free_skbufs(dev);
alloc_skbuf_error:
phy_error:
	return ret;
}

/* Stop TSE MAC interface and put the device in an inactive state
 */
static int tse_shutdown(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned long flags;
	int ret;

	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	/* Disable DMA interrupts */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
	priv->dmaops->disable_rxirq(priv);
	priv->dmaops->disable_txirq(priv);
	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	/* Free the IRQ lines */
	free_irq(priv->rx_irq, dev);
	free_irq(priv->tx_irq, dev);

	/* disable and reset the MAC, empties fifo */
	spin_lock(&priv->mac_cfg_lock);
	spin_lock(&priv->tx_lock);

	ret = reset_mac(priv);
	/* Note that reset_mac will fail if the clocks are gated by the PHY
	 * due to the PHY being put into isolation or power down mode.
	 * This is not an error if reset fails due to no clock.
	 */
	if (ret)
		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
	priv->dmaops->reset_dma(priv);
	free_skbufs(dev);

	spin_unlock(&priv->tx_lock);
	spin_unlock(&priv->mac_cfg_lock);

	priv->dmaops->uninit_dma(priv);

	return 0;
}

static struct net_device_ops altera_tse_netdev_ops = {
	.ndo_open		= tse_open,
	.ndo_stop		= tse_shutdown,
	.ndo_start_xmit		= tse_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_rx_mode	= tse_set_rx_mode,
	.ndo_change_mtu		= tse_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static void alt_tse_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct altera_tse_private *priv = netdev_priv(ndev);

	spin_lock(&priv->mac_cfg_lock);
	reset_mac(priv);
	tse_set_mac(priv, true);
	spin_unlock(&priv->mac_cfg_lock);
}

static void alt_tse_mac_link_down(struct phylink_config *config,
				  unsigned int mode, phy_interface_t interface)
{
}

static void alt_tse_mac_link_up(struct phylink_config *config,
				struct phy_device *phy, unsigned int mode,
				phy_interface_t interface, int speed,
				int duplex, bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct altera_tse_private *priv = netdev_priv(ndev);
	u32 ctrl;

	ctrl = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	ctrl &= ~(MAC_CMDCFG_ENA_10 | MAC_CMDCFG_ETH_SPEED | MAC_CMDCFG_HD_ENA);

	if (duplex == DUPLEX_HALF)
		ctrl |= MAC_CMDCFG_HD_ENA;

	if (speed == SPEED_1000)
		ctrl |= MAC_CMDCFG_ETH_SPEED;
	else if (speed == SPEED_10)
		ctrl |= MAC_CMDCFG_ENA_10;

	spin_lock(&priv->mac_cfg_lock);
	csrwr32(ctrl, priv->mac_dev, tse_csroffs(command_config));
	spin_unlock(&priv->mac_cfg_lock);
}

static struct phylink_pcs *alt_tse_select_pcs(struct phylink_config *config,
					      phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct altera_tse_private *priv = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    interface == PHY_INTERFACE_MODE_1000BASEX)
		return priv->pcs;
	else
		return NULL;
}

static const struct phylink_mac_ops alt_tse_phylink_ops = {
	.mac_config = alt_tse_mac_config,
	.mac_link_down = alt_tse_mac_link_down,
	.mac_link_up = alt_tse_mac_link_up,
	.mac_select_pcs = alt_tse_select_pcs,
};

static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr)
{
	struct device *device = &pdev->dev;
	struct resource *region;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		dev_err(device, "resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res), dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap(device, region->start,
			    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap of %s failed!\n", name);
		return -ENOMEM;
	}

	return 0;
}

/* Probe Altera TSE MAC device
 */
static int altera_tse_probe(struct platform_device *pdev)
{
	struct regmap_config pcs_regmap_cfg;
	struct altera_tse_private *priv;
	struct mdio_regmap_config mrc;
	struct resource *control_port;
	struct regmap *pcs_regmap;
	struct resource *dma_res;
	struct resource *pcs_res;
	struct mii_bus *pcs_bus;
	struct net_device *ndev;
	void __iomem *descmap;
	int ret = -ENODEV;

	ndev = alloc_etherdev(sizeof(struct altera_tse_private));
	if (!ndev) {
		dev_err(&pdev->dev, "Could not allocate network device\n");
		return -ENODEV;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->device = &pdev->dev;
	priv->dev = ndev;
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	priv->dmaops = device_get_match_data(&pdev->dev);

	if (priv->dmaops &&
	    priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
		/* Get the mapped address to the SGDMA descriptor memory */
		ret = request_and_map(pdev, "s1", &dma_res, &descmap);
		if (ret)
			goto err_free_netdev;

		/* Start of that memory is for transmit descriptors */
		priv->tx_dma_desc = descmap;

		/* First half is for tx descriptors, other half for rx */
		priv->txdescmem = resource_size(dma_res)/2;

		priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;

		priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
						     priv->txdescmem));
		priv->rxdescmem = resource_size(dma_res)/2;
		priv->rxdescmem_busaddr = dma_res->start;
		priv->rxdescmem_busaddr += priv->txdescmem;

		if (upper_32_bits(priv->rxdescmem_busaddr)) {
			dev_dbg(priv->device,
				"SGDMA bus addresses greater than 32-bits\n");
			ret = -EINVAL;
			goto err_free_netdev;
		}
		if (upper_32_bits(priv->txdescmem_busaddr)) {
			dev_dbg(priv->device,
				"SGDMA bus addresses greater than 32-bits\n");
			ret = -EINVAL;
			goto err_free_netdev;
		}
	} else if (priv->dmaops &&
		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
		ret = request_and_map(pdev, "rx_resp", &dma_res,
				      &priv->rx_dma_resp);
		if (ret)
			goto err_free_netdev;

		ret = request_and_map(pdev, "tx_desc", &dma_res,
				      &priv->tx_dma_desc);
		if (ret)
			goto err_free_netdev;

		priv->txdescmem = resource_size(dma_res);
		priv->txdescmem_busaddr = dma_res->start;

		ret = request_and_map(pdev, "rx_desc", &dma_res,
				      &priv->rx_dma_desc);
		if (ret)
			goto err_free_netdev;

		priv->rxdescmem = resource_size(dma_res);
		priv->rxdescmem_busaddr = dma_res->start;

	} else {
		ret = -ENODEV;
		goto err_free_netdev;
	}

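	/* Prefer the DMA engine's native addressing width (64-bit for mSGDMA,
	 * 32-bit for SGDMA); fall back to a 32-bit mask if the platform
	 * cannot satisfy the wider one.
	 */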
	if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
		dma_set_coherent_mask(priv->device,
				      DMA_BIT_MASK(priv->dmaops->dmamask));
	} else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
	} else {
		ret = -EIO;
		goto err_free_netdev;
	}

	/* MAC address space */
	ret = request_and_map(pdev, "control_port", &control_port,
			      (void __iomem **)&priv->mac_dev);
	if (ret)
		goto err_free_netdev;

	/* xSGDMA Rx Dispatcher address space */
	ret = request_and_map(pdev, "rx_csr", &dma_res,
			      &priv->rx_dma_csr);
	if (ret)
		goto err_free_netdev;

	/* xSGDMA Tx Dispatcher address space */
	ret = request_and_map(pdev, "tx_csr", &dma_res,
			      &priv->tx_dma_csr);
	if (ret)
		goto err_free_netdev;

	memset(&pcs_regmap_cfg, 0, sizeof(pcs_regmap_cfg));
	memset(&mrc, 0, sizeof(mrc));
	/* SGMII PCS address space. The location can vary depending on how the
	 * IP is integrated. We can have a resource dedicated to it at a
	 * specific address, but if that's not the case, we fall back to
	 * mdio_phy0 within the MAC's address space.
	 */
	ret = request_and_map(pdev, "pcs", &pcs_res, &priv->pcs_base);
	if (ret) {
		/* If we can't find a dedicated resource for the PCS, fall
		 * back to the internal PCS, which has a different address
		 * stride.
		 */
		priv->pcs_base = priv->mac_dev + tse_csroffs(mdio_phy0);
		pcs_regmap_cfg.reg_bits = 32;
		/* Values are MDIO-like values, on 16 bits */
		pcs_regmap_cfg.val_bits = 16;
		pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(2);
	} else {
		pcs_regmap_cfg.reg_bits = 16;
		pcs_regmap_cfg.val_bits = 16;
		pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(1);
	}

	/* Create a regmap for the PCS so that it can be used by the PCS driver */
	pcs_regmap = devm_regmap_init_mmio(&pdev->dev, priv->pcs_base,
					   &pcs_regmap_cfg);
	if (IS_ERR(pcs_regmap)) {
		ret = PTR_ERR(pcs_regmap);
		goto err_free_netdev;
	}
	mrc.regmap = pcs_regmap;
	mrc.parent = &pdev->dev;
	mrc.valid_addr = 0x0;
	mrc.autoscan = false;

	/* Rx IRQ */
	priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
	if (priv->rx_irq == -ENXIO) {
		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* Tx IRQ */
	priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
	if (priv->tx_irq == -ENXIO) {
		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* get FIFO depths from device tree */
	if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
				 &priv->rx_fifo_depth)) {
		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
				 &priv->tx_fifo_depth)) {
		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* get hash filter settings for this instance */
	priv->hash_filter =
		of_property_read_bool(pdev->dev.of_node,
				      "altr,has-hash-multicast-filter");

	/* Set hash filter to not set for now until the
	 * multicast filter receive issue is debugged
	 */
	priv->hash_filter = 0;

	/* get supplemental address settings for this instance */
	priv->added_unicast =
		of_property_read_bool(pdev->dev.of_node,
				      "altr,has-supplementary-unicast");

	priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
	/* Max MTU is 1500, ETH_DATA_LEN */
	priv->dev->max_mtu = ETH_DATA_LEN;

	/* Get the max mtu from the device tree. Note that the
	 * "max-frame-size" parameter is actually max mtu. Definition
	 * in the ePAPR v1.1 spec and usage differ, so go with usage.
	 */
	of_property_read_u32(pdev->dev.of_node, "max-frame-size",
			     &priv->dev->max_mtu);

	/* The DMA buffer size already accounts for an alignment bias
	 * to avoid unaligned access exceptions for the NIOS processor.
	 */
	priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;

	/* get default MAC address from device tree */
	ret = of_get_ethdev_address(pdev->dev.of_node, ndev);
	if (ret)
		eth_hw_addr_random(ndev);

	/* get phy addr and create mdio */
	ret = altera_tse_phy_get_addr_mdio_create(ndev);

	if (ret)
		goto err_free_netdev;

	/* initialize netdev */
	ndev->mem_start = control_port->start;
	ndev->mem_end = control_port->end;
	ndev->netdev_ops = &altera_tse_netdev_ops;
	altera_tse_set_ethtool_ops(ndev);

	altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;

	if (priv->hash_filter)
		altera_tse_netdev_ops.ndo_set_rx_mode =
			tse_set_rx_mode_hashfilter;

	/* Scatter/gather IO is not supported,
	 * so it is turned off
	 */
	ndev->hw_features &= ~NETIF_F_SG;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;

	/* VLAN offloading of tagging, stripping and filtering is not
	 * supported by hardware, but driver will accommodate the
	 * extra 4-byte VLAN tag for processing by upper layers
	 */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	/* setup NAPI interface */
	netif_napi_add(ndev, &priv->napi, tse_poll);

	spin_lock_init(&priv->mac_cfg_lock);
	spin_lock_init(&priv->tx_lock);
	spin_lock_init(&priv->rxdma_irq_lock);

	netif_carrier_off(ndev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register TSE net device\n");
		goto err_register_netdev;
	}

	platform_set_drvdata(pdev, ndev);

	priv->revision = ioread32(&priv->mac_dev->megacore_revision);

	if (netif_msg_probe(priv))
		dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
			 (priv->revision >> 8) & 0xff,
			 priv->revision & 0xff,
			 (unsigned long) control_port->start, priv->rx_irq,
			 priv->tx_irq);

	snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", ndev->name);
	pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc);
	if (IS_ERR(pcs_bus)) {
		ret = PTR_ERR(pcs_bus);
		goto err_init_pcs;
	}

	priv->pcs = lynx_pcs_create_mdiodev(pcs_bus, 0);
	if (IS_ERR(priv->pcs)) {
		ret = PTR_ERR(priv->pcs);
		goto err_init_pcs;
	}

	priv->phylink_config.dev = &ndev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
						MAC_100 | MAC_1000FD;

	phy_interface_set_rgmii(priv->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_MII,
		  priv->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_GMII,
		  priv->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_SGMII,
		  priv->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_1000BASEX,
		  priv->phylink_config.supported_interfaces);

	priv->phylink = phylink_create(&priv->phylink_config,
				       of_fwnode_handle(priv->device->of_node),
				       priv->phy_iface, &alt_tse_phylink_ops);
	if (IS_ERR(priv->phylink)) {
		dev_err(&pdev->dev, "failed to create phylink\n");
		ret = PTR_ERR(priv->phylink);
		goto err_init_phylink;
	}

	return 0;
err_init_phylink:
	lynx_pcs_destroy(priv->pcs);
err_init_pcs:
	unregister_netdev(ndev);
err_register_netdev:
	netif_napi_del(&priv->napi);
	altera_tse_mdio_destroy(ndev);
err_free_netdev:
	free_netdev(ndev);
	return ret;
}

/* Remove Altera TSE MAC device
 */
static void altera_tse_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct altera_tse_private *priv = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);
	altera_tse_mdio_destroy(ndev);
	unregister_netdev(ndev);
	phylink_destroy(priv->phylink);
	lynx_pcs_destroy(priv->pcs);

	free_netdev(ndev);
}

static const struct altera_dmaops altera_dtype_sgdma = {
	.altera_dtype = ALTERA_DTYPE_SGDMA,
	.dmamask = 32,
	.reset_dma = sgdma_reset,
	.enable_txirq = sgdma_enable_txirq,
	.enable_rxirq = sgdma_enable_rxirq,
	.disable_txirq = sgdma_disable_txirq,
	.disable_rxirq = sgdma_disable_rxirq,
	.clear_txirq = sgdma_clear_txirq,
	.clear_rxirq = sgdma_clear_rxirq,
	.tx_buffer = sgdma_tx_buffer,
	.tx_completions = sgdma_tx_completions,
	.add_rx_desc = sgdma_add_rx_desc,
	.get_rx_status = sgdma_rx_status,
	.init_dma = sgdma_initialize,
	.uninit_dma = sgdma_uninitialize,
	.start_rxdma = sgdma_start_rxdma,
};

static const struct altera_dmaops altera_dtype_msgdma = {
	.altera_dtype = ALTERA_DTYPE_MSGDMA,
	.dmamask = 64,
	.reset_dma = msgdma_reset,
	.enable_txirq = msgdma_enable_txirq,
	.enable_rxirq = msgdma_enable_rxirq,
	.disable_txirq = msgdma_disable_txirq,
	.disable_rxirq = msgdma_disable_rxirq,
	.clear_txirq = msgdma_clear_txirq,
	.clear_rxirq = msgdma_clear_rxirq,
	.tx_buffer = msgdma_tx_buffer,
	.tx_completions = msgdma_tx_completions,
	.add_rx_desc = msgdma_add_rx_desc,
	.get_rx_status = msgdma_rx_status,
	.init_dma = msgdma_initialize,
	.uninit_dma = msgdma_uninitialize,
	.start_rxdma = msgdma_start_rxdma,
};

static const struct of_device_id altera_tse_ids[] = {
	{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
	{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
	{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
	{},
};
MODULE_DEVICE_TABLE(of, altera_tse_ids);

static struct platform_driver altera_tse_driver = {
	.probe		= altera_tse_probe,
	.remove_new	= altera_tse_remove,
	.suspend	= NULL,
	.resume		= NULL,
	.driver		= {
		.name	= ALTERA_TSE_RESOURCE_NAME,
		.of_match_table = altera_tse_ids,
	},
};

module_platform_driver(altera_tse_driver);

MODULE_AUTHOR("Altera Corporation");
MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
MODULE_LICENSE("GPL v2");
v4.6
 
   1/* Altera Triple-Speed Ethernet MAC driver
   2 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
   3 *
   4 * Contributors:
   5 *   Dalon Westergreen
   6 *   Thomas Chou
   7 *   Ian Abbott
   8 *   Yuriy Kozlov
   9 *   Tobias Klauser
  10 *   Andriy Smolskyy
  11 *   Roman Bulgakov
  12 *   Dmytro Mytarchuk
  13 *   Matthew Gerlach
  14 *
  15 * Original driver contributed by SLS.
  16 * Major updates contributed by GlobalLogic
  17 *
  18 * This program is free software; you can redistribute it and/or modify it
  19 * under the terms and conditions of the GNU General Public License,
  20 * version 2, as published by the Free Software Foundation.
  21 *
  22 * This program is distributed in the hope it will be useful, but WITHOUT
  23 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  24 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  25 * more details.
  26 *
  27 * You should have received a copy of the GNU General Public License along with
  28 * this program.  If not, see <http://www.gnu.org/licenses/>.
  29 */
  30
  31#include <linux/atomic.h>
  32#include <linux/delay.h>
  33#include <linux/etherdevice.h>
  34#include <linux/if_vlan.h>
  35#include <linux/init.h>
  36#include <linux/interrupt.h>
  37#include <linux/io.h>
  38#include <linux/kernel.h>
  39#include <linux/module.h>
 
 
  40#include <linux/netdevice.h>
  41#include <linux/of_device.h>
  42#include <linux/of_mdio.h>
  43#include <linux/of_net.h>
  44#include <linux/of_platform.h>
  45#include <linux/phy.h>
  46#include <linux/platform_device.h>
 
 
  47#include <linux/skbuff.h>
  48#include <asm/cacheflush.h>
  49
  50#include "altera_utils.h"
  51#include "altera_tse.h"
  52#include "altera_sgdma.h"
  53#include "altera_msgdma.h"
  54
  55static atomic_t instance_count = ATOMIC_INIT(~0);
  56/* Module parameters */
  57static int debug = -1;
  58module_param(debug, int, S_IRUGO | S_IWUSR);
  59MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
  60
  61static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
  62					NETIF_MSG_LINK | NETIF_MSG_IFUP |
  63					NETIF_MSG_IFDOWN);
  64
  65#define RX_DESCRIPTORS 64
  66static int dma_rx_num = RX_DESCRIPTORS;
  67module_param(dma_rx_num, int, S_IRUGO | S_IWUSR);
  68MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");
  69
  70#define TX_DESCRIPTORS 64
  71static int dma_tx_num = TX_DESCRIPTORS;
  72module_param(dma_tx_num, int, S_IRUGO | S_IWUSR);
  73MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
  74
  75
  76#define POLL_PHY (-1)
  77
  78/* Make sure DMA buffer size is larger than the max frame size
  79 * plus some alignment offset and a VLAN header. If the max frame size is
  80 * 1518, a VLAN header would be additional 4 bytes and additional
  81 * headroom for alignment is 2 bytes, 2048 is just fine.
  82 */
  83#define ALTERA_RXDMABUFFER_SIZE	2048
  84
  85/* Allow network stack to resume queueing packets after we've
  86 * finished transmitting at least 1/4 of the packets in the queue.
  87 */
  88#define TSE_TX_THRESH(x)	(x->tx_ring_size / 4)
  89
  90#define TXQUEUESTOP_THRESHHOLD	2
  91
  92static const struct of_device_id altera_tse_ids[];
  93
  94static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  95{
  96	return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
  97}
  98
  99/* MDIO specific functions
 100 */
 101static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 102{
 103	struct net_device *ndev = bus->priv;
 104	struct altera_tse_private *priv = netdev_priv(ndev);
 105
 106	/* set MDIO address */
 107	csrwr32((mii_id & 0x1f), priv->mac_dev,
 108		tse_csroffs(mdio_phy1_addr));
 109
 110	/* get the data */
 111	return csrrd32(priv->mac_dev,
 112		       tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
 113}
 114
 115static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 116				 u16 value)
 117{
 118	struct net_device *ndev = bus->priv;
 119	struct altera_tse_private *priv = netdev_priv(ndev);
 120
 121	/* set MDIO address */
 122	csrwr32((mii_id & 0x1f), priv->mac_dev,
 123		tse_csroffs(mdio_phy1_addr));
 124
 125	/* write the data */
 126	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
 127	return 0;
 128}
 129
 130static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
 131{
 132	struct altera_tse_private *priv = netdev_priv(dev);
 133	int ret;
 134	struct device_node *mdio_node = NULL;
 
 135	struct mii_bus *mdio = NULL;
 136	struct device_node *child_node = NULL;
 137
 138	for_each_child_of_node(priv->device->of_node, child_node) {
 139		if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
 140			mdio_node = child_node;
 141			break;
 142		}
 143	}
 144
 145	if (mdio_node) {
 146		netdev_dbg(dev, "FOUND MDIO subnode\n");
 147	} else {
 148		netdev_dbg(dev, "NO MDIO subnode\n");
 149		return 0;
 150	}
 151
 152	mdio = mdiobus_alloc();
 153	if (mdio == NULL) {
 154		netdev_err(dev, "Error allocating MDIO bus\n");
 155		return -ENOMEM;
 
 156	}
 157
 158	mdio->name = ALTERA_TSE_RESOURCE_NAME;
 159	mdio->read = &altera_tse_mdio_read;
 160	mdio->write = &altera_tse_mdio_write;
 161	snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);
 162
 163	mdio->priv = dev;
 164	mdio->parent = priv->device;
 165
 166	ret = of_mdiobus_register(mdio, mdio_node);
 167	if (ret != 0) {
 168		netdev_err(dev, "Cannot register MDIO bus %s\n",
 169			   mdio->id);
 170		goto out_free_mdio;
 171	}
 
 172
 173	if (netif_msg_drv(priv))
 174		netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
 175
 176	priv->mdio = mdio;
 177	return 0;
 178out_free_mdio:
 179	mdiobus_free(mdio);
 180	mdio = NULL;
 
 
 181	return ret;
 182}
 183
 184static void altera_tse_mdio_destroy(struct net_device *dev)
 185{
 186	struct altera_tse_private *priv = netdev_priv(dev);
 187
 188	if (priv->mdio == NULL)
 189		return;
 190
 191	if (netif_msg_drv(priv))
 192		netdev_info(dev, "MDIO bus %s: removed\n",
 193			    priv->mdio->id);
 194
 195	mdiobus_unregister(priv->mdio);
 196	mdiobus_free(priv->mdio);
 197	priv->mdio = NULL;
 198}
 199
 200static int tse_init_rx_buffer(struct altera_tse_private *priv,
 201			      struct tse_buffer *rxbuffer, int len)
 202{
 203	rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
 204	if (!rxbuffer->skb)
 205		return -ENOMEM;
 206
 207	rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
 208						len,
 209						DMA_FROM_DEVICE);
 210
 211	if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
 212		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
 213		dev_kfree_skb_any(rxbuffer->skb);
 214		return -EINVAL;
 215	}
 216	rxbuffer->dma_addr &= (dma_addr_t)~3;
 217	rxbuffer->len = len;
 218	return 0;
 219}
 220
 221static void tse_free_rx_buffer(struct altera_tse_private *priv,
 222			       struct tse_buffer *rxbuffer)
 223{
 
 224	struct sk_buff *skb = rxbuffer->skb;
 225	dma_addr_t dma_addr = rxbuffer->dma_addr;
 226
 227	if (skb != NULL) {
 228		if (dma_addr)
 229			dma_unmap_single(priv->device, dma_addr,
 230					 rxbuffer->len,
 231					 DMA_FROM_DEVICE);
 232		dev_kfree_skb_any(skb);
 233		rxbuffer->skb = NULL;
 234		rxbuffer->dma_addr = 0;
 235	}
 236}
 237
 238/* Unmap and free Tx buffer resources
 239 */
 240static void tse_free_tx_buffer(struct altera_tse_private *priv,
 241			       struct tse_buffer *buffer)
 242{
 243	if (buffer->dma_addr) {
 244		if (buffer->mapped_as_page)
 245			dma_unmap_page(priv->device, buffer->dma_addr,
 246				       buffer->len, DMA_TO_DEVICE);
 247		else
 248			dma_unmap_single(priv->device, buffer->dma_addr,
 249					 buffer->len, DMA_TO_DEVICE);
 250		buffer->dma_addr = 0;
 251	}
 252	if (buffer->skb) {
 253		dev_kfree_skb_any(buffer->skb);
 254		buffer->skb = NULL;
 255	}
 256}
 257
 258static int alloc_init_skbufs(struct altera_tse_private *priv)
 259{
 260	unsigned int rx_descs = priv->rx_ring_size;
 261	unsigned int tx_descs = priv->tx_ring_size;
 262	int ret = -ENOMEM;
 263	int i;
 264
 265	/* Create Rx ring buffer */
 266	priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
 267				GFP_KERNEL);
 268	if (!priv->rx_ring)
 269		goto err_rx_ring;
 270
 271	/* Create Tx ring buffer */
 272	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
 273				GFP_KERNEL);
 274	if (!priv->tx_ring)
 275		goto err_tx_ring;
 276
 277	priv->tx_cons = 0;
 278	priv->tx_prod = 0;
 279
 280	/* Init Rx ring */
 281	for (i = 0; i < rx_descs; i++) {
 282		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
 283					 priv->rx_dma_buf_sz);
 284		if (ret)
 285			goto err_init_rx_buffers;
 286	}
 287
 288	priv->rx_cons = 0;
 289	priv->rx_prod = 0;
 290
 291	return 0;
 292err_init_rx_buffers:
 293	while (--i >= 0)
 294		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
 295	kfree(priv->tx_ring);
 296err_tx_ring:
 297	kfree(priv->rx_ring);
 298err_rx_ring:
 299	return ret;
 300}
 301
 302static void free_skbufs(struct net_device *dev)
 303{
 304	struct altera_tse_private *priv = netdev_priv(dev);
 305	unsigned int rx_descs = priv->rx_ring_size;
 306	unsigned int tx_descs = priv->tx_ring_size;
 307	int i;
 308
 309	/* Release the DMA TX/RX socket buffers */
 310	for (i = 0; i < rx_descs; i++)
 311		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
 312	for (i = 0; i < tx_descs; i++)
 313		tse_free_tx_buffer(priv, &priv->tx_ring[i]);
  314
	kfree(priv->rx_ring);
  316	kfree(priv->tx_ring);
 317}
 318
 319/* Reallocate the skb for the reception process
 320 */
 321static inline void tse_rx_refill(struct altera_tse_private *priv)
 322{
 323	unsigned int rxsize = priv->rx_ring_size;
 324	unsigned int entry;
 325	int ret;
 326
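	/* rx_cons counts buffers handed to the stack and rx_prod buffers
	 * refilled; their (unsigned, wrap-safe) difference is the number of
	 * ring slots still missing a fresh skb.
	 */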
 327	for (; priv->rx_cons - priv->rx_prod > 0;
 328			priv->rx_prod++) {
 329		entry = priv->rx_prod % rxsize;
 330		if (likely(priv->rx_ring[entry].skb == NULL)) {
 331			ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
 332				priv->rx_dma_buf_sz);
 333			if (unlikely(ret != 0))
 334				break;
 335			priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
 336		}
 337	}
 338}
 339
 340/* Pull out the VLAN tag and fix up the packet
 341 */
 342static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
 343{
 344	struct ethhdr *eth_hdr;
 345	u16 vid;
 
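	/* For example, a tagged frame arrives as
	 *   [dst(6)][src(6)][0x8100][TCI(2)][type/len(2)][payload ...]
	 * The twelve MAC-address bytes are moved forward over the 4-byte
	 * tag, skb_pull() drops the tag, and the TCI is recorded via
	 * __vlan_hwaccel_put_tag() for the stack.
	 */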
 346	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 347	    !__vlan_get_tag(skb, &vid)) {
 348		eth_hdr = (struct ethhdr *)skb->data;
 349		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
 350		skb_pull(skb, VLAN_HLEN);
 351		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 352	}
 353}
 354
 355/* Receive a packet: retrieve and pass over to upper levels
 356 */
 357static int tse_rx(struct altera_tse_private *priv, int limit)
 358{
 359	unsigned int count = 0;
 360	unsigned int next_entry;
 361	struct sk_buff *skb;
 362	unsigned int entry = priv->rx_cons % priv->rx_ring_size;
 363	u32 rxstatus;
 364	u16 pktlength;
 365	u16 pktstatus;
 366
  367	/* Check for count < limit first as get_rx_status is changing
  368	 * the response-fifo so we must process the next packet
  369	 * after calling get_rx_status if a response is pending.
  370	 * (reading the last byte of the response pops the value from the fifo.)
  371	 */
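	/* get_rx_status() packs the response word: bits 31..16 carry the
	 * frame status flags and bits 15..0 the DMA transfer length.
	 */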
 372	while ((count < limit) &&
 373	       ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
 374		pktstatus = rxstatus >> 16;
 375		pktlength = rxstatus & 0xffff;
 376
 377		if ((pktstatus & 0xFF) || (pktlength == 0))
 378			netdev_err(priv->dev,
 379				   "RCV pktstatus %08X pktlength %08X\n",
 380				   pktstatus, pktlength);
 381
  382		/* DMA transfer from TSE starts with 2 additional bytes for
 383		 * IP payload alignment. Status returned by get_rx_status()
 384		 * contains DMA transfer length. Packet is 2 bytes shorter.
 385		 */
 386		pktlength -= 2;
 387
 388		count++;
 389		next_entry = (++priv->rx_cons) % priv->rx_ring_size;
 390
 391		skb = priv->rx_ring[entry].skb;
 392		if (unlikely(!skb)) {
 393			netdev_err(priv->dev,
 394				   "%s: Inconsistent Rx descriptor chain\n",
 395				   __func__);
 396			priv->dev->stats.rx_dropped++;
 397			break;
 398		}
 399		priv->rx_ring[entry].skb = NULL;
 400
 401		skb_put(skb, pktlength);
 402
 403		/* make cache consistent with receive packet buffer */
 404		dma_sync_single_for_cpu(priv->device,
 405					priv->rx_ring[entry].dma_addr,
 406					priv->rx_ring[entry].len,
 407					DMA_FROM_DEVICE);
 408
 409		dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
 410				 priv->rx_ring[entry].len, DMA_FROM_DEVICE);
 411
 412		if (netif_msg_pktdata(priv)) {
 413			netdev_info(priv->dev, "frame received %d bytes\n",
 414				    pktlength);
 415			print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
 416				       16, 1, skb->data, pktlength, true);
 417		}
 418
 419		tse_rx_vlan(priv->dev, skb);
 420
 421		skb->protocol = eth_type_trans(skb, priv->dev);
 422		skb_checksum_none_assert(skb);
 423
 424		napi_gro_receive(&priv->napi, skb);
 425
 426		priv->dev->stats.rx_packets++;
 427		priv->dev->stats.rx_bytes += pktlength;
 428
 429		entry = next_entry;
 430
 431		tse_rx_refill(priv);
 432	}
 433
 434	return count;
 435}
 436
 437/* Reclaim resources after transmission completes
 438 */
 439static int tse_tx_complete(struct altera_tse_private *priv)
 440{
 441	unsigned int txsize = priv->tx_ring_size;
 442	u32 ready;
 443	unsigned int entry;
 444	struct tse_buffer *tx_buff;
 445	int txcomplete = 0;
 446
 447	spin_lock(&priv->tx_lock);
 448
 449	ready = priv->dmaops->tx_completions(priv);
 450
 451	/* Free sent buffers */
 452	while (ready && (priv->tx_cons != priv->tx_prod)) {
 453		entry = priv->tx_cons % txsize;
 454		tx_buff = &priv->tx_ring[entry];
 455
 456		if (netif_msg_tx_done(priv))
 457			netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
 458				   __func__, priv->tx_prod, priv->tx_cons);
 459
 460		if (likely(tx_buff->skb))
 461			priv->dev->stats.tx_packets++;
 462
 463		tse_free_tx_buffer(priv, tx_buff);
 464		priv->tx_cons++;
 465
 466		txcomplete++;
 467		ready--;
 468	}
 469
 470	if (unlikely(netif_queue_stopped(priv->dev) &&
 471		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
 472		netif_tx_lock(priv->dev);
 473		if (netif_queue_stopped(priv->dev) &&
 474		    tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
 475			if (netif_msg_tx_done(priv))
 476				netdev_dbg(priv->dev, "%s: restart transmit\n",
 477					   __func__);
 478			netif_wake_queue(priv->dev);
 479		}
 480		netif_tx_unlock(priv->dev);
 481	}
 482
 483	spin_unlock(&priv->tx_lock);
 484	return txcomplete;
 485}
 486
 487/* NAPI polling function
 488 */
 489static int tse_poll(struct napi_struct *napi, int budget)
 490{
 491	struct altera_tse_private *priv =
 492			container_of(napi, struct altera_tse_private, napi);
 493	int rxcomplete = 0;
  494	unsigned long flags;
 495
 496	tse_tx_complete(priv);
 497
 498	rxcomplete = tse_rx(priv, budget);
 499
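	/* Receiving fewer packets than the budget means all pending work is
	 * done: per the NAPI contract, complete polling and re-arm the
	 * RX/TX interrupts.
	 */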
 500	if (rxcomplete < budget) {
 502		napi_complete(napi);
 503
 504		netdev_dbg(priv->dev,
 505			   "NAPI Complete, did %d packets with budget %d\n",
 506			   rxcomplete, budget);
 507
 508		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
 509		priv->dmaops->enable_rxirq(priv);
 510		priv->dmaops->enable_txirq(priv);
 511		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 512	}
 513	return rxcomplete;
 514}
 515
 516/* DMA TX & RX FIFO interrupt routing
 517 */
 518static irqreturn_t altera_isr(int irq, void *dev_id)
 519{
 520	struct net_device *dev = dev_id;
 521	struct altera_tse_private *priv;
 522
 523	if (unlikely(!dev)) {
 524		pr_err("%s: invalid dev pointer\n", __func__);
 525		return IRQ_NONE;
 526	}
 527	priv = netdev_priv(dev);
 528
 529	spin_lock(&priv->rxdma_irq_lock);
 530	/* reset IRQs */
 531	priv->dmaops->clear_rxirq(priv);
 532	priv->dmaops->clear_txirq(priv);
 533	spin_unlock(&priv->rxdma_irq_lock);
 534
 535	if (likely(napi_schedule_prep(&priv->napi))) {
 536		spin_lock(&priv->rxdma_irq_lock);
 537		priv->dmaops->disable_rxirq(priv);
 538		priv->dmaops->disable_txirq(priv);
 539		spin_unlock(&priv->rxdma_irq_lock);
 540		__napi_schedule(&priv->napi);
 541	}
  542
 544	return IRQ_HANDLED;
 545}
 546
  547	/* Transmit a packet (called by the kernel). Dispatches to
  548	 * either the SGDMA or the MSGDMA transmit method; neither
  549	 * supports scatter/gather, so the driver assumes there is
  550	 * only one physically contiguous fragment starting at
  551	 * skb->data, for the length of skb_headlen(skb).
  552	 */
 554static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
 555{
 556	struct altera_tse_private *priv = netdev_priv(dev);
 557	unsigned int txsize = priv->tx_ring_size;
 558	unsigned int entry;
 559	struct tse_buffer *buffer = NULL;
 560	int nfrags = skb_shinfo(skb)->nr_frags;
 561	unsigned int nopaged_len = skb_headlen(skb);
 562	enum netdev_tx ret = NETDEV_TX_OK;
 563	dma_addr_t dma_addr;
 564
 565	spin_lock_bh(&priv->tx_lock);
 566
 567	if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
 568		if (!netif_queue_stopped(dev)) {
 569			netif_stop_queue(dev);
 570			/* This is a hard error, log it. */
 571			netdev_err(priv->dev,
 572				   "%s: Tx list full when queue awake\n",
 573				   __func__);
 574		}
 575		ret = NETDEV_TX_BUSY;
 576		goto out;
 577	}
 578
 579	/* Map the first skb fragment */
 580	entry = priv->tx_prod % txsize;
 581	buffer = &priv->tx_ring[entry];
 582
 583	dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
 584				  DMA_TO_DEVICE);
 585	if (dma_mapping_error(priv->device, dma_addr)) {
 586		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);	/* NETDEV_TX_OK implies the skb was consumed */
  587		ret = NETDEV_TX_OK;
 588		goto out;
 589	}
 590
 591	buffer->skb = skb;
 592	buffer->dma_addr = dma_addr;
 593	buffer->len = nopaged_len;
 594
 595	/* Push data out of the cache hierarchy into main memory */
 596	dma_sync_single_for_device(priv->device, buffer->dma_addr,
 597				   buffer->len, DMA_TO_DEVICE);
 598
 599	priv->dmaops->tx_buffer(priv, buffer);
 600
 601	skb_tx_timestamp(skb);
 602
 603	priv->tx_prod++;
 604	dev->stats.tx_bytes += skb->len;
 605
 606	if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) {
 607		if (netif_msg_hw(priv))
 608			netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
 609				   __func__);
 610		netif_stop_queue(dev);
 611	}
 612
 613out:
 614	spin_unlock_bh(&priv->tx_lock);
 615
 616	return ret;
 617}
 618
 619/* Called every time the controller might need to be made
 620 * aware of new link state.  The PHY code conveys this
 621 * information through variables in the phydev structure, and this
 622 * function converts those variables into the appropriate
 623 * register values, and can bring down the device if needed.
 624 */
 625static void altera_tse_adjust_link(struct net_device *dev)
 626{
 627	struct altera_tse_private *priv = netdev_priv(dev);
 628	struct phy_device *phydev = priv->phydev;
 629	int new_state = 0;
 630
 631	/* only change config if there is a link */
 632	spin_lock(&priv->mac_cfg_lock);
 633	if (phydev->link) {
 634		/* Read old config */
 635		u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
 636
 637		/* Check duplex */
 638		if (phydev->duplex != priv->oldduplex) {
 639			new_state = 1;
 640			if (!(phydev->duplex))
 641				cfg_reg |= MAC_CMDCFG_HD_ENA;
 642			else
 643				cfg_reg &= ~MAC_CMDCFG_HD_ENA;
 644
 645			netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
 646				   dev->name, phydev->duplex);
 647
 648			priv->oldduplex = phydev->duplex;
 649		}
 650
 651		/* Check speed */
 652		if (phydev->speed != priv->oldspeed) {
 653			new_state = 1;
 654			switch (phydev->speed) {
 655			case 1000:
 656				cfg_reg |= MAC_CMDCFG_ETH_SPEED;
 657				cfg_reg &= ~MAC_CMDCFG_ENA_10;
 658				break;
 659			case 100:
 660				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
 661				cfg_reg &= ~MAC_CMDCFG_ENA_10;
 662				break;
 663			case 10:
 664				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
 665				cfg_reg |= MAC_CMDCFG_ENA_10;
 666				break;
 667			default:
 668				if (netif_msg_link(priv))
 669					netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
 670						    phydev->speed);
 671				break;
 672			}
 673			priv->oldspeed = phydev->speed;
 674		}
 675		iowrite32(cfg_reg, &priv->mac_dev->command_config);
 676
 677		if (!priv->oldlink) {
 678			new_state = 1;
 679			priv->oldlink = 1;
 680		}
 681	} else if (priv->oldlink) {
 682		new_state = 1;
 683		priv->oldlink = 0;
 684		priv->oldspeed = 0;
 685		priv->oldduplex = -1;
 686	}
 687
 688	if (new_state && netif_msg_link(priv))
 689		phy_print_status(phydev);
 690
 691	spin_unlock(&priv->mac_cfg_lock);
  692	}

 693static struct phy_device *connect_local_phy(struct net_device *dev)
 694{
 695	struct altera_tse_private *priv = netdev_priv(dev);
 696	struct phy_device *phydev = NULL;
 697	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
 698
 699	if (priv->phy_addr != POLL_PHY) {
 700		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
 701			 priv->mdio->id, priv->phy_addr);
 702
 703		netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
 704
 705		phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
 706				     priv->phy_iface);
 707		if (IS_ERR(phydev))
 708			netdev_err(dev, "Could not attach to PHY\n");
 709
 710	} else {
 711		int ret;
 712		phydev = phy_find_first(priv->mdio);
 713		if (phydev == NULL) {
 714			netdev_err(dev, "No PHY found\n");
 715			return phydev;
 716		}
 717
 718		ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
 719				priv->phy_iface);
 720		if (ret != 0) {
 721			netdev_err(dev, "Could not attach to PHY\n");
 722			phydev = NULL;
 723		}
 724	}
 725	return phydev;
 726}
 727
 728static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
 729{
 730	struct altera_tse_private *priv = netdev_priv(dev);
 731	struct device_node *np = priv->device->of_node;
 732	int ret = 0;
 733
 734	priv->phy_iface = of_get_phy_mode(np);
 735
  736	/* Skip PHY address lookup and MDIO bus creation if no PHY is present */
 737	if (!priv->phy_iface)
 738		return 0;
 739
 740	/* try to get PHY address from device tree, use PHY autodetection if
 741	 * no valid address is given
 742	 */
 743
 744	if (of_property_read_u32(priv->device->of_node, "phy-addr",
 745			 &priv->phy_addr)) {
 746		priv->phy_addr = POLL_PHY;
 747	}
 748
 749	if (!((priv->phy_addr == POLL_PHY) ||
 750		  ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
 751		netdev_err(dev, "invalid phy-addr specified %d\n",
 752			priv->phy_addr);
 753		return -ENODEV;
 754	}
 755
 756	/* Create/attach to MDIO bus */
 757	ret = altera_tse_mdio_create(dev,
 758					 atomic_add_return(1, &instance_count));
 759
 760	if (ret)
 761		return -ENODEV;
 762
 763	return 0;
 764}
 765
 766/* Initialize driver's PHY state, and attach to the PHY
 767 */
 768static int init_phy(struct net_device *dev)
 769{
 770	struct altera_tse_private *priv = netdev_priv(dev);
 771	struct phy_device *phydev;
 772	struct device_node *phynode;
 773	bool fixed_link = false;
 774	int rc = 0;
 775
 776	/* Avoid init phy in case of no phy present */
 777	if (!priv->phy_iface)
 778		return 0;
 779
 780	priv->oldlink = 0;
 781	priv->oldspeed = 0;
 782	priv->oldduplex = -1;
 783
 784	phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
 785
 786	if (!phynode) {
 787		/* check if a fixed-link is defined in device-tree */
 788		if (of_phy_is_fixed_link(priv->device->of_node)) {
 789			rc = of_phy_register_fixed_link(priv->device->of_node);
 790			if (rc < 0) {
 791				netdev_err(dev, "cannot register fixed PHY\n");
 792				return rc;
 793			}
 794
 795			/* In the case of a fixed PHY, the DT node associated
 796			 * to the PHY is the Ethernet MAC DT node.
 797			 */
 798			phynode = of_node_get(priv->device->of_node);
 799			fixed_link = true;
 800
 801			netdev_dbg(dev, "fixed-link detected\n");
 802			phydev = of_phy_connect(dev, phynode,
 803						&altera_tse_adjust_link,
 804						0, priv->phy_iface);
 805		} else {
 806			netdev_dbg(dev, "no phy-handle found\n");
 807			if (!priv->mdio) {
 808				netdev_err(dev, "No phy-handle nor local mdio specified\n");
 809				return -ENODEV;
 810			}
 811			phydev = connect_local_phy(dev);
 812		}
 813	} else {
 814		netdev_dbg(dev, "phy-handle found\n");
 815		phydev = of_phy_connect(dev, phynode,
 816			&altera_tse_adjust_link, 0, priv->phy_iface);
 817	}
 818
 819	if (!phydev) {
 820		netdev_err(dev, "Could not find the PHY\n");
 821		return -ENODEV;
 822	}
 823
 824	/* Stop Advertising 1000BASE Capability if interface is not GMII
 825	 * Note: Checkpatch throws CHECKs for the camel case defines below,
 826	 * it's ok to ignore.
 827	 */
 828	if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
 829	    (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
 830		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
 831					 SUPPORTED_1000baseT_Full);
 832
 833	/* Broken HW is sometimes missing the pull-up resistor on the
 834	 * MDIO line, which results in reads to non-existent devices returning
 835	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
 836	 * device as well. If a fixed-link is used the phy_id is always 0.
 837	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
 838	 */
 839	if ((phydev->phy_id == 0) && !fixed_link) {
 840		netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
 841		phy_disconnect(phydev);
 842		return -ENODEV;
 843	}
 844
 845	netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
 846		   phydev->mdio.addr, phydev->phy_id, phydev->link);
 847
 848	priv->phydev = phydev;
 849	return 0;
 850}
 851
 852static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 853{
 854	u32 msb;
 855	u32 lsb;
 856
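	/* The MAC takes the address split across two registers in
	 * little-endian octet order; e.g. 00:11:22:33:44:55 is written as
	 * msb = 0x33221100 and lsb = 0x00005544.
	 */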
 857	msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
 858	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 859
 860	/* Set primary MAC address */
 861	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
 862	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 863}
 864
 865/* MAC software reset.
 866 * When reset is triggered, the MAC function completes the current
 867 * transmission or reception, and subsequently disables the transmit and
 868 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 869 * counters.
 870 */
 871static int reset_mac(struct altera_tse_private *priv)
 872{
 873	int counter;
 874	u32 dat;
 875
 876	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 877	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 878	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
 879	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 880
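	/* SW_RESET self-clears when the reset sequence completes; poll it
	 * for up to ALTERA_TSE_SW_RESET_WATCHDOG_CNTR microseconds before
	 * giving up and clearing the bit by hand.
	 */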
 881	counter = 0;
 882	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
 883		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
 884				     MAC_CMDCFG_SW_RESET))
 885			break;
 886		udelay(1);
 887	}
 888
 889	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
 890		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 891		dat &= ~MAC_CMDCFG_SW_RESET;
 892		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 893		return -1;
 894	}
 895	return 0;
 896}
 897
 898/* Initialize MAC core registers
  899 */
 900static int init_mac(struct altera_tse_private *priv)
 901{
 902	unsigned int cmd = 0;
 903	u32 frm_length;
 904
 905	/* Setup Rx FIFO */
 906	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
 907		priv->mac_dev, tse_csroffs(rx_section_empty));
 908
 909	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
 910		tse_csroffs(rx_section_full));
 911
 912	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
 913		tse_csroffs(rx_almost_empty));
 914
 915	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
 916		tse_csroffs(rx_almost_full));
 917
 918	/* Setup Tx FIFO */
 919	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
 920		priv->mac_dev, tse_csroffs(tx_section_empty));
 921
 922	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
 923		tse_csroffs(tx_section_full));
 924
 925	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
 926		tse_csroffs(tx_almost_empty));
 927
 928	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
 929		tse_csroffs(tx_almost_full));
 930
 931	/* MAC Address Configuration */
 932	tse_update_mac_addr(priv, priv->dev->dev_addr);
 933
 934	/* MAC Function Configuration */
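	/* Maximum frame length is header + MTU + FCS; with the default MTU
	 * of 1500 this programs 14 + 1500 + 4 = 1518 octets.
	 */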
 935	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
 936	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
 937
 938	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
 939		tse_csroffs(tx_ipg_length));
 940
  941	/* Enable RX shift 16 for alignment of all received frames on a
  942	 * 16-bit start address, and disable TX shift 16
  943	 */
 944	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
 945		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
 946
 947	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
 948		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
 949		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 950
 951	/* Set the MAC options */
 952	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 953	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
 954	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
 955	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
 956					 * with CRC errors
 957					 */
 958	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
 959	cmd &= ~MAC_CMDCFG_TX_ENA;
 960	cmd &= ~MAC_CMDCFG_RX_ENA;
 961
 962	/* Default speed and duplex setting, full/100 */
 963	cmd &= ~MAC_CMDCFG_HD_ENA;
 964	cmd &= ~MAC_CMDCFG_ETH_SPEED;
 965	cmd &= ~MAC_CMDCFG_ENA_10;
 966
 967	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
 968
 969	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
 970		tse_csroffs(pause_quanta));
 971
 972	if (netif_msg_hw(priv))
 973		dev_dbg(priv->device,
 974			"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);
 975
 976	return 0;
 977}
 978
 979/* Start/stop MAC transmission logic
 980 */
 981static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 982{
 983	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 984
 985	if (enable)
 986		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
 987	else
 988		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 989
 990	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 991}
 992
 993/* Change the MTU
 994 */
 995static int tse_change_mtu(struct net_device *dev, int new_mtu)
 996{
 997	struct altera_tse_private *priv = netdev_priv(dev);
 998	unsigned int max_mtu = priv->max_mtu;
 999	unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN;
1000
1001	if (netif_running(dev)) {
1002		netdev_err(dev, "must be stopped to change its MTU\n");
1003		return -EBUSY;
1004	}
1005
1006	if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) {
1007		netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu);
1008		return -EINVAL;
1009	}
1010
1011	dev->mtu = new_mtu;
1012	netdev_update_features(dev);
1013
1014	return 0;
1015}
1016
1017static void altera_tse_set_mcfilter(struct net_device *dev)
1018{
1019	struct altera_tse_private *priv = netdev_priv(dev);
1020	int i;
1021	struct netdev_hw_addr *ha;
1022
1023	/* clear the hash filter */
1024	for (i = 0; i < 64; i++)
1025		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
1026
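	/* The hash is 6 bits wide: one bit per address octet, each the XOR
	 * (parity) of that octet's bits, with addr[5] providing the MSB.
	 * For example, 01:00:5e:00:00:01 (IPv4 224.0.0.1) gives parity bits
	 * 1,0,0,1,0,1 from addr[5]..addr[0], i.e. bin 0x25.
	 */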
1027	netdev_for_each_mc_addr(ha, dev) {
1028		unsigned int hash = 0;
1029		int mac_octet;
1030
1031		for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
1032			unsigned char xor_bit = 0;
1033			unsigned char octet = ha->addr[mac_octet];
1034			unsigned int bitshift;
1035
1036			for (bitshift = 0; bitshift < 8; bitshift++)
1037				xor_bit ^= ((octet >> bitshift) & 0x01);
1038
1039			hash = (hash << 1) | xor_bit;
1040		}
1041		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
1042	}
1043}
  1044
1046static void altera_tse_set_mcfilterall(struct net_device *dev)
1047{
1048	struct altera_tse_private *priv = netdev_priv(dev);
1049	int i;
1050
1051	/* set the hash filter */
1052	for (i = 0; i < 64; i++)
1053		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
1054}
1055
1056/* Set or clear the multicast filter for this adaptor
1057 */
1058static void tse_set_rx_mode_hashfilter(struct net_device *dev)
1059{
1060	struct altera_tse_private *priv = netdev_priv(dev);
1061
1062	spin_lock(&priv->mac_cfg_lock);
1063
1064	if (dev->flags & IFF_PROMISC)
1065		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1066			    MAC_CMDCFG_PROMIS_EN);
1067
1068	if (dev->flags & IFF_ALLMULTI)
1069		altera_tse_set_mcfilterall(dev);
1070	else
1071		altera_tse_set_mcfilter(dev);
1072
1073	spin_unlock(&priv->mac_cfg_lock);
1074}
1075
1076/* Set or clear the multicast filter for this adaptor
1077 */
1078static void tse_set_rx_mode(struct net_device *dev)
1079{
1080	struct altera_tse_private *priv = netdev_priv(dev);
1081
1082	spin_lock(&priv->mac_cfg_lock);
1083
1084	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
1085	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
1086		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1087			    MAC_CMDCFG_PROMIS_EN);
1088	else
1089		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
1090			      MAC_CMDCFG_PROMIS_EN);
1091
1092	spin_unlock(&priv->mac_cfg_lock);
1093}
1094
1095/* Open and initialize the interface
1096 */
1097static int tse_open(struct net_device *dev)
1098{
1099	struct altera_tse_private *priv = netdev_priv(dev);
1100	int ret = 0;
1101	int i;
  1102	unsigned long flags;
1103
1104	/* Reset and configure TSE MAC and probe associated PHY */
1105	ret = priv->dmaops->init_dma(priv);
1106	if (ret != 0) {
1107		netdev_err(dev, "Cannot initialize DMA\n");
1108		goto phy_error;
1109	}
1110
1111	if (netif_msg_ifup(priv))
1112		netdev_warn(dev, "device MAC address %pM\n",
1113			    dev->dev_addr);
1114
1115	if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
1116		netdev_warn(dev, "TSE revision %x\n", priv->revision);
1117
1118	spin_lock(&priv->mac_cfg_lock);
1119	ret = reset_mac(priv);
1120	/* Note that reset_mac will fail if the clocks are gated by the PHY
1121	 * due to the PHY being put into isolation or power down mode.
1122	 * This is not an error if reset fails due to no clock.
1123	 */
1124	if (ret)
1125		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
1126
1127	ret = init_mac(priv);
1128	spin_unlock(&priv->mac_cfg_lock);
1129	if (ret) {
1130		netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
1131		goto alloc_skbuf_error;
1132	}
1133
1134	priv->dmaops->reset_dma(priv);
1135
1136	/* Create and initialize the TX/RX descriptors chains. */
1137	priv->rx_ring_size = dma_rx_num;
1138	priv->tx_ring_size = dma_tx_num;
1139	ret = alloc_init_skbufs(priv);
1140	if (ret) {
1141		netdev_err(dev, "DMA descriptors initialization failed\n");
1142		goto alloc_skbuf_error;
1143	}
  1144
1146	/* Register RX interrupt */
1147	ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
1148			  dev->name, dev);
1149	if (ret) {
1150		netdev_err(dev, "Unable to register RX interrupt %d\n",
1151			   priv->rx_irq);
1152		goto init_error;
1153	}
1154
1155	/* Register TX interrupt */
1156	ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
1157			  dev->name, dev);
1158	if (ret) {
1159		netdev_err(dev, "Unable to register TX interrupt %d\n",
1160			   priv->tx_irq);
1161		goto tx_request_irq_error;
1162	}
1163
1164	/* Enable DMA interrupts */
1165	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
1166	priv->dmaops->enable_rxirq(priv);
1167	priv->dmaops->enable_txirq(priv);
1168
1169	/* Setup RX descriptor chain */
1170	for (i = 0; i < priv->rx_ring_size; i++)
1171		priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);
1172
1173	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1174
1175	if (priv->phydev)
1176		phy_start(priv->phydev);
1177
1178	napi_enable(&priv->napi);
1179	netif_start_queue(dev);
1180
1181	priv->dmaops->start_rxdma(priv);
1182
1183	/* Start MAC Rx/Tx */
1184	spin_lock(&priv->mac_cfg_lock);
1185	tse_set_mac(priv, true);
1186	spin_unlock(&priv->mac_cfg_lock);
1187
1188	return 0;
1189
1190tx_request_irq_error:
1191	free_irq(priv->rx_irq, dev);
1192init_error:
1193	free_skbufs(dev);
1194alloc_skbuf_error:
1195phy_error:
1196	return ret;
1197}
1198
1199/* Stop TSE MAC interface and put the device in an inactive state
1200 */
1201static int tse_shutdown(struct net_device *dev)
1202{
1203	struct altera_tse_private *priv = netdev_priv(dev);
1204	int ret;
  1205	unsigned long flags;
1206
1207	/* Stop the PHY */
1208	if (priv->phydev)
1209		phy_stop(priv->phydev);
1210
1211	netif_stop_queue(dev);
1212	napi_disable(&priv->napi);
1213
1214	/* Disable DMA interrupts */
1215	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
1216	priv->dmaops->disable_rxirq(priv);
1217	priv->dmaops->disable_txirq(priv);
1218	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1219
1220	/* Free the IRQ lines */
1221	free_irq(priv->rx_irq, dev);
1222	free_irq(priv->tx_irq, dev);
1223
1224	/* disable and reset the MAC, empties fifo */
1225	spin_lock(&priv->mac_cfg_lock);
1226	spin_lock(&priv->tx_lock);
1227
1228	ret = reset_mac(priv);
1229	/* Note that reset_mac will fail if the clocks are gated by the PHY
1230	 * due to the PHY being put into isolation or power down mode.
1231	 * This is not an error if reset fails due to no clock.
1232	 */
1233	if (ret)
1234		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
1235	priv->dmaops->reset_dma(priv);
1236	free_skbufs(dev);
1237
1238	spin_unlock(&priv->tx_lock);
1239	spin_unlock(&priv->mac_cfg_lock);
1240
1241	priv->dmaops->uninit_dma(priv);
1242
1243	return 0;
1244}
1245
1246static struct net_device_ops altera_tse_netdev_ops = {
1247	.ndo_open		= tse_open,
1248	.ndo_stop		= tse_shutdown,
1249	.ndo_start_xmit		= tse_start_xmit,
1250	.ndo_set_mac_address	= eth_mac_addr,
1251	.ndo_set_rx_mode	= tse_set_rx_mode,
1252	.ndo_change_mtu		= tse_change_mtu,
1253	.ndo_validate_addr	= eth_validate_addr,
1254};
1255
1256static int request_and_map(struct platform_device *pdev, const char *name,
1257			   struct resource **res, void __iomem **ptr)
1258{
1259	struct resource *region;
1260	struct device *device = &pdev->dev;
1261
1262	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
1263	if (*res == NULL) {
1264		dev_err(device, "resource %s not defined\n", name);
1265		return -ENODEV;
1266	}
1267
1268	region = devm_request_mem_region(device, (*res)->start,
1269					 resource_size(*res), dev_name(device));
1270	if (region == NULL) {
1271		dev_err(device, "unable to request %s\n", name);
1272		return -EBUSY;
1273	}
1274
1275	*ptr = devm_ioremap_nocache(device, region->start,
1276				    resource_size(region));
1277	if (*ptr == NULL) {
  1278		dev_err(device, "ioremap_nocache of %s failed!\n", name);
1279		return -ENOMEM;
1280	}
1281
1282	return 0;
1283}
1284
1285/* Probe Altera TSE MAC device
1286 */
1287static int altera_tse_probe(struct platform_device *pdev)
1288{
1289	struct net_device *ndev;
1290	int ret = -ENODEV;
1291	struct resource *control_port;
1292	struct resource *dma_res;
1293	struct altera_tse_private *priv;
1294	const unsigned char *macaddr;
1295	void __iomem *descmap;
1296	const struct of_device_id *of_id = NULL;
1297
1298	ndev = alloc_etherdev(sizeof(struct altera_tse_private));
1299	if (!ndev) {
1300		dev_err(&pdev->dev, "Could not allocate network device\n");
1301		return -ENODEV;
1302	}
1303
1304	SET_NETDEV_DEV(ndev, &pdev->dev);
1305
1306	priv = netdev_priv(ndev);
1307	priv->device = &pdev->dev;
1308	priv->dev = ndev;
1309	priv->msg_enable = netif_msg_init(debug, default_msg_level);
1310
1311	of_id = of_match_device(altera_tse_ids, &pdev->dev);
1312
1313	if (of_id)
1314		priv->dmaops = (struct altera_dmaops *)of_id->data;
  1315
1317	if (priv->dmaops &&
1318	    priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
1319		/* Get the mapped address to the SGDMA descriptor memory */
1320		ret = request_and_map(pdev, "s1", &dma_res, &descmap);
1321		if (ret)
1322			goto err_free_netdev;
1323
1324		/* Start of that memory is for transmit descriptors */
1325		priv->tx_dma_desc = descmap;
1326
  1327		/* First half is for tx descriptors, other half for rx */
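		/* e.g. an 8 KiB "s1" window provides 4 KiB of tx descriptor
		 * space at its base and 4 KiB of rx descriptor space in the
		 * upper half.
		 */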
1328		priv->txdescmem = resource_size(dma_res)/2;
1329
1330		priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;
1331
1332		priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
1333						     priv->txdescmem));
1334		priv->rxdescmem = resource_size(dma_res)/2;
1335		priv->rxdescmem_busaddr = dma_res->start;
1336		priv->rxdescmem_busaddr += priv->txdescmem;
1337
1338		if (upper_32_bits(priv->rxdescmem_busaddr)) {
1339			dev_dbg(priv->device,
1340				"SGDMA bus addresses greater than 32-bits\n");
			ret = -EINVAL;
1341			goto err_free_netdev;
1342		}
1343		if (upper_32_bits(priv->txdescmem_busaddr)) {
1344			dev_dbg(priv->device,
1345				"SGDMA bus addresses greater than 32-bits\n");
			ret = -EINVAL;
1346			goto err_free_netdev;
1347		}
1348	} else if (priv->dmaops &&
1349		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
1350		ret = request_and_map(pdev, "rx_resp", &dma_res,
1351				      &priv->rx_dma_resp);
1352		if (ret)
1353			goto err_free_netdev;
1354
1355		ret = request_and_map(pdev, "tx_desc", &dma_res,
1356				      &priv->tx_dma_desc);
1357		if (ret)
1358			goto err_free_netdev;
1359
1360		priv->txdescmem = resource_size(dma_res);
1361		priv->txdescmem_busaddr = dma_res->start;
1362
1363		ret = request_and_map(pdev, "rx_desc", &dma_res,
1364				      &priv->rx_dma_desc);
1365		if (ret)
1366			goto err_free_netdev;
1367
1368		priv->rxdescmem = resource_size(dma_res);
1369		priv->rxdescmem_busaddr = dma_res->start;
1370
1371	} else {
		ret = -ENODEV;
1372		goto err_free_netdev;
1373	}
1374
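	/* Prefer the DMA engine's native addressing width (64-bit for the
	 * mSGDMA, 32-bit for the SGDMA), falling back to a 32-bit mask.
	 */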
1375	if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
1376		dma_set_coherent_mask(priv->device,
1377				      DMA_BIT_MASK(priv->dmaops->dmamask));
1378	else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
1379		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
  1380	else {
		ret = -EIO;
  1381		goto err_free_netdev;
	}
1382
1383	/* MAC address space */
1384	ret = request_and_map(pdev, "control_port", &control_port,
1385			      (void __iomem **)&priv->mac_dev);
1386	if (ret)
1387		goto err_free_netdev;
1388
1389	/* xSGDMA Rx Dispatcher address space */
1390	ret = request_and_map(pdev, "rx_csr", &dma_res,
1391			      &priv->rx_dma_csr);
1392	if (ret)
1393		goto err_free_netdev;
  1394
1396	/* xSGDMA Tx Dispatcher address space */
1397	ret = request_and_map(pdev, "tx_csr", &dma_res,
1398			      &priv->tx_dma_csr);
1399	if (ret)
1400		goto err_free_netdev;
  1401
1403	/* Rx IRQ */
1404	priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
1405	if (priv->rx_irq == -ENXIO) {
1406		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
1407		ret = -ENXIO;
1408		goto err_free_netdev;
1409	}
1410
1411	/* Tx IRQ */
1412	priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
1413	if (priv->tx_irq == -ENXIO) {
1414		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
1415		ret = -ENXIO;
1416		goto err_free_netdev;
1417	}
1418
1419	/* get FIFO depths from device tree */
1420	if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1421				 &priv->rx_fifo_depth)) {
1422		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
1423		ret = -ENXIO;
1424		goto err_free_netdev;
1425	}
1426
1427	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1428				 &priv->tx_fifo_depth)) {
1429		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1430		ret = -ENXIO;
1431		goto err_free_netdev;
1432	}
1433
1434	/* get hash filter settings for this instance */
1435	priv->hash_filter =
1436		of_property_read_bool(pdev->dev.of_node,
1437				      "altr,has-hash-multicast-filter");
1438
  1439	/* Force the hash filter off for now until the
1440	 * multicast filter receive issue is debugged
1441	 */
1442	priv->hash_filter = 0;
1443
1444	/* get supplemental address settings for this instance */
1445	priv->added_unicast =
1446		of_property_read_bool(pdev->dev.of_node,
1447				      "altr,has-supplementary-unicast");
1448
1449	/* Max MTU is 1500, ETH_DATA_LEN */
1450	priv->max_mtu = ETH_DATA_LEN;
1451
1452	/* Get the max mtu from the device tree. Note that the
1453	 * "max-frame-size" parameter is actually max mtu. Definition
1454	 * in the ePAPR v1.1 spec and usage differ, so go with usage.
1455	 */
1456	of_property_read_u32(pdev->dev.of_node, "max-frame-size",
1457			     &priv->max_mtu);
1458
1459	/* The DMA buffer size already accounts for an alignment bias
  1460	 * to avoid unaligned access exceptions for the NIOS processor.
  1461	 */
1462	priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
1463
1464	/* get default MAC address from device tree */
1465	macaddr = of_get_mac_address(pdev->dev.of_node);
1466	if (macaddr)
1467		ether_addr_copy(ndev->dev_addr, macaddr);
1468	else
1469		eth_hw_addr_random(ndev);
1470
1471	/* get phy addr and create mdio */
1472	ret = altera_tse_phy_get_addr_mdio_create(ndev);
1473
1474	if (ret)
1475		goto err_free_netdev;
1476
1477	/* initialize netdev */
1478	ndev->mem_start = control_port->start;
1479	ndev->mem_end = control_port->end;
1480	ndev->netdev_ops = &altera_tse_netdev_ops;
1481	altera_tse_set_ethtool_ops(ndev);
1482
1483	altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
1484
1485	if (priv->hash_filter)
1486		altera_tse_netdev_ops.ndo_set_rx_mode =
1487			tse_set_rx_mode_hashfilter;
1488
1489	/* Scatter/gather IO is not supported,
1490	 * so it is turned off
1491	 */
1492	ndev->hw_features &= ~NETIF_F_SG;
1493	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
1494
1495	/* VLAN offloading of tagging, stripping and filtering is not
1496	 * supported by hardware, but driver will accommodate the
1497	 * extra 4-byte VLAN tag for processing by upper layers
1498	 */
1499	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1500
1501	/* setup NAPI interface */
1502	netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
1503
1504	spin_lock_init(&priv->mac_cfg_lock);
1505	spin_lock_init(&priv->tx_lock);
1506	spin_lock_init(&priv->rxdma_irq_lock);
1507
1508	netif_carrier_off(ndev);
1509	ret = register_netdev(ndev);
1510	if (ret) {
1511		dev_err(&pdev->dev, "failed to register TSE net device\n");
1512		goto err_register_netdev;
1513	}
1514
1515	platform_set_drvdata(pdev, ndev);
1516
1517	priv->revision = ioread32(&priv->mac_dev->megacore_revision);
1518
1519	if (netif_msg_probe(priv))
1520		dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
1521			 (priv->revision >> 8) & 0xff,
1522			 priv->revision & 0xff,
1523			 (unsigned long) control_port->start, priv->rx_irq,
1524			 priv->tx_irq);
1525
1526	ret = init_phy(ndev);
1527	if (ret != 0) {
1528		netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
1529		goto err_init_phy;
1530	}
1531	return 0;
1532
1533err_init_phy:
1534	unregister_netdev(ndev);
1535err_register_netdev:
1536	netif_napi_del(&priv->napi);
1537	altera_tse_mdio_destroy(ndev);
1538err_free_netdev:
1539	free_netdev(ndev);
1540	return ret;
1541}
1542
1543/* Remove Altera TSE MAC device
1544 */
1545static int altera_tse_remove(struct platform_device *pdev)
1546{
1547	struct net_device *ndev = platform_get_drvdata(pdev);
1548	struct altera_tse_private *priv = netdev_priv(ndev);
1549
1550	if (priv->phydev)
1551		phy_disconnect(priv->phydev);
1552
1553	platform_set_drvdata(pdev, NULL);
1554	altera_tse_mdio_destroy(ndev);
1555	unregister_netdev(ndev);
1556	free_netdev(ndev);
1557
1558	return 0;
1559}
1560
1561static const struct altera_dmaops altera_dtype_sgdma = {
1562	.altera_dtype = ALTERA_DTYPE_SGDMA,
1563	.dmamask = 32,
1564	.reset_dma = sgdma_reset,
1565	.enable_txirq = sgdma_enable_txirq,
1566	.enable_rxirq = sgdma_enable_rxirq,
1567	.disable_txirq = sgdma_disable_txirq,
1568	.disable_rxirq = sgdma_disable_rxirq,
1569	.clear_txirq = sgdma_clear_txirq,
1570	.clear_rxirq = sgdma_clear_rxirq,
1571	.tx_buffer = sgdma_tx_buffer,
1572	.tx_completions = sgdma_tx_completions,
1573	.add_rx_desc = sgdma_add_rx_desc,
1574	.get_rx_status = sgdma_rx_status,
1575	.init_dma = sgdma_initialize,
1576	.uninit_dma = sgdma_uninitialize,
1577	.start_rxdma = sgdma_start_rxdma,
1578};
1579
1580static const struct altera_dmaops altera_dtype_msgdma = {
1581	.altera_dtype = ALTERA_DTYPE_MSGDMA,
1582	.dmamask = 64,
1583	.reset_dma = msgdma_reset,
1584	.enable_txirq = msgdma_enable_txirq,
1585	.enable_rxirq = msgdma_enable_rxirq,
1586	.disable_txirq = msgdma_disable_txirq,
1587	.disable_rxirq = msgdma_disable_rxirq,
1588	.clear_txirq = msgdma_clear_txirq,
1589	.clear_rxirq = msgdma_clear_rxirq,
1590	.tx_buffer = msgdma_tx_buffer,
1591	.tx_completions = msgdma_tx_completions,
1592	.add_rx_desc = msgdma_add_rx_desc,
1593	.get_rx_status = msgdma_rx_status,
1594	.init_dma = msgdma_initialize,
1595	.uninit_dma = msgdma_uninitialize,
1596	.start_rxdma = msgdma_start_rxdma,
1597};
1598
1599static const struct of_device_id altera_tse_ids[] = {
1600	{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
1601	{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
1602	{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
1603	{},
1604};
1605MODULE_DEVICE_TABLE(of, altera_tse_ids);
1606
1607static struct platform_driver altera_tse_driver = {
1608	.probe		= altera_tse_probe,
1609	.remove		= altera_tse_remove,
1610	.suspend	= NULL,
1611	.resume		= NULL,
1612	.driver		= {
1613		.name	= ALTERA_TSE_RESOURCE_NAME,
1614		.of_match_table = altera_tse_ids,
1615	},
1616};
1617
1618module_platform_driver(altera_tse_driver);
1619
1620MODULE_AUTHOR("Altera Corporation");
1621MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
1622MODULE_LICENSE("GPL v2");