/* Altera Triple-Speed Ethernet MAC driver
 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
 *
 * Contributors:
 *   Dalon Westergreen
 *   Thomas Chou
 *   Ian Abbott
 *   Yuriy Kozlov
 *   Tobias Klauser
 *   Andriy Smolskyy
 *   Roman Bulgakov
 *   Dmytro Mytarchuk
 *   Matthew Gerlach
 *
 * Original driver contributed by SLS.
 * Major updates contributed by GlobalLogic
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <asm/cacheflush.h>

#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdma.h"
#include "altera_msgdma.h"

static atomic_t instance_count = ATOMIC_INIT(~0);
/* Module parameters */
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
					NETIF_MSG_LINK | NETIF_MSG_IFUP |
					NETIF_MSG_IFDOWN);

#define RX_DESCRIPTORS 64
static int dma_rx_num = RX_DESCRIPTORS;
module_param(dma_rx_num, int, 0644);
MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");

#define TX_DESCRIPTORS 64
static int dma_tx_num = TX_DESCRIPTORS;
module_param(dma_tx_num, int, 0644);
MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");


#define POLL_PHY (-1)

/* Make sure DMA buffer size is larger than the max frame size
 * plus some alignment offset and a VLAN header. If the max frame size is
 * 1518, a VLAN header would be additional 4 bytes and additional
 * headroom for alignment is 2 bytes, 2048 is just fine.
 */
#define ALTERA_RXDMABUFFER_SIZE	2048
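
/* Editor's illustration (not part of the original driver): the sizing
 * argument above works out to 1518 (max frame) + 4 (VLAN tag) +
 * 2 (alignment headroom) = 1524 bytes, comfortably under 2048. A
 * hedged compile-time sketch of the same check, assuming the standard
 * <linux/if_vlan.h> constants, could read:
 *
 *	BUILD_BUG_ON(ALTERA_RXDMABUFFER_SIZE <
 *		     VLAN_ETH_FRAME_LEN + ETH_FCS_LEN + NET_IP_ALIGN);
 */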

/* Allow network stack to resume queueing packets after we've
 * finished transmitting at least 1/4 of the packets in the queue.
 */
#define TSE_TX_THRESH(x)	(x->tx_ring_size / 4)

#define TXQUEUESTOP_THRESHHOLD	2

static const struct of_device_id altera_tse_ids[];

static inline u32 tse_tx_avail(struct altera_tse_private *priv)
{
	return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}
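
/* Editor's note, a worked example of the free-slot arithmetic above
 * (illustrative values, not from the driver): with tx_ring_size = 64,
 * tx_cons = 10 and tx_prod = 20, 10 + 64 - 20 - 1 = 53 descriptors are
 * free. One slot is always held back so a completely full ring can be
 * distinguished from a completely empty one.
 */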

/* PCS Register read/write functions
 */
static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
{
	return csrrd32(priv->mac_dev,
		       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
}

static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
				u16 value)
{
	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
}

/* Check PCS scratch memory */
static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
{
	sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
	return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
}

/* MDIO specific functions
 */
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct net_device *ndev = bus->priv;
	struct altera_tse_private *priv = netdev_priv(ndev);

	/* set MDIO address */
	csrwr32((mii_id & 0x1f), priv->mac_dev,
		tse_csroffs(mdio_phy1_addr));

	/* get the data */
	return csrrd32(priv->mac_dev,
		       tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
}

static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 value)
{
	struct net_device *ndev = bus->priv;
	struct altera_tse_private *priv = netdev_priv(ndev);

	/* set MDIO address */
	csrwr32((mii_id & 0x1f), priv->mac_dev,
		tse_csroffs(mdio_phy1_addr));

	/* write the data */
	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
	return 0;
}

static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int ret;
	struct device_node *mdio_node = NULL;
	struct mii_bus *mdio = NULL;
	struct device_node *child_node = NULL;

	for_each_child_of_node(priv->device->of_node, child_node) {
		if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
			mdio_node = child_node;
			break;
		}
	}

	if (mdio_node) {
		netdev_dbg(dev, "FOUND MDIO subnode\n");
	} else {
		netdev_dbg(dev, "NO MDIO subnode\n");
		return 0;
	}

	mdio = mdiobus_alloc();
	if (mdio == NULL) {
		netdev_err(dev, "Error allocating MDIO bus\n");
		return -ENOMEM;
	}

	mdio->name = ALTERA_TSE_RESOURCE_NAME;
	mdio->read = &altera_tse_mdio_read;
	mdio->write = &altera_tse_mdio_write;
	snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);

	mdio->priv = dev;
	mdio->parent = priv->device;

	ret = of_mdiobus_register(mdio, mdio_node);
	if (ret != 0) {
		netdev_err(dev, "Cannot register MDIO bus %s\n",
			   mdio->id);
		goto out_free_mdio;
	}

	if (netif_msg_drv(priv))
		netdev_info(dev, "MDIO bus %s: created\n", mdio->id);

	priv->mdio = mdio;
	return 0;
out_free_mdio:
	mdiobus_free(mdio);
	mdio = NULL;
	return ret;
}

static void altera_tse_mdio_destroy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	if (priv->mdio == NULL)
		return;

	if (netif_msg_drv(priv))
		netdev_info(dev, "MDIO bus %s: removed\n",
			    priv->mdio->id);

	mdiobus_unregister(priv->mdio);
	mdiobus_free(priv->mdio);
	priv->mdio = NULL;
}

static int tse_init_rx_buffer(struct altera_tse_private *priv,
			      struct tse_buffer *rxbuffer, int len)
{
	rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
	if (!rxbuffer->skb)
		return -ENOMEM;

	rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
						len,
						DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(rxbuffer->skb);
		return -EINVAL;
	}
	rxbuffer->dma_addr &= (dma_addr_t)~3;
	rxbuffer->len = len;
	return 0;
}
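
/* Editor's note on the "& ~3" above (illustrative addresses, not from
 * the driver): netdev_alloc_skb_ip_align() biases skb->data by
 * NET_IP_ALIGN (2 bytes), so the mapped address typically ends in 0x2.
 * Masking the low two bits, e.g. 0x10001002 -> 0x10001000, hands the
 * DMA engine a 4-byte-aligned start; tse_rx() later compensates for
 * the 2-byte bias when it trims pktlength by 2.
 */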

static void tse_free_rx_buffer(struct altera_tse_private *priv,
			       struct tse_buffer *rxbuffer)
{
	struct sk_buff *skb = rxbuffer->skb;
	dma_addr_t dma_addr = rxbuffer->dma_addr;

	if (skb != NULL) {
		if (dma_addr)
			dma_unmap_single(priv->device, dma_addr,
					 rxbuffer->len,
					 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		rxbuffer->skb = NULL;
		rxbuffer->dma_addr = 0;
	}
}

/* Unmap and free Tx buffer resources
 */
static void tse_free_tx_buffer(struct altera_tse_private *priv,
			       struct tse_buffer *buffer)
{
	if (buffer->dma_addr) {
		if (buffer->mapped_as_page)
			dma_unmap_page(priv->device, buffer->dma_addr,
				       buffer->len, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device, buffer->dma_addr,
					 buffer->len, DMA_TO_DEVICE);
		buffer->dma_addr = 0;
	}
	if (buffer->skb) {
		dev_kfree_skb_any(buffer->skb);
		buffer->skb = NULL;
	}
}

static int alloc_init_skbufs(struct altera_tse_private *priv)
{
	unsigned int rx_descs = priv->rx_ring_size;
	unsigned int tx_descs = priv->tx_ring_size;
	int ret = -ENOMEM;
	int i;

	/* Create Rx ring buffer */
	priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto err_rx_ring;

	/* Create Tx ring buffer */
	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->tx_ring)
		goto err_tx_ring;

	priv->tx_cons = 0;
	priv->tx_prod = 0;

	/* Init Rx ring */
	for (i = 0; i < rx_descs; i++) {
		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
					 priv->rx_dma_buf_sz);
		if (ret)
			goto err_init_rx_buffers;
	}

	priv->rx_cons = 0;
	priv->rx_prod = 0;

	return 0;
err_init_rx_buffers:
	while (--i >= 0)
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	kfree(priv->tx_ring);
err_tx_ring:
	kfree(priv->rx_ring);
err_rx_ring:
	return ret;
}

static void free_skbufs(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned int rx_descs = priv->rx_ring_size;
	unsigned int tx_descs = priv->tx_ring_size;
	int i;

	/* Release the DMA TX/RX socket buffers */
	for (i = 0; i < rx_descs; i++)
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	for (i = 0; i < tx_descs; i++)
		tse_free_tx_buffer(priv, &priv->tx_ring[i]);


	kfree(priv->tx_ring);
}

/* Reallocate the skb for the reception process
 */
static inline void tse_rx_refill(struct altera_tse_private *priv)
{
	unsigned int rxsize = priv->rx_ring_size;
	unsigned int entry;
	int ret;

	for (; priv->rx_cons - priv->rx_prod > 0;
			priv->rx_prod++) {
		entry = priv->rx_prod % rxsize;
		if (likely(priv->rx_ring[entry].skb == NULL)) {
			ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
				priv->rx_dma_buf_sz);
			if (unlikely(ret != 0))
				break;
			priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
		}
	}
}

/* Pull out the VLAN tag and fix up the packet
 */
static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	u16 vid;
	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    !__vlan_get_tag(skb, &vid)) {
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}
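
/* Editor's sketch of the untagging above (byte layout, illustrative):
 *
 *	before: dst[6] src[6] TPID(0x8100)[2] TCI[2] type[2] payload...
 *	after:  dst[6] src[6] type[2] payload...
 *
 * memmove() slides the twelve address bytes up by VLAN_HLEN (4),
 * skb_pull() discards the four dead leading bytes, and the extracted
 * TCI (vid) travels out-of-band via __vlan_hwaccel_put_tag(), so upper
 * layers see an ordinary untagged frame plus tag metadata.
 */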

/* Receive a packet: retrieve and pass over to upper levels
 */
static int tse_rx(struct altera_tse_private *priv, int limit)
{
	unsigned int count = 0;
	unsigned int next_entry;
	struct sk_buff *skb;
	unsigned int entry = priv->rx_cons % priv->rx_ring_size;
	u32 rxstatus;
	u16 pktlength;
	u16 pktstatus;

	/* Check for count < limit first as get_rx_status is changing
	 * the response-fifo so we must process the next packet
	 * after calling get_rx_status if a response is pending.
	 * (reading the last byte of the response pops the value from the fifo.)
	 */
	while ((count < limit) &&
	       ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
		pktstatus = rxstatus >> 16;
		pktlength = rxstatus & 0xffff;

		if ((pktstatus & 0xFF) || (pktlength == 0))
			netdev_err(priv->dev,
				   "RCV pktstatus %08X pktlength %08X\n",
				   pktstatus, pktlength);

		/* DMA transfer from TSE starts with 2 additional bytes for
		 * IP payload alignment. Status returned by get_rx_status()
		 * contains DMA transfer length. Packet is 2 bytes shorter.
		 */
		pktlength -= 2;

		count++;
		next_entry = (++priv->rx_cons) % priv->rx_ring_size;

		skb = priv->rx_ring[entry].skb;
		if (unlikely(!skb)) {
			netdev_err(priv->dev,
				   "%s: Inconsistent Rx descriptor chain\n",
				   __func__);
			priv->dev->stats.rx_dropped++;
			break;
		}
		priv->rx_ring[entry].skb = NULL;

		skb_put(skb, pktlength);

		dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
				 priv->rx_ring[entry].len, DMA_FROM_DEVICE);

		if (netif_msg_pktdata(priv)) {
			netdev_info(priv->dev, "frame received %d bytes\n",
				    pktlength);
			print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
				       16, 1, skb->data, pktlength, true);
		}

		tse_rx_vlan(priv->dev, skb);

		skb->protocol = eth_type_trans(skb, priv->dev);
		skb_checksum_none_assert(skb);

		napi_gro_receive(&priv->napi, skb);

		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += pktlength;

		entry = next_entry;

		tse_rx_refill(priv);
	}

	return count;
}

/* Reclaim resources after transmission completes
 */
static int tse_tx_complete(struct altera_tse_private *priv)
{
	unsigned int txsize = priv->tx_ring_size;
	u32 ready;
	unsigned int entry;
	struct tse_buffer *tx_buff;
	int txcomplete = 0;

	spin_lock(&priv->tx_lock);

	ready = priv->dmaops->tx_completions(priv);

	/* Free sent buffers */
	while (ready && (priv->tx_cons != priv->tx_prod)) {
		entry = priv->tx_cons % txsize;
		tx_buff = &priv->tx_ring[entry];

		if (netif_msg_tx_done(priv))
			netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
				   __func__, priv->tx_prod, priv->tx_cons);

		if (likely(tx_buff->skb))
			priv->dev->stats.tx_packets++;

		tse_free_tx_buffer(priv, tx_buff);
		priv->tx_cons++;

		txcomplete++;
		ready--;
	}

	if (unlikely(netif_queue_stopped(priv->dev) &&
		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
		if (netif_queue_stopped(priv->dev) &&
		    tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
			if (netif_msg_tx_done(priv))
				netdev_dbg(priv->dev, "%s: restart transmit\n",
					   __func__);
			netif_wake_queue(priv->dev);
		}
	}

	spin_unlock(&priv->tx_lock);
	return txcomplete;
}

/* NAPI polling function
 */
static int tse_poll(struct napi_struct *napi, int budget)
{
	struct altera_tse_private *priv =
			container_of(napi, struct altera_tse_private, napi);
	int rxcomplete = 0;
	unsigned long int flags;

	tse_tx_complete(priv);

	rxcomplete = tse_rx(priv, budget);

	if (rxcomplete < budget) {

		napi_complete_done(napi, rxcomplete);

		netdev_dbg(priv->dev,
			   "NAPI Complete, did %d packets with budget %d\n",
			   rxcomplete, budget);

		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
		priv->dmaops->enable_rxirq(priv);
		priv->dmaops->enable_txirq(priv);
		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
	}
	return rxcomplete;
}

/* DMA TX & RX FIFO interrupt routing
 */
static irqreturn_t altera_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct altera_tse_private *priv;

	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}
	priv = netdev_priv(dev);

	spin_lock(&priv->rxdma_irq_lock);
	/* reset IRQs */
	priv->dmaops->clear_rxirq(priv);
	priv->dmaops->clear_txirq(priv);
	spin_unlock(&priv->rxdma_irq_lock);

	if (likely(napi_schedule_prep(&priv->napi))) {
		spin_lock(&priv->rxdma_irq_lock);
		priv->dmaops->disable_rxirq(priv);
		priv->dmaops->disable_txirq(priv);
		spin_unlock(&priv->rxdma_irq_lock);
		__napi_schedule(&priv->napi);
	}


	return IRQ_HANDLED;
}

/* Transmit a packet (called by the kernel). Dispatches
 * either the SGDMA method for transmitting or the
 * MSGDMA method, assumes no scatter/gather support,
 * implying an assumption that there's only one
 * physically contiguous fragment starting at
 * skb->data, for length of skb_headlen(skb).
 */
static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned int txsize = priv->tx_ring_size;
	unsigned int entry;
	struct tse_buffer *buffer = NULL;
	int nfrags = skb_shinfo(skb)->nr_frags;
	unsigned int nopaged_len = skb_headlen(skb);
	enum netdev_tx ret = NETDEV_TX_OK;
	dma_addr_t dma_addr;

	spin_lock_bh(&priv->tx_lock);

	if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx list full when queue awake\n",
				   __func__);
		}
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Map the first skb fragment */
	entry = priv->tx_prod % txsize;
	buffer = &priv->tx_ring[entry];

	dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		ret = NETDEV_TX_OK;
		goto out;
	}

	buffer->skb = skb;
	buffer->dma_addr = dma_addr;
	buffer->len = nopaged_len;

	priv->dmaops->tx_buffer(priv, buffer);

	skb_tx_timestamp(skb);

	priv->tx_prod++;
	dev->stats.tx_bytes += skb->len;

	if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) {
		if (netif_msg_hw(priv))
			netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
				   __func__);
		netif_stop_queue(dev);
	}

out:
	spin_unlock_bh(&priv->tx_lock);

	return ret;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void altera_tse_adjust_link(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int new_state = 0;

	/* only change config if there is a link */
	spin_lock(&priv->mac_cfg_lock);
	if (phydev->link) {
		/* Read old config */
		u32 cfg_reg = ioread32(&priv->mac_dev->command_config);

		/* Check duplex */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				cfg_reg |= MAC_CMDCFG_HD_ENA;
			else
				cfg_reg &= ~MAC_CMDCFG_HD_ENA;

			netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
				   dev->name, phydev->duplex);

			priv->oldduplex = phydev->duplex;
		}

		/* Check speed */
		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				cfg_reg |= MAC_CMDCFG_ETH_SPEED;
				cfg_reg &= ~MAC_CMDCFG_ENA_10;
				break;
			case 100:
				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
				cfg_reg &= ~MAC_CMDCFG_ENA_10;
				break;
			case 10:
				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
				cfg_reg |= MAC_CMDCFG_ENA_10;
				break;
			default:
				if (netif_msg_link(priv))
					netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
						    phydev->speed);
				break;
			}
			priv->oldspeed = phydev->speed;
		}
		iowrite32(cfg_reg, &priv->mac_dev->command_config);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock(&priv->mac_cfg_lock);
}

static struct phy_device *connect_local_phy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];

	if (priv->phy_addr != POLL_PHY) {
		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
			 priv->mdio->id, priv->phy_addr);

		netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
				     priv->phy_iface);
		if (IS_ERR(phydev))
			netdev_err(dev, "Could not attach to PHY\n");

	} else {
		int ret;

		phydev = phy_find_first(priv->mdio);
		if (phydev == NULL) {
			netdev_err(dev, "No PHY found\n");
			return phydev;
		}

		ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
				priv->phy_iface);
		if (ret != 0) {
			netdev_err(dev, "Could not attach to PHY\n");
			phydev = NULL;
		}
	}
	return phydev;
}

static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct device_node *np = priv->device->of_node;
	int ret = 0;

	priv->phy_iface = of_get_phy_mode(np);

	/* Avoid get phy addr and create mdio if no phy is present */
	if (!priv->phy_iface)
		return 0;

	/* try to get PHY address from device tree, use PHY autodetection if
	 * no valid address is given
	 */

	if (of_property_read_u32(priv->device->of_node, "phy-addr",
			 &priv->phy_addr)) {
		priv->phy_addr = POLL_PHY;
	}

	if (!((priv->phy_addr == POLL_PHY) ||
		  ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
		netdev_err(dev, "invalid phy-addr specified %d\n",
			priv->phy_addr);
		return -ENODEV;
	}

	/* Create/attach to MDIO bus */
	ret = altera_tse_mdio_create(dev,
					 atomic_add_return(1, &instance_count));

	if (ret)
		return -ENODEV;

	return 0;
}

/* Initialize driver's PHY state, and attach to the PHY
 */
static int init_phy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct phy_device *phydev;
	struct device_node *phynode;
	bool fixed_link = false;
	int rc = 0;

	/* Avoid init phy in case of no phy present */
	if (!priv->phy_iface)
		return 0;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);

	if (!phynode) {
		/* check if a fixed-link is defined in device-tree */
		if (of_phy_is_fixed_link(priv->device->of_node)) {
			rc = of_phy_register_fixed_link(priv->device->of_node);
			if (rc < 0) {
				netdev_err(dev, "cannot register fixed PHY\n");
				return rc;
			}

			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			phynode = of_node_get(priv->device->of_node);
			fixed_link = true;

			netdev_dbg(dev, "fixed-link detected\n");
			phydev = of_phy_connect(dev, phynode,
						&altera_tse_adjust_link,
						0, priv->phy_iface);
		} else {
			netdev_dbg(dev, "no phy-handle found\n");
			if (!priv->mdio) {
				netdev_err(dev, "No phy-handle nor local mdio specified\n");
				return -ENODEV;
			}
			phydev = connect_local_phy(dev);
		}
	} else {
		netdev_dbg(dev, "phy-handle found\n");
		phydev = of_phy_connect(dev, phynode,
			&altera_tse_adjust_link, 0, priv->phy_iface);
	}
	of_node_put(phynode);

	if (!phydev) {
		netdev_err(dev, "Could not find the PHY\n");
		if (fixed_link)
			of_phy_deregister_fixed_link(priv->device->of_node);
		return -ENODEV;
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII
	 * Note: Checkpatch throws CHECKs for the camel case defines below,
	 * it's ok to ignore.
	 */
	if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
	    (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/* Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well. If a fixed-link is used the phy_id is always 0.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if ((phydev->phy_id == 0) && !fixed_link) {
		netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
		phy_disconnect(phydev);
		return -ENODEV;
	}

	netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
		   phydev->mdio.addr, phydev->phy_id, phydev->link);

	return 0;
}

static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
{
	u32 msb;
	u32 lsb;

	msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;

	/* Set primary MAC address */
	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
}
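
/* Editor's worked example of the register packing above (sample
 * address, not from the driver): for 00:11:22:33:44:55,
 * msb = 0x33221100 lands in mac_addr_0 and lsb = 0x00005544 in
 * mac_addr_1, i.e. addr[0] occupies the least significant byte of the
 * first register.
 */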

/* MAC software reset.
 * When reset is triggered, the MAC function completes the current
 * transmission or reception, and subsequently disables the transmit and
 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 * counters.
 */
static int reset_mac(struct altera_tse_private *priv)
{
	int counter;
	u32 dat;

	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));

	counter = 0;
	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
				     MAC_CMDCFG_SW_RESET))
			break;
		udelay(1);
	}

	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
		dat &= ~MAC_CMDCFG_SW_RESET;
		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
		return -1;
	}
	return 0;
}

/* Initialize MAC core registers
 */
static int init_mac(struct altera_tse_private *priv)
{
	unsigned int cmd = 0;
	u32 frm_length;

	/* Setup Rx FIFO */
	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
		priv->mac_dev, tse_csroffs(rx_section_empty));

	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
		tse_csroffs(rx_section_full));

	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
		tse_csroffs(rx_almost_empty));

	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
		tse_csroffs(rx_almost_full));

	/* Setup Tx FIFO */
	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
		priv->mac_dev, tse_csroffs(tx_section_empty));

	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
		tse_csroffs(tx_section_full));

	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
		tse_csroffs(tx_almost_empty));

	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
		tse_csroffs(tx_almost_full));

	/* MAC Address Configuration */
	tse_update_mac_addr(priv, priv->dev->dev_addr);

	/* MAC Function Configuration */
	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
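
	/* Editor's note (illustrative arithmetic, not from the driver):
	 * with the default MTU of 1500, frm_length = 14 (ETH_HLEN) +
	 * 1500 + 4 (ETH_FCS_LEN) = 1518, the classic maximum untagged
	 * Ethernet frame size the MAC will accept.
	 */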

	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
		tse_csroffs(tx_ipg_length));

	/* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
	 * start address
	 */
	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);

	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);

	/* Set the MAC options */
	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
					 * with CRC errors
					 */
	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
	cmd &= ~MAC_CMDCFG_TX_ENA;
	cmd &= ~MAC_CMDCFG_RX_ENA;

	/* Default speed and duplex setting, full/100 */
	cmd &= ~MAC_CMDCFG_HD_ENA;
	cmd &= ~MAC_CMDCFG_ETH_SPEED;
	cmd &= ~MAC_CMDCFG_ENA_10;

	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));

	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
		tse_csroffs(pause_quanta));

	if (netif_msg_hw(priv))
		dev_dbg(priv->device,
			"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);

	return 0;
}

/* Start/stop MAC transmission logic
 */
static void tse_set_mac(struct altera_tse_private *priv, bool enable)
{
	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));

	if (enable)
		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
	else
		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);

	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
}

/* Change the MTU
 */
static int tse_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev)) {
		netdev_err(dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	dev->mtu = new_mtu;
	netdev_update_features(dev);

	return 0;
}

static void altera_tse_set_mcfilter(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int i;
	struct netdev_hw_addr *ha;

	/* clear the hash filter */
	for (i = 0; i < 64; i++)
		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);

	netdev_for_each_mc_addr(ha, dev) {
		unsigned int hash = 0;
		int mac_octet;

		for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
			unsigned char xor_bit = 0;
			unsigned char octet = ha->addr[mac_octet];
			unsigned int bitshift;

			for (bitshift = 0; bitshift < 8; bitshift++)
				xor_bit ^= ((octet >> bitshift) & 0x01);

			hash = (hash << 1) | xor_bit;
		}
		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
	}
}
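
/* Editor's worked example of the hash above (illustrative address, not
 * from the driver): each octet is folded to its parity bit (XOR of its
 * eight bits), starting at octet 5, and the six parity bits are shifted
 * together into a 6-bit table index. For 01:00:5e:00:00:01 the parities
 * of octets 5..0 are 1,0,0,1,0,1, giving hash = 0b100101 = 37, so word
 * 37 of the 64-entry hash table is written.
 */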

static void altera_tse_set_mcfilterall(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int i;

	/* set the hash filter */
	for (i = 0; i < 64; i++)
		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}

/* Set or clear the multicast filter for this adaptor
 */
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	spin_lock(&priv->mac_cfg_lock);

	if (dev->flags & IFF_PROMISC)
		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
			    MAC_CMDCFG_PROMIS_EN);

	if (dev->flags & IFF_ALLMULTI)
		altera_tse_set_mcfilterall(dev);
	else
		altera_tse_set_mcfilter(dev);

	spin_unlock(&priv->mac_cfg_lock);
}

/* Set or clear the multicast filter for this adaptor
 */
static void tse_set_rx_mode(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	spin_lock(&priv->mac_cfg_lock);

	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
			    MAC_CMDCFG_PROMIS_EN);
	else
		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
			      MAC_CMDCFG_PROMIS_EN);

	spin_unlock(&priv->mac_cfg_lock);
}

/* Initialise (if necessary) the SGMII PCS component
 */
static int init_sgmii_pcs(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int n;
	unsigned int tmp_reg = 0;

	if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
		return 0; /* Nothing to do, not in SGMII mode */

	/* The TSE SGMII PCS block looks a little like a PHY, it is
	 * mapped into the zeroth MDIO space of the MAC and it has
	 * ID registers like a PHY would.  Sadly this is often
	 * configured to zeroes, so don't be surprised if it does
	 * show 0x00000000.
	 */

	if (sgmii_pcs_scratch_test(priv, 0x0000) &&
		sgmii_pcs_scratch_test(priv, 0xffff) &&
		sgmii_pcs_scratch_test(priv, 0xa5a5) &&
		sgmii_pcs_scratch_test(priv, 0x5a5a)) {
		netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
				sgmii_pcs_read(priv, MII_PHYSID1),
				sgmii_pcs_read(priv, MII_PHYSID2));
	} else {
		netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
		return -ENOMEM;
	}

	/* Starting on page 5-29 of the MegaCore Function User Guide
	 * Set SGMII Link timer to 1.6ms
	 */
	sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
	sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);
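
	/* Editor's note (arithmetic illustration, assuming the usual
	 * 125 MHz / 8 ns SGMII PCS clock and that the two registers
	 * concatenate as {timer_1[4:0], timer_0[15:0]}): 0x3_0D40 =
	 * 200000 cycles, and 200000 * 8 ns = 1.6 ms as called for above.
	 */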

	/* Enable SGMII Interface and Enable SGMII Auto Negotiation */
	sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);

	/* Enable Autonegotiation */
	tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
	tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
	sgmii_pcs_write(priv, MII_BMCR, tmp_reg);

	/* Reset PCS block */
	tmp_reg |= BMCR_RESET;
	sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
	for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
		if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
			netdev_info(dev, "SGMII PCS block initialised OK\n");
			return 0;
		}
		udelay(1);
	}

	/* We failed to reset the block, return a timeout */
	netdev_err(dev, "SGMII PCS block reset failed.\n");
	return -ETIMEDOUT;
}

/* Open and initialize the interface
 */
static int tse_open(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int ret = 0;
	int i;
	unsigned long int flags;

	/* Reset and configure TSE MAC and probe associated PHY */
	ret = priv->dmaops->init_dma(priv);
	if (ret != 0) {
		netdev_err(dev, "Cannot initialize DMA\n");
		goto phy_error;
	}

	if (netif_msg_ifup(priv))
		netdev_warn(dev, "device MAC address %pM\n",
			    dev->dev_addr);

	if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
		netdev_warn(dev, "TSE revision %x\n", priv->revision);

	spin_lock(&priv->mac_cfg_lock);
	/* no-op if MAC not operating in SGMII mode */
	ret = init_sgmii_pcs(dev);
	if (ret) {
		netdev_err(dev,
			   "Cannot init the SGMII PCS (error: %d)\n", ret);
		spin_unlock(&priv->mac_cfg_lock);
		goto phy_error;
	}

	ret = reset_mac(priv);
	/* Note that reset_mac will fail if the clocks are gated by the PHY
	 * due to the PHY being put into isolation or power down mode.
	 * This is not an error if reset fails due to no clock.
	 */
	if (ret)
		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);

	ret = init_mac(priv);
	spin_unlock(&priv->mac_cfg_lock);
	if (ret) {
		netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
		goto alloc_skbuf_error;
	}

	priv->dmaops->reset_dma(priv);

	/* Create and initialize the TX/RX descriptors chains. */
	priv->rx_ring_size = dma_rx_num;
	priv->tx_ring_size = dma_tx_num;
	ret = alloc_init_skbufs(priv);
	if (ret) {
		netdev_err(dev, "DMA descriptors initialization failed\n");
		goto alloc_skbuf_error;
	}


	/* Register RX interrupt */
	ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		netdev_err(dev, "Unable to register RX interrupt %d\n",
			   priv->rx_irq);
		goto init_error;
	}

	/* Register TX interrupt */
	ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		netdev_err(dev, "Unable to register TX interrupt %d\n",
			   priv->tx_irq);
		goto tx_request_irq_error;
	}

	/* Enable DMA interrupts */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
	priv->dmaops->enable_rxirq(priv);
	priv->dmaops->enable_txirq(priv);

	/* Setup RX descriptor chain */
	for (i = 0; i < priv->rx_ring_size; i++)
		priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);

	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	if (dev->phydev)
		phy_start(dev->phydev);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	priv->dmaops->start_rxdma(priv);

	/* Start MAC Rx/Tx */
	spin_lock(&priv->mac_cfg_lock);
	tse_set_mac(priv, true);
	spin_unlock(&priv->mac_cfg_lock);

	return 0;

tx_request_irq_error:
	free_irq(priv->rx_irq, dev);
init_error:
	free_skbufs(dev);
alloc_skbuf_error:
phy_error:
	return ret;
}

/* Stop TSE MAC interface and put the device in an inactive state
 */
static int tse_shutdown(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int ret;
	unsigned long int flags;

	/* Stop the PHY */
	if (dev->phydev)
		phy_stop(dev->phydev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	/* Disable DMA interrupts */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
	priv->dmaops->disable_rxirq(priv);
	priv->dmaops->disable_txirq(priv);
	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	/* Free the IRQ lines */
	free_irq(priv->rx_irq, dev);
	free_irq(priv->tx_irq, dev);

	/* disable and reset the MAC, empties fifo */
	spin_lock(&priv->mac_cfg_lock);
	spin_lock(&priv->tx_lock);

	ret = reset_mac(priv);
	/* Note that reset_mac will fail if the clocks are gated by the PHY
	 * due to the PHY being put into isolation or power down mode.
	 * This is not an error if reset fails due to no clock.
	 */
	if (ret)
		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
	priv->dmaops->reset_dma(priv);
	free_skbufs(dev);

	spin_unlock(&priv->tx_lock);
	spin_unlock(&priv->mac_cfg_lock);

	priv->dmaops->uninit_dma(priv);

	return 0;
}

static struct net_device_ops altera_tse_netdev_ops = {
	.ndo_open		= tse_open,
	.ndo_stop		= tse_shutdown,
	.ndo_start_xmit		= tse_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_rx_mode	= tse_set_rx_mode,
	.ndo_change_mtu		= tse_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr)
{
	struct resource *region;
	struct device *device = &pdev->dev;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		dev_err(device, "resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res), dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap_nocache(device, region->start,
				    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap_nocache of %s failed!", name);
		return -ENOMEM;
	}

	return 0;
}

/* Probe Altera TSE MAC device
 */
static int altera_tse_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	int ret = -ENODEV;
	struct resource *control_port;
	struct resource *dma_res;
	struct altera_tse_private *priv;
	const unsigned char *macaddr;
	void __iomem *descmap;
	const struct of_device_id *of_id = NULL;

	ndev = alloc_etherdev(sizeof(struct altera_tse_private));
	if (!ndev) {
		dev_err(&pdev->dev, "Could not allocate network device\n");
		return -ENODEV;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->device = &pdev->dev;
	priv->dev = ndev;
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	of_id = of_match_device(altera_tse_ids, &pdev->dev);

	if (of_id)
		priv->dmaops = (struct altera_dmaops *)of_id->data;


	if (priv->dmaops &&
	    priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
		/* Get the mapped address to the SGDMA descriptor memory */
		ret = request_and_map(pdev, "s1", &dma_res, &descmap);
		if (ret)
			goto err_free_netdev;

		/* Start of that memory is for transmit descriptors */
		priv->tx_dma_desc = descmap;

		/* First half is for tx descriptors, other half for rx */
		priv->txdescmem = resource_size(dma_res)/2;

		priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;

		priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
						     priv->txdescmem));
		priv->rxdescmem = resource_size(dma_res)/2;
		priv->rxdescmem_busaddr = dma_res->start;
		priv->rxdescmem_busaddr += priv->txdescmem;

		if (upper_32_bits(priv->rxdescmem_busaddr)) {
			dev_dbg(priv->device,
				"SGDMA bus addresses greater than 32-bits\n");
			ret = -EINVAL;
			goto err_free_netdev;
		}
		if (upper_32_bits(priv->txdescmem_busaddr)) {
			dev_dbg(priv->device,
				"SGDMA bus addresses greater than 32-bits\n");
			ret = -EINVAL;
			goto err_free_netdev;
		}
	} else if (priv->dmaops &&
		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
		ret = request_and_map(pdev, "rx_resp", &dma_res,
				      &priv->rx_dma_resp);
		if (ret)
			goto err_free_netdev;

		ret = request_and_map(pdev, "tx_desc", &dma_res,
				      &priv->tx_dma_desc);
		if (ret)
			goto err_free_netdev;

		priv->txdescmem = resource_size(dma_res);
		priv->txdescmem_busaddr = dma_res->start;

		ret = request_and_map(pdev, "rx_desc", &dma_res,
				      &priv->rx_dma_desc);
		if (ret)
			goto err_free_netdev;

		priv->rxdescmem = resource_size(dma_res);
		priv->rxdescmem_busaddr = dma_res->start;

	} else {
		goto err_free_netdev;
	}

	if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
		dma_set_coherent_mask(priv->device,
				      DMA_BIT_MASK(priv->dmaops->dmamask));
	else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
	else
		goto err_free_netdev;

	/* MAC address space */
	ret = request_and_map(pdev, "control_port", &control_port,
			      (void __iomem **)&priv->mac_dev);
	if (ret)
		goto err_free_netdev;

	/* xSGDMA Rx Dispatcher address space */
	ret = request_and_map(pdev, "rx_csr", &dma_res,
			      &priv->rx_dma_csr);
	if (ret)
		goto err_free_netdev;


	/* xSGDMA Tx Dispatcher address space */
	ret = request_and_map(pdev, "tx_csr", &dma_res,
			      &priv->tx_dma_csr);
	if (ret)
		goto err_free_netdev;


	/* Rx IRQ */
	priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
	if (priv->rx_irq == -ENXIO) {
		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* Tx IRQ */
	priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
	if (priv->tx_irq == -ENXIO) {
		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* get FIFO depths from device tree */
	if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
				 &priv->rx_fifo_depth)) {
		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
				 &priv->tx_fifo_depth)) {
		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* get hash filter settings for this instance */
	priv->hash_filter =
		of_property_read_bool(pdev->dev.of_node,
				      "altr,has-hash-multicast-filter");

	/* Set hash filter to not set for now until the
	 * multicast filter receive issue is debugged
	 */
	priv->hash_filter = 0;

	/* get supplemental address settings for this instance */
	priv->added_unicast =
		of_property_read_bool(pdev->dev.of_node,
				      "altr,has-supplementary-unicast");

	priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
	/* Max MTU is 1500, ETH_DATA_LEN */
	priv->dev->max_mtu = ETH_DATA_LEN;

	/* Get the max mtu from the device tree. Note that the
	 * "max-frame-size" parameter is actually max mtu. Definition
	 * in the ePAPR v1.1 spec and usage differ, so go with usage.
	 */
	of_property_read_u32(pdev->dev.of_node, "max-frame-size",
			     &priv->dev->max_mtu);

	/* The DMA buffer size already accounts for an alignment bias
	 * to avoid unaligned access exceptions for the NIOS processor.
	 */
	priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;

	/* get default MAC address from device tree */
	macaddr = of_get_mac_address(pdev->dev.of_node);
	if (macaddr)
		ether_addr_copy(ndev->dev_addr, macaddr);
	else
		eth_hw_addr_random(ndev);

	/* get phy addr and create mdio */
	ret = altera_tse_phy_get_addr_mdio_create(ndev);

	if (ret)
		goto err_free_netdev;

	/* initialize netdev */
	ndev->mem_start = control_port->start;
	ndev->mem_end = control_port->end;
	ndev->netdev_ops = &altera_tse_netdev_ops;
	altera_tse_set_ethtool_ops(ndev);

	altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;

	if (priv->hash_filter)
		altera_tse_netdev_ops.ndo_set_rx_mode =
			tse_set_rx_mode_hashfilter;

	/* Scatter/gather IO is not supported,
	 * so it is turned off
	 */
	ndev->hw_features &= ~NETIF_F_SG;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;

	/* VLAN offloading of tagging, stripping and filtering is not
	 * supported by hardware, but driver will accommodate the
	 * extra 4-byte VLAN tag for processing by upper layers
	 */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	/* setup NAPI interface */
	netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);

	spin_lock_init(&priv->mac_cfg_lock);
	spin_lock_init(&priv->tx_lock);
	spin_lock_init(&priv->rxdma_irq_lock);

	netif_carrier_off(ndev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register TSE net device\n");
		goto err_register_netdev;
	}

	platform_set_drvdata(pdev, ndev);

	priv->revision = ioread32(&priv->mac_dev->megacore_revision);

	if (netif_msg_probe(priv))
		dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
			 (priv->revision >> 8) & 0xff,
			 priv->revision & 0xff,
			 (unsigned long) control_port->start, priv->rx_irq,
			 priv->tx_irq);

	ret = init_phy(ndev);
	if (ret != 0) {
		netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
		goto err_init_phy;
	}
	return 0;

err_init_phy:
	unregister_netdev(ndev);
err_register_netdev:
	netif_napi_del(&priv->napi);
	altera_tse_mdio_destroy(ndev);
err_free_netdev:
	free_netdev(ndev);
	return ret;
}

/* Remove Altera TSE MAC device
 */
static int altera_tse_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct altera_tse_private *priv = netdev_priv(ndev);

	if (ndev->phydev) {
		phy_disconnect(ndev->phydev);

		if (of_phy_is_fixed_link(priv->device->of_node))
			of_phy_deregister_fixed_link(priv->device->of_node);
	}

	platform_set_drvdata(pdev, NULL);
	altera_tse_mdio_destroy(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

static const struct altera_dmaops altera_dtype_sgdma = {
	.altera_dtype = ALTERA_DTYPE_SGDMA,
	.dmamask = 32,
	.reset_dma = sgdma_reset,
	.enable_txirq = sgdma_enable_txirq,
	.enable_rxirq = sgdma_enable_rxirq,
	.disable_txirq = sgdma_disable_txirq,
	.disable_rxirq = sgdma_disable_rxirq,
	.clear_txirq = sgdma_clear_txirq,
	.clear_rxirq = sgdma_clear_rxirq,
	.tx_buffer = sgdma_tx_buffer,
	.tx_completions = sgdma_tx_completions,
	.add_rx_desc = sgdma_add_rx_desc,
	.get_rx_status = sgdma_rx_status,
	.init_dma = sgdma_initialize,
	.uninit_dma = sgdma_uninitialize,
	.start_rxdma = sgdma_start_rxdma,
};

static const struct altera_dmaops altera_dtype_msgdma = {
	.altera_dtype = ALTERA_DTYPE_MSGDMA,
	.dmamask = 64,
	.reset_dma = msgdma_reset,
	.enable_txirq = msgdma_enable_txirq,
	.enable_rxirq = msgdma_enable_rxirq,
	.disable_txirq = msgdma_disable_txirq,
	.disable_rxirq = msgdma_disable_rxirq,
	.clear_txirq = msgdma_clear_txirq,
	.clear_rxirq = msgdma_clear_rxirq,
	.tx_buffer = msgdma_tx_buffer,
	.tx_completions = msgdma_tx_completions,
	.add_rx_desc = msgdma_add_rx_desc,
	.get_rx_status = msgdma_rx_status,
	.init_dma = msgdma_initialize,
	.uninit_dma = msgdma_uninitialize,
	.start_rxdma = msgdma_start_rxdma,
};

static const struct of_device_id altera_tse_ids[] = {
	{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
	{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
	{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
	{},
};
MODULE_DEVICE_TABLE(of, altera_tse_ids);

static struct platform_driver altera_tse_driver = {
	.probe		= altera_tse_probe,
	.remove		= altera_tse_remove,
	.suspend	= NULL,
	.resume		= NULL,
	.driver		= {
		.name	= ALTERA_TSE_RESOURCE_NAME,
		.of_match_table = altera_tse_ids,
	},
};

module_platform_driver(altera_tse_driver);

MODULE_AUTHOR("Altera Corporation");
MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
MODULE_LICENSE("GPL v2");
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Altera Triple-Speed Ethernet MAC driver
   3 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
   4 *
   5 * Contributors:
   6 *   Dalon Westergreen
   7 *   Thomas Chou
   8 *   Ian Abbott
   9 *   Yuriy Kozlov
  10 *   Tobias Klauser
  11 *   Andriy Smolskyy
  12 *   Roman Bulgakov
  13 *   Dmytro Mytarchuk
  14 *   Matthew Gerlach
  15 *
  16 * Original driver contributed by SLS.
  17 * Major updates contributed by GlobalLogic
 
 
 
 
 
 
 
 
 
 
 
 
  18 */
  19
  20#include <linux/atomic.h>
  21#include <linux/delay.h>
  22#include <linux/etherdevice.h>
  23#include <linux/if_vlan.h>
  24#include <linux/init.h>
  25#include <linux/interrupt.h>
  26#include <linux/io.h>
  27#include <linux/kernel.h>
  28#include <linux/module.h>
  29#include <linux/mii.h>
  30#include <linux/netdevice.h>
  31#include <linux/of_device.h>
  32#include <linux/of_mdio.h>
  33#include <linux/of_net.h>
  34#include <linux/of_platform.h>
  35#include <linux/phy.h>
  36#include <linux/platform_device.h>
  37#include <linux/skbuff.h>
  38#include <asm/cacheflush.h>
  39
  40#include "altera_utils.h"
  41#include "altera_tse.h"
  42#include "altera_sgdma.h"
  43#include "altera_msgdma.h"
  44
  45static atomic_t instance_count = ATOMIC_INIT(~0);
  46/* Module parameters */
  47static int debug = -1;
  48module_param(debug, int, 0644);
  49MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
  50
  51static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
  52					NETIF_MSG_LINK | NETIF_MSG_IFUP |
  53					NETIF_MSG_IFDOWN);
  54
  55#define RX_DESCRIPTORS 64
  56static int dma_rx_num = RX_DESCRIPTORS;
  57module_param(dma_rx_num, int, 0644);
  58MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");
  59
  60#define TX_DESCRIPTORS 64
  61static int dma_tx_num = TX_DESCRIPTORS;
  62module_param(dma_tx_num, int, 0644);
  63MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
  64
  65
  66#define POLL_PHY (-1)
  67
  68/* Make sure DMA buffer size is larger than the max frame size
  69 * plus some alignment offset and a VLAN header. If the max frame size is
  70 * 1518, a VLAN header would be additional 4 bytes and additional
  71 * headroom for alignment is 2 bytes, 2048 is just fine.
  72 */
  73#define ALTERA_RXDMABUFFER_SIZE	2048
  74
  75/* Allow network stack to resume queueing packets after we've
  76 * finished transmitting at least 1/4 of the packets in the queue.
  77 */
  78#define TSE_TX_THRESH(x)	(x->tx_ring_size / 4)
  79
  80#define TXQUEUESTOP_THRESHHOLD	2
  81
  82static const struct of_device_id altera_tse_ids[];
  83
  84static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  85{
  86	return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
  87}
  88
  89/* PCS Register read/write functions
  90 */
  91static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
  92{
  93	return csrrd32(priv->mac_dev,
  94		       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
  95}
  96
  97static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
  98				u16 value)
  99{
 100	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
 101}
 102
 103/* Check PCS scratch memory */
 104static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
 105{
 106	sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
 107	return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
 108}
 109
 110/* MDIO specific functions
 111 */
 112static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 113{
 114	struct net_device *ndev = bus->priv;
 115	struct altera_tse_private *priv = netdev_priv(ndev);
 116
 117	/* set MDIO address */
 118	csrwr32((mii_id & 0x1f), priv->mac_dev,
 119		tse_csroffs(mdio_phy1_addr));
 120
 121	/* get the data */
 122	return csrrd32(priv->mac_dev,
 123		       tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
 124}
 125
 126static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 127				 u16 value)
 128{
 129	struct net_device *ndev = bus->priv;
 130	struct altera_tse_private *priv = netdev_priv(ndev);
 131
 132	/* set MDIO address */
 133	csrwr32((mii_id & 0x1f), priv->mac_dev,
 134		tse_csroffs(mdio_phy1_addr));
 135
 136	/* write the data */
 137	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
 138	return 0;
 139}
 140
 141static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
 142{
 143	struct altera_tse_private *priv = netdev_priv(dev);
 144	int ret;
 145	struct device_node *mdio_node = NULL;
 146	struct mii_bus *mdio = NULL;
 147	struct device_node *child_node = NULL;
 148
 149	for_each_child_of_node(priv->device->of_node, child_node) {
 150		if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
 151			mdio_node = child_node;
 152			break;
 153		}
 154	}
 155
 156	if (mdio_node) {
 157		netdev_dbg(dev, "FOUND MDIO subnode\n");
 158	} else {
 159		netdev_dbg(dev, "NO MDIO subnode\n");
 160		return 0;
 161	}
 162
 163	mdio = mdiobus_alloc();
 164	if (mdio == NULL) {
 165		netdev_err(dev, "Error allocating MDIO bus\n");
 166		return -ENOMEM;
 167	}
 168
 169	mdio->name = ALTERA_TSE_RESOURCE_NAME;
 170	mdio->read = &altera_tse_mdio_read;
 171	mdio->write = &altera_tse_mdio_write;
 172	snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);
 173
 174	mdio->priv = dev;
 175	mdio->parent = priv->device;
 176
 177	ret = of_mdiobus_register(mdio, mdio_node);
 178	if (ret != 0) {
 179		netdev_err(dev, "Cannot register MDIO bus %s\n",
 180			   mdio->id);
 181		goto out_free_mdio;
 182	}
 183
 184	if (netif_msg_drv(priv))
 185		netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
 186
 187	priv->mdio = mdio;
 188	return 0;
 189out_free_mdio:
 190	mdiobus_free(mdio);
 191	mdio = NULL;
 192	return ret;
 193}
 194
 195static void altera_tse_mdio_destroy(struct net_device *dev)
 196{
 197	struct altera_tse_private *priv = netdev_priv(dev);
 198
 199	if (priv->mdio == NULL)
 200		return;
 201
 202	if (netif_msg_drv(priv))
 203		netdev_info(dev, "MDIO bus %s: removed\n",
 204			    priv->mdio->id);
 205
 206	mdiobus_unregister(priv->mdio);
 207	mdiobus_free(priv->mdio);
 208	priv->mdio = NULL;
 209}
 210
 211static int tse_init_rx_buffer(struct altera_tse_private *priv,
 212			      struct tse_buffer *rxbuffer, int len)
 213{
 214	rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
 215	if (!rxbuffer->skb)
 216		return -ENOMEM;
 217
 218	rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
 219						len,
 220						DMA_FROM_DEVICE);
 221
 222	if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
 223		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
 224		dev_kfree_skb_any(rxbuffer->skb);
 225		return -EINVAL;
 226	}
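	/* Round the mapped address down to the previous 32-bit boundary.
	 * netdev_alloc_skb_ip_align() offsets skb->data by 2 bytes, and the
	 * MAC (with RX shift 16 enabled) prepends 2 alignment bytes, so the
	 * DMA write starts word-aligned and the frame lands at skb->data.
	 */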
 227	rxbuffer->dma_addr &= (dma_addr_t)~3;
 228	rxbuffer->len = len;
 229	return 0;
 230}
 231
 232static void tse_free_rx_buffer(struct altera_tse_private *priv,
 233			       struct tse_buffer *rxbuffer)
 234{
 235	struct sk_buff *skb = rxbuffer->skb;
 236	dma_addr_t dma_addr = rxbuffer->dma_addr;
 237
 238	if (skb != NULL) {
 239		if (dma_addr)
 240			dma_unmap_single(priv->device, dma_addr,
 241					 rxbuffer->len,
 242					 DMA_FROM_DEVICE);
 243		dev_kfree_skb_any(skb);
 244		rxbuffer->skb = NULL;
 245		rxbuffer->dma_addr = 0;
 246	}
 247}
 248
 249/* Unmap and free Tx buffer resources
 250 */
 251static void tse_free_tx_buffer(struct altera_tse_private *priv,
 252			       struct tse_buffer *buffer)
 253{
 254	if (buffer->dma_addr) {
 255		if (buffer->mapped_as_page)
 256			dma_unmap_page(priv->device, buffer->dma_addr,
 257				       buffer->len, DMA_TO_DEVICE);
 258		else
 259			dma_unmap_single(priv->device, buffer->dma_addr,
 260					 buffer->len, DMA_TO_DEVICE);
 261		buffer->dma_addr = 0;
 262	}
 263	if (buffer->skb) {
 264		dev_kfree_skb_any(buffer->skb);
 265		buffer->skb = NULL;
 266	}
 267}
 268
 269static int alloc_init_skbufs(struct altera_tse_private *priv)
 270{
 271	unsigned int rx_descs = priv->rx_ring_size;
 272	unsigned int tx_descs = priv->tx_ring_size;
 273	int ret = -ENOMEM;
 274	int i;
 275
 276	/* Create Rx ring buffer */
 277	priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
 278				GFP_KERNEL);
 279	if (!priv->rx_ring)
 280		goto err_rx_ring;
 281
 282	/* Create Tx ring buffer */
 283	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
 284				GFP_KERNEL);
 285	if (!priv->tx_ring)
 286		goto err_tx_ring;
 287
 288	priv->tx_cons = 0;
 289	priv->tx_prod = 0;
 290
 291	/* Init Rx ring */
 292	for (i = 0; i < rx_descs; i++) {
 293		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
 294					 priv->rx_dma_buf_sz);
 295		if (ret)
 296			goto err_init_rx_buffers;
 297	}
 298
 299	priv->rx_cons = 0;
 300	priv->rx_prod = 0;
 301
 302	return 0;
 303err_init_rx_buffers:
 304	while (--i >= 0)
 305		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
 306	kfree(priv->tx_ring);
 307err_tx_ring:
 308	kfree(priv->rx_ring);
 309err_rx_ring:
 310	return ret;
 311}
 312
 313static void free_skbufs(struct net_device *dev)
 314{
 315	struct altera_tse_private *priv = netdev_priv(dev);
 316	unsigned int rx_descs = priv->rx_ring_size;
 317	unsigned int tx_descs = priv->tx_ring_size;
 318	int i;
 319
 320	/* Release the DMA TX/RX socket buffers */
 321	for (i = 0; i < rx_descs; i++)
 322		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
 323	for (i = 0; i < tx_descs; i++)
 324		tse_free_tx_buffer(priv, &priv->tx_ring[i]);

	kfree(priv->tx_ring);
	kfree(priv->rx_ring);	/* both ring arrays come from alloc_init_skbufs() */
 328}
 329
 330/* Reallocate the skb for the reception process
 331 */
 332static inline void tse_rx_refill(struct altera_tse_private *priv)
 333{
 334	unsigned int rxsize = priv->rx_ring_size;
 335	unsigned int entry;
 336	int ret;
 337
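	/* rx_cons counts buffers consumed by tse_rx(), rx_prod buffers
	 * refilled here; both are free-running, so their difference is the
	 * number of empty ring slots still to be replenished.
	 */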
 338	for (; priv->rx_cons - priv->rx_prod > 0;
 339			priv->rx_prod++) {
 340		entry = priv->rx_prod % rxsize;
 341		if (likely(priv->rx_ring[entry].skb == NULL)) {
 342			ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
 343				priv->rx_dma_buf_sz);
 344			if (unlikely(ret != 0))
 345				break;
 346			priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
 347		}
 348	}
 349}
 350
 351/* Pull out the VLAN tag and fix up the packet
 352 */
 353static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
 354{
 355	struct ethhdr *eth_hdr;
 356	u16 vid;
 357	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 358	    !__vlan_get_tag(skb, &vid)) {
 359		eth_hdr = (struct ethhdr *)skb->data;
 360		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
 361		skb_pull(skb, VLAN_HLEN);
 362		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 363	}
 364}
 365
 366/* Receive a packet: retrieve and pass over to upper levels
 367 */
 368static int tse_rx(struct altera_tse_private *priv, int limit)
 369{
 370	unsigned int count = 0;
 371	unsigned int next_entry;
 372	struct sk_buff *skb;
 373	unsigned int entry = priv->rx_cons % priv->rx_ring_size;
 374	u32 rxstatus;
 375	u16 pktlength;
 376	u16 pktstatus;
 377
	/* Check for count < limit first as get_rx_status is changing
	 * the response-fifo so we must process the next packet
	 * after calling get_rx_status if a response is pending.
	 * (reading the last byte of the response pops the value from the fifo.)
	 */
 383	while ((count < limit) &&
 384	       ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
 385		pktstatus = rxstatus >> 16;
 386		pktlength = rxstatus & 0xffff;
 387
 388		if ((pktstatus & 0xFF) || (pktlength == 0))
 389			netdev_err(priv->dev,
 390				   "RCV pktstatus %08X pktlength %08X\n",
 391				   pktstatus, pktlength);
 392
		/* DMA transfer from TSE starts with 2 additional bytes for
		 * IP payload alignment. Status returned by get_rx_status()
		 * contains the DMA transfer length; the packet is 2 bytes shorter.
		 */
 397		pktlength -= 2;
 398
 399		count++;
 400		next_entry = (++priv->rx_cons) % priv->rx_ring_size;
 401
 402		skb = priv->rx_ring[entry].skb;
 403		if (unlikely(!skb)) {
 404			netdev_err(priv->dev,
 405				   "%s: Inconsistent Rx descriptor chain\n",
 406				   __func__);
 407			priv->dev->stats.rx_dropped++;
 408			break;
 409		}
 410		priv->rx_ring[entry].skb = NULL;
 411
 412		skb_put(skb, pktlength);
 413
 414		dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
 415				 priv->rx_ring[entry].len, DMA_FROM_DEVICE);
 416
 417		if (netif_msg_pktdata(priv)) {
 418			netdev_info(priv->dev, "frame received %d bytes\n",
 419				    pktlength);
 420			print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
 421				       16, 1, skb->data, pktlength, true);
 422		}
 423
 424		tse_rx_vlan(priv->dev, skb);
 425
 426		skb->protocol = eth_type_trans(skb, priv->dev);
 427		skb_checksum_none_assert(skb);
 428
 429		napi_gro_receive(&priv->napi, skb);
 430
 431		priv->dev->stats.rx_packets++;
 432		priv->dev->stats.rx_bytes += pktlength;
 433
 434		entry = next_entry;
 435
 436		tse_rx_refill(priv);
 437	}
 438
 439	return count;
 440}
 441
 442/* Reclaim resources after transmission completes
 443 */
 444static int tse_tx_complete(struct altera_tse_private *priv)
 445{
 446	unsigned int txsize = priv->tx_ring_size;
 447	u32 ready;
 448	unsigned int entry;
 449	struct tse_buffer *tx_buff;
 450	int txcomplete = 0;
 451
 452	spin_lock(&priv->tx_lock);
 453
 454	ready = priv->dmaops->tx_completions(priv);
 455
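	/* 'ready' is the number of descriptors the DMA engine reports as
	 * completed; release one Tx buffer per completion, starting at the
	 * consumer index.
	 */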
 456	/* Free sent buffers */
 457	while (ready && (priv->tx_cons != priv->tx_prod)) {
 458		entry = priv->tx_cons % txsize;
 459		tx_buff = &priv->tx_ring[entry];
 460
 461		if (netif_msg_tx_done(priv))
 462			netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
 463				   __func__, priv->tx_prod, priv->tx_cons);
 464
 465		if (likely(tx_buff->skb))
 466			priv->dev->stats.tx_packets++;
 467
 468		tse_free_tx_buffer(priv, tx_buff);
 469		priv->tx_cons++;
 470
 471		txcomplete++;
 472		ready--;
 473	}
 474
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
		if (netif_msg_tx_done(priv))
			netdev_dbg(priv->dev, "%s: restart transmit\n",
				   __func__);
		netif_wake_queue(priv->dev);
	}
 485
 486	spin_unlock(&priv->tx_lock);
 487	return txcomplete;
 488}
 489
 490/* NAPI polling function
 491 */
 492static int tse_poll(struct napi_struct *napi, int budget)
 493{
 494	struct altera_tse_private *priv =
 495			container_of(napi, struct altera_tse_private, napi);
 496	int rxcomplete = 0;
	unsigned long flags;
 498
 499	tse_tx_complete(priv);
 500
 501	rxcomplete = tse_rx(priv, budget);
 502
 503	if (rxcomplete < budget) {
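		/* All pending work fitted in the budget: leave polling mode
		 * and re-arm the Rx/Tx interrupts under the irq lock.
		 */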
 504
 505		napi_complete_done(napi, rxcomplete);
 506
 507		netdev_dbg(priv->dev,
 508			   "NAPI Complete, did %d packets with budget %d\n",
 509			   rxcomplete, budget);
 510
 511		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
 512		priv->dmaops->enable_rxirq(priv);
 513		priv->dmaops->enable_txirq(priv);
 514		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 515	}
 516	return rxcomplete;
 517}
 518
 519/* DMA TX & RX FIFO interrupt routing
 520 */
 521static irqreturn_t altera_isr(int irq, void *dev_id)
 522{
 523	struct net_device *dev = dev_id;
 524	struct altera_tse_private *priv;
 525
 526	if (unlikely(!dev)) {
 527		pr_err("%s: invalid dev pointer\n", __func__);
 528		return IRQ_NONE;
 529	}
 530	priv = netdev_priv(dev);
 531
 532	spin_lock(&priv->rxdma_irq_lock);
 533	/* reset IRQs */
 534	priv->dmaops->clear_rxirq(priv);
 535	priv->dmaops->clear_txirq(priv);
 536	spin_unlock(&priv->rxdma_irq_lock);
 537
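	/* Mask the DMA interrupts while NAPI polls; tse_poll() re-enables
	 * them once it completes within its budget.
	 */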
 538	if (likely(napi_schedule_prep(&priv->napi))) {
 539		spin_lock(&priv->rxdma_irq_lock);
 540		priv->dmaops->disable_rxirq(priv);
 541		priv->dmaops->disable_txirq(priv);
 542		spin_unlock(&priv->rxdma_irq_lock);
 543		__napi_schedule(&priv->napi);
 544	}

	return IRQ_HANDLED;
 548}
 549
/* Transmit a packet (called by the kernel). Dispatches to either the
 * SGDMA or the MSGDMA transmit method. Neither supports scatter/gather,
 * so the frame is assumed to be a single physically contiguous fragment
 * starting at skb->data, of length skb_headlen(skb).
 */
 557static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
 558{
 559	struct altera_tse_private *priv = netdev_priv(dev);
 560	unsigned int txsize = priv->tx_ring_size;
 561	unsigned int entry;
 562	struct tse_buffer *buffer = NULL;
 563	int nfrags = skb_shinfo(skb)->nr_frags;
 564	unsigned int nopaged_len = skb_headlen(skb);
 565	netdev_tx_t ret = NETDEV_TX_OK;
 566	dma_addr_t dma_addr;
 567
 568	spin_lock_bh(&priv->tx_lock);
 569
 570	if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
 571		if (!netif_queue_stopped(dev)) {
 572			netif_stop_queue(dev);
 573			/* This is a hard error, log it. */
 574			netdev_err(priv->dev,
 575				   "%s: Tx list full when queue awake\n",
 576				   __func__);
 577		}
 578		ret = NETDEV_TX_BUSY;
 579		goto out;
 580	}
 581
 582	/* Map the first skb fragment */
 583	entry = priv->tx_prod % txsize;
 584	buffer = &priv->tx_ring[entry];
 585
 586	dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
 587				  DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		/* Returning NETDEV_TX_OK means we own the skb, so drop it */
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}
 593
 594	buffer->skb = skb;
 595	buffer->dma_addr = dma_addr;
 596	buffer->len = nopaged_len;
 597
 598	priv->dmaops->tx_buffer(priv, buffer);
 599
 600	skb_tx_timestamp(skb);
 601
 602	priv->tx_prod++;
 603	dev->stats.tx_bytes += skb->len;
 604
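	/* Without scatter/gather each frame needs exactly one descriptor;
	 * stop the queue early while a small margin of slots remains.
	 */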
	if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHOLD)) {
 606		if (netif_msg_hw(priv))
 607			netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
 608				   __func__);
 609		netif_stop_queue(dev);
 610	}
 611
 612out:
 613	spin_unlock_bh(&priv->tx_lock);
 614
 615	return ret;
 616}
 617
/* Called whenever the controller must be made aware of a new link state.
 * The PHY code conveys this information through fields in the phydev
 * structure; this function converts them into the appropriate register
 * values and can bring the device down if needed.
 */
 624static void altera_tse_adjust_link(struct net_device *dev)
 625{
 626	struct altera_tse_private *priv = netdev_priv(dev);
 627	struct phy_device *phydev = dev->phydev;
 628	int new_state = 0;
 629
 630	/* only change config if there is a link */
 631	spin_lock(&priv->mac_cfg_lock);
 632	if (phydev->link) {
 633		/* Read old config */
 634		u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
 635
 636		/* Check duplex */
 637		if (phydev->duplex != priv->oldduplex) {
 638			new_state = 1;
 639			if (!(phydev->duplex))
 640				cfg_reg |= MAC_CMDCFG_HD_ENA;
 641			else
 642				cfg_reg &= ~MAC_CMDCFG_HD_ENA;
 643
 644			netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
 645				   dev->name, phydev->duplex);
 646
 647			priv->oldduplex = phydev->duplex;
 648		}
 649
 650		/* Check speed */
 651		if (phydev->speed != priv->oldspeed) {
 652			new_state = 1;
 653			switch (phydev->speed) {
 654			case 1000:
 655				cfg_reg |= MAC_CMDCFG_ETH_SPEED;
 656				cfg_reg &= ~MAC_CMDCFG_ENA_10;
 657				break;
 658			case 100:
 659				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
 660				cfg_reg &= ~MAC_CMDCFG_ENA_10;
 661				break;
 662			case 10:
 663				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
 664				cfg_reg |= MAC_CMDCFG_ENA_10;
 665				break;
 666			default:
 667				if (netif_msg_link(priv))
 668					netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
 669						    phydev->speed);
 670				break;
 671			}
 672			priv->oldspeed = phydev->speed;
 673		}
 674		iowrite32(cfg_reg, &priv->mac_dev->command_config);
 675
 676		if (!priv->oldlink) {
 677			new_state = 1;
 678			priv->oldlink = 1;
 679		}
 680	} else if (priv->oldlink) {
 681		new_state = 1;
 682		priv->oldlink = 0;
 683		priv->oldspeed = 0;
 684		priv->oldduplex = -1;
 685	}
 686
 687	if (new_state && netif_msg_link(priv))
 688		phy_print_status(phydev);
 689
 690	spin_unlock(&priv->mac_cfg_lock);
}

static struct phy_device *connect_local_phy(struct net_device *dev)
 693{
 694	struct altera_tse_private *priv = netdev_priv(dev);
 695	struct phy_device *phydev = NULL;
 696	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
 697
 698	if (priv->phy_addr != POLL_PHY) {
 699		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
 700			 priv->mdio->id, priv->phy_addr);
 701
 702		netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
 703
 704		phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
 705				     priv->phy_iface);
 706		if (IS_ERR(phydev)) {
 707			netdev_err(dev, "Could not attach to PHY\n");
 708			phydev = NULL;
 709		}
 710
 711	} else {
		int ret;

		phydev = phy_find_first(priv->mdio);
 714		if (phydev == NULL) {
 715			netdev_err(dev, "No PHY found\n");
 716			return phydev;
 717		}
 718
 719		ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
 720				priv->phy_iface);
 721		if (ret != 0) {
 722			netdev_err(dev, "Could not attach to PHY\n");
 723			phydev = NULL;
 724		}
 725	}
 726	return phydev;
 727}
 728
 729static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
 730{
 731	struct altera_tse_private *priv = netdev_priv(dev);
 732	struct device_node *np = priv->device->of_node;
 733	int ret;
 734
 735	ret = of_get_phy_mode(np, &priv->phy_iface);
 736
 737	/* Avoid get phy addr and create mdio if no phy is present */
 738	if (ret)
 739		return 0;
 740
	/* Try to get the PHY address from the device tree; use PHY
	 * autodetection if no valid address is given.
	 */
	if (of_property_read_u32(priv->device->of_node, "phy-addr",
				 &priv->phy_addr))
		priv->phy_addr = POLL_PHY;
 749
	if (!((priv->phy_addr == POLL_PHY) ||
	      ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
		netdev_err(dev, "invalid phy-addr specified %d\n",
			   priv->phy_addr);
		return -ENODEV;
	}
 756
 757	/* Create/attach to MDIO bus */
	ret = altera_tse_mdio_create(dev,
				     atomic_add_return(1, &instance_count));
 760
 761	if (ret)
 762		return -ENODEV;
 763
 764	return 0;
 765}
 766
 767/* Initialize driver's PHY state, and attach to the PHY
 768 */
 769static int init_phy(struct net_device *dev)
 770{
 771	struct altera_tse_private *priv = netdev_priv(dev);
 772	struct phy_device *phydev;
 773	struct device_node *phynode;
 774	bool fixed_link = false;
 775	int rc = 0;
 776
 777	/* Avoid init phy in case of no phy present */
 778	if (!priv->phy_iface)
 779		return 0;
 780
 781	priv->oldlink = 0;
 782	priv->oldspeed = 0;
 783	priv->oldduplex = -1;
 784
 785	phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
 786
 787	if (!phynode) {
 788		/* check if a fixed-link is defined in device-tree */
 789		if (of_phy_is_fixed_link(priv->device->of_node)) {
 790			rc = of_phy_register_fixed_link(priv->device->of_node);
 791			if (rc < 0) {
 792				netdev_err(dev, "cannot register fixed PHY\n");
 793				return rc;
 794			}
 795
 796			/* In the case of a fixed PHY, the DT node associated
 797			 * to the PHY is the Ethernet MAC DT node.
 798			 */
 799			phynode = of_node_get(priv->device->of_node);
 800			fixed_link = true;
 801
 802			netdev_dbg(dev, "fixed-link detected\n");
 803			phydev = of_phy_connect(dev, phynode,
 804						&altera_tse_adjust_link,
 805						0, priv->phy_iface);
 806		} else {
 807			netdev_dbg(dev, "no phy-handle found\n");
 808			if (!priv->mdio) {
 809				netdev_err(dev, "No phy-handle nor local mdio specified\n");
 810				return -ENODEV;
 811			}
 812			phydev = connect_local_phy(dev);
 813		}
 814	} else {
 815		netdev_dbg(dev, "phy-handle found\n");
 816		phydev = of_phy_connect(dev, phynode,
 817			&altera_tse_adjust_link, 0, priv->phy_iface);
 818	}
 819	of_node_put(phynode);
 820
 821	if (!phydev) {
 822		netdev_err(dev, "Could not find the PHY\n");
 823		if (fixed_link)
 824			of_phy_deregister_fixed_link(priv->device->of_node);
 825		return -ENODEV;
 826	}
 827
	/* Stop advertising 1000BASE capability if interface is not GMII
	 */
	if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
	    (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
		phy_set_max_speed(phydev, SPEED_100);
 833
 834	/* Broken HW is sometimes missing the pull-up resistor on the
 835	 * MDIO line, which results in reads to non-existent devices returning
 836	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
 837	 * device as well. If a fixed-link is used the phy_id is always 0.
 838	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
 839	 */
 840	if ((phydev->phy_id == 0) && !fixed_link) {
 841		netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
 842		phy_disconnect(phydev);
 843		return -ENODEV;
 844	}
 845
 846	netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
 847		   phydev->mdio.addr, phydev->phy_id, phydev->link);
 848
 849	return 0;
 850}
 851
 852static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 853{
 854	u32 msb;
 855	u32 lsb;
 856
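	/* The MAC core holds the address across two registers, in
	 * little-endian octet order: octets 0-3 in mac_addr_0 and
	 * octets 4-5 in the low half of mac_addr_1.
	 */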
 857	msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
 858	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 859
 860	/* Set primary MAC address */
 861	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
 862	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 863}
 864
 865/* MAC software reset.
 866 * When reset is triggered, the MAC function completes the current
 867 * transmission or reception, and subsequently disables the transmit and
 868 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 869 * counters.
 870 */
 871static int reset_mac(struct altera_tse_private *priv)
 872{
 873	int counter;
 874	u32 dat;
 875
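	/* Disable Tx/Rx and request a self-clearing software reset, then
	 * poll until the core clears MAC_CMDCFG_SW_RESET or the watchdog
	 * count expires.
	 */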
 876	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 877	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 878	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
 879	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 880
 881	counter = 0;
 882	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
 883		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
 884				     MAC_CMDCFG_SW_RESET))
 885			break;
 886		udelay(1);
 887	}
 888
 889	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
 890		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 891		dat &= ~MAC_CMDCFG_SW_RESET;
 892		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
		return -ETIMEDOUT;
 894	}
 895	return 0;
 896}
 897
/* Initialize MAC core registers
 */
 900static int init_mac(struct altera_tse_private *priv)
 901{
 902	unsigned int cmd = 0;
 903	u32 frm_length;
 904
 905	/* Setup Rx FIFO */
 906	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
 907		priv->mac_dev, tse_csroffs(rx_section_empty));
 908
 909	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
 910		tse_csroffs(rx_section_full));
 911
 912	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
 913		tse_csroffs(rx_almost_empty));
 914
 915	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
 916		tse_csroffs(rx_almost_full));
 917
 918	/* Setup Tx FIFO */
 919	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
 920		priv->mac_dev, tse_csroffs(tx_section_empty));
 921
 922	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
 923		tse_csroffs(tx_section_full));
 924
 925	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
 926		tse_csroffs(tx_almost_empty));
 927
 928	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
 929		tse_csroffs(tx_almost_full));
 930
 931	/* MAC Address Configuration */
 932	tse_update_mac_addr(priv, priv->dev->dev_addr);
 933
 934	/* MAC Function Configuration */
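	/* Maximum receivable frame: Ethernet header + MTU + FCS */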
 935	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
 936	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
 937
 938	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
 939		tse_csroffs(tx_ipg_length));
 940
	/* Enable RX shift 16 so received frames are aligned on a 16-bit
	 * boundary (the MAC prepends 2 bytes, aligning the IP payload);
	 * disable TX shift 16 and CRC omission on transmit.
	 */
 944	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
 945		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
 946
 947	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
 948		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
 949		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 950
 951	/* Set the MAC options */
 952	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 953	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
 954	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
 955	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
 956					 * with CRC errors
 957					 */
 958	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
 959	cmd &= ~MAC_CMDCFG_TX_ENA;
 960	cmd &= ~MAC_CMDCFG_RX_ENA;
 961
 962	/* Default speed and duplex setting, full/100 */
 963	cmd &= ~MAC_CMDCFG_HD_ENA;
 964	cmd &= ~MAC_CMDCFG_ETH_SPEED;
 965	cmd &= ~MAC_CMDCFG_ENA_10;
 966
 967	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
 968
 969	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
 970		tse_csroffs(pause_quanta));
 971
 972	if (netif_msg_hw(priv))
 973		dev_dbg(priv->device,
 974			"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);
 975
 976	return 0;
 977}
 978
/* Start/stop MAC transmit and receive logic
 */
 981static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 982{
 983	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 984
 985	if (enable)
 986		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
 987	else
 988		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 989
 990	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 991}
 992
 993/* Change the MTU
 994 */
 995static int tse_change_mtu(struct net_device *dev, int new_mtu)
 996{
 997	if (netif_running(dev)) {
 998		netdev_err(dev, "must be stopped to change its MTU\n");
 999		return -EBUSY;
1000	}
1001
1002	dev->mtu = new_mtu;
1003	netdev_update_features(dev);
1004
1005	return 0;
1006}
1007
1008static void altera_tse_set_mcfilter(struct net_device *dev)
1009{
1010	struct altera_tse_private *priv = netdev_priv(dev);
1011	int i;
1012	struct netdev_hw_addr *ha;
1013
1014	/* clear the hash filter */
1015	for (i = 0; i < 64; i++)
1016		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
1017
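	/* Each multicast address hashes to a 6-bit bucket: every one of the
	 * six MAC octets contributes one bit, the XOR (parity) of its eight
	 * bits, with octet 5 supplying the most significant hash bit.
	 */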
1018	netdev_for_each_mc_addr(ha, dev) {
1019		unsigned int hash = 0;
1020		int mac_octet;
1021
1022		for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
1023			unsigned char xor_bit = 0;
1024			unsigned char octet = ha->addr[mac_octet];
1025			unsigned int bitshift;
1026
1027			for (bitshift = 0; bitshift < 8; bitshift++)
1028				xor_bit ^= ((octet >> bitshift) & 0x01);
1029
1030			hash = (hash << 1) | xor_bit;
1031		}
1032		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
1033	}
1034}

static void altera_tse_set_mcfilterall(struct net_device *dev)
1038{
1039	struct altera_tse_private *priv = netdev_priv(dev);
1040	int i;
1041
1042	/* set the hash filter */
1043	for (i = 0; i < 64; i++)
1044		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
1045}
1046
1047/* Set or clear the multicast filter for this adaptor
1048 */
1049static void tse_set_rx_mode_hashfilter(struct net_device *dev)
1050{
1051	struct altera_tse_private *priv = netdev_priv(dev);
1052
1053	spin_lock(&priv->mac_cfg_lock);
1054
1055	if (dev->flags & IFF_PROMISC)
1056		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1057			    MAC_CMDCFG_PROMIS_EN);
1058
1059	if (dev->flags & IFF_ALLMULTI)
1060		altera_tse_set_mcfilterall(dev);
1061	else
1062		altera_tse_set_mcfilter(dev);
1063
1064	spin_unlock(&priv->mac_cfg_lock);
1065}
1066
1067/* Set or clear the multicast filter for this adaptor
1068 */
1069static void tse_set_rx_mode(struct net_device *dev)
1070{
1071	struct altera_tse_private *priv = netdev_priv(dev);
1072
1073	spin_lock(&priv->mac_cfg_lock);
1074
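	/* Without a hash filter the MAC cannot do per-address filtering, so
	 * fall back to promiscuous mode whenever any multicast or extra
	 * unicast address is configured.
	 */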
1075	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
1076	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
1077		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1078			    MAC_CMDCFG_PROMIS_EN);
1079	else
1080		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
1081			      MAC_CMDCFG_PROMIS_EN);
1082
1083	spin_unlock(&priv->mac_cfg_lock);
1084}
1085
1086/* Initialise (if necessary) the SGMII PCS component
1087 */
1088static int init_sgmii_pcs(struct net_device *dev)
1089{
1090	struct altera_tse_private *priv = netdev_priv(dev);
1091	int n;
1092	unsigned int tmp_reg = 0;
1093
1094	if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
1095		return 0; /* Nothing to do, not in SGMII mode */
1096
1097	/* The TSE SGMII PCS block looks a little like a PHY, it is
1098	 * mapped into the zeroth MDIO space of the MAC and it has
1099	 * ID registers like a PHY would.  Sadly this is often
1100	 * configured to zeroes, so don't be surprised if it does
1101	 * show 0x00000000.
1102	 */
1103
1104	if (sgmii_pcs_scratch_test(priv, 0x0000) &&
1105		sgmii_pcs_scratch_test(priv, 0xffff) &&
1106		sgmii_pcs_scratch_test(priv, 0xa5a5) &&
1107		sgmii_pcs_scratch_test(priv, 0x5a5a)) {
1108		netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
1109				sgmii_pcs_read(priv, MII_PHYSID1),
1110				sgmii_pcs_read(priv, MII_PHYSID2));
1111	} else {
1112		netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
1113		return -ENOMEM;
1114	}
1115
1116	/* Starting on page 5-29 of the MegaCore Function User Guide
1117	 * Set SGMII Link timer to 1.6ms
1118	 */
1119	sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
1120	sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);
1121
1122	/* Enable SGMII Interface and Enable SGMII Auto Negotiation */
1123	sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);
1124
1125	/* Enable Autonegotiation */
1126	tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
1127	tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
1128	sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
1129
1130	/* Reset PCS block */
1131	tmp_reg |= BMCR_RESET;
1132	sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
1133	for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
1134		if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
1135			netdev_info(dev, "SGMII PCS block initialised OK\n");
1136			return 0;
1137		}
1138		udelay(1);
1139	}
1140
1141	/* We failed to reset the block, return a timeout */
1142	netdev_err(dev, "SGMII PCS block reset failed.\n");
1143	return -ETIMEDOUT;
1144}
1145
1146/* Open and initialize the interface
1147 */
1148static int tse_open(struct net_device *dev)
1149{
1150	struct altera_tse_private *priv = netdev_priv(dev);
1151	int ret = 0;
1152	int i;
	unsigned long flags;
1154
1155	/* Reset and configure TSE MAC and probe associated PHY */
1156	ret = priv->dmaops->init_dma(priv);
1157	if (ret != 0) {
1158		netdev_err(dev, "Cannot initialize DMA\n");
1159		goto phy_error;
1160	}
1161
1162	if (netif_msg_ifup(priv))
1163		netdev_warn(dev, "device MAC address %pM\n",
1164			    dev->dev_addr);
1165
1166	if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
1167		netdev_warn(dev, "TSE revision %x\n", priv->revision);
1168
1169	spin_lock(&priv->mac_cfg_lock);
	/* no-op if MAC not operating in SGMII mode */
1171	ret = init_sgmii_pcs(dev);
1172	if (ret) {
1173		netdev_err(dev,
1174			   "Cannot init the SGMII PCS (error: %d)\n", ret);
1175		spin_unlock(&priv->mac_cfg_lock);
1176		goto phy_error;
1177	}
1178
1179	ret = reset_mac(priv);
1180	/* Note that reset_mac will fail if the clocks are gated by the PHY
1181	 * due to the PHY being put into isolation or power down mode.
1182	 * This is not an error if reset fails due to no clock.
1183	 */
1184	if (ret)
1185		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
1186
1187	ret = init_mac(priv);
1188	spin_unlock(&priv->mac_cfg_lock);
1189	if (ret) {
1190		netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
1191		goto alloc_skbuf_error;
1192	}
1193
1194	priv->dmaops->reset_dma(priv);
1195
1196	/* Create and initialize the TX/RX descriptors chains. */
1197	priv->rx_ring_size = dma_rx_num;
1198	priv->tx_ring_size = dma_tx_num;
1199	ret = alloc_init_skbufs(priv);
1200	if (ret) {
1201		netdev_err(dev, "DMA descriptors initialization failed\n");
1202		goto alloc_skbuf_error;
1203	}

	/* Register RX interrupt */
1207	ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
1208			  dev->name, dev);
1209	if (ret) {
1210		netdev_err(dev, "Unable to register RX interrupt %d\n",
1211			   priv->rx_irq);
1212		goto init_error;
1213	}
1214
1215	/* Register TX interrupt */
1216	ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
1217			  dev->name, dev);
1218	if (ret) {
1219		netdev_err(dev, "Unable to register TX interrupt %d\n",
1220			   priv->tx_irq);
1221		goto tx_request_irq_error;
1222	}
1223
1224	/* Enable DMA interrupts */
1225	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
1226	priv->dmaops->enable_rxirq(priv);
1227	priv->dmaops->enable_txirq(priv);
1228
1229	/* Setup RX descriptor chain */
1230	for (i = 0; i < priv->rx_ring_size; i++)
1231		priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);
1232
1233	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1234
1235	if (dev->phydev)
1236		phy_start(dev->phydev);
1237
1238	napi_enable(&priv->napi);
1239	netif_start_queue(dev);
1240
1241	priv->dmaops->start_rxdma(priv);
1242
1243	/* Start MAC Rx/Tx */
1244	spin_lock(&priv->mac_cfg_lock);
1245	tse_set_mac(priv, true);
1246	spin_unlock(&priv->mac_cfg_lock);
1247
1248	return 0;
1249
1250tx_request_irq_error:
1251	free_irq(priv->rx_irq, dev);
1252init_error:
1253	free_skbufs(dev);
1254alloc_skbuf_error:
1255phy_error:
1256	return ret;
1257}
1258
1259/* Stop TSE MAC interface and put the device in an inactive state
1260 */
1261static int tse_shutdown(struct net_device *dev)
1262{
1263	struct altera_tse_private *priv = netdev_priv(dev);
1264	int ret;
	unsigned long flags;
1266
1267	/* Stop the PHY */
1268	if (dev->phydev)
1269		phy_stop(dev->phydev);
1270
1271	netif_stop_queue(dev);
1272	napi_disable(&priv->napi);
1273
1274	/* Disable DMA interrupts */
1275	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
1276	priv->dmaops->disable_rxirq(priv);
1277	priv->dmaops->disable_txirq(priv);
1278	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1279
1280	/* Free the IRQ lines */
1281	free_irq(priv->rx_irq, dev);
1282	free_irq(priv->tx_irq, dev);
1283
1284	/* disable and reset the MAC, empties fifo */
1285	spin_lock(&priv->mac_cfg_lock);
1286	spin_lock(&priv->tx_lock);
1287
1288	ret = reset_mac(priv);
1289	/* Note that reset_mac will fail if the clocks are gated by the PHY
1290	 * due to the PHY being put into isolation or power down mode.
1291	 * This is not an error if reset fails due to no clock.
1292	 */
1293	if (ret)
1294		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
1295	priv->dmaops->reset_dma(priv);
1296	free_skbufs(dev);
1297
1298	spin_unlock(&priv->tx_lock);
1299	spin_unlock(&priv->mac_cfg_lock);
1300
1301	priv->dmaops->uninit_dma(priv);
1302
1303	return 0;
1304}
1305
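/* Not const: probe() swaps in tse_set_rx_mode_hashfilter for
 * ndo_set_rx_mode when the hardware hash filter is available.
 */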
1306static struct net_device_ops altera_tse_netdev_ops = {
1307	.ndo_open		= tse_open,
1308	.ndo_stop		= tse_shutdown,
1309	.ndo_start_xmit		= tse_start_xmit,
1310	.ndo_set_mac_address	= eth_mac_addr,
1311	.ndo_set_rx_mode	= tse_set_rx_mode,
1312	.ndo_change_mtu		= tse_change_mtu,
1313	.ndo_validate_addr	= eth_validate_addr,
1314};
1315
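/* Look up a named MMIO resource on the platform device, reserve the
 * region and ioremap it. The devm_* variants tie the lifetime to the
 * device, so no explicit unwind is needed on the error paths.
 */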
1316static int request_and_map(struct platform_device *pdev, const char *name,
1317			   struct resource **res, void __iomem **ptr)
1318{
1319	struct resource *region;
1320	struct device *device = &pdev->dev;
1321
1322	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
1323	if (*res == NULL) {
1324		dev_err(device, "resource %s not defined\n", name);
1325		return -ENODEV;
1326	}
1327
1328	region = devm_request_mem_region(device, (*res)->start,
1329					 resource_size(*res), dev_name(device));
1330	if (region == NULL) {
1331		dev_err(device, "unable to request %s\n", name);
1332		return -EBUSY;
1333	}
1334
1335	*ptr = devm_ioremap(device, region->start,
1336				    resource_size(region));
1337	if (*ptr == NULL) {
1338		dev_err(device, "ioremap of %s failed!", name);
1339		return -ENOMEM;
1340	}
1341
1342	return 0;
1343}
1344
1345/* Probe Altera TSE MAC device
1346 */
1347static int altera_tse_probe(struct platform_device *pdev)
1348{
1349	struct net_device *ndev;
1350	int ret = -ENODEV;
1351	struct resource *control_port;
1352	struct resource *dma_res;
1353	struct altera_tse_private *priv;
1354	const unsigned char *macaddr;
1355	void __iomem *descmap;
1356	const struct of_device_id *of_id = NULL;
1357
1358	ndev = alloc_etherdev(sizeof(struct altera_tse_private));
1359	if (!ndev) {
1360		dev_err(&pdev->dev, "Could not allocate network device\n");
1361		return -ENODEV;
1362	}
1363
1364	SET_NETDEV_DEV(ndev, &pdev->dev);
1365
1366	priv = netdev_priv(ndev);
1367	priv->device = &pdev->dev;
1368	priv->dev = ndev;
1369	priv->msg_enable = netif_msg_init(debug, default_msg_level);
1370
1371	of_id = of_match_device(altera_tse_ids, &pdev->dev);
1372
1373	if (of_id)
1374		priv->dmaops = (struct altera_dmaops *)of_id->data;

	if (priv->dmaops &&
1378	    priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
1379		/* Get the mapped address to the SGDMA descriptor memory */
1380		ret = request_and_map(pdev, "s1", &dma_res, &descmap);
1381		if (ret)
1382			goto err_free_netdev;
1383
1384		/* Start of that memory is for transmit descriptors */
1385		priv->tx_dma_desc = descmap;
1386
		/* First half is for tx descriptors, the other half for rx */
1388		priv->txdescmem = resource_size(dma_res)/2;
1389
1390		priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;
1391
1392		priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
1393						     priv->txdescmem));
1394		priv->rxdescmem = resource_size(dma_res)/2;
1395		priv->rxdescmem_busaddr = dma_res->start;
1396		priv->rxdescmem_busaddr += priv->txdescmem;
1397
1398		if (upper_32_bits(priv->rxdescmem_busaddr)) {
1399			dev_dbg(priv->device,
1400				"SGDMA bus addresses greater than 32-bits\n");
1401			ret = -EINVAL;
1402			goto err_free_netdev;
1403		}
1404		if (upper_32_bits(priv->txdescmem_busaddr)) {
1405			dev_dbg(priv->device,
1406				"SGDMA bus addresses greater than 32-bits\n");
1407			ret = -EINVAL;
1408			goto err_free_netdev;
1409		}
1410	} else if (priv->dmaops &&
1411		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
1412		ret = request_and_map(pdev, "rx_resp", &dma_res,
1413				      &priv->rx_dma_resp);
1414		if (ret)
1415			goto err_free_netdev;
1416
1417		ret = request_and_map(pdev, "tx_desc", &dma_res,
1418				      &priv->tx_dma_desc);
1419		if (ret)
1420			goto err_free_netdev;
1421
1422		priv->txdescmem = resource_size(dma_res);
1423		priv->txdescmem_busaddr = dma_res->start;
1424
1425		ret = request_and_map(pdev, "rx_desc", &dma_res,
1426				      &priv->rx_dma_desc);
1427		if (ret)
1428			goto err_free_netdev;
1429
1430		priv->rxdescmem = resource_size(dma_res);
1431		priv->rxdescmem_busaddr = dma_res->start;
1432
1433	} else {
1434		goto err_free_netdev;
1435	}
1436
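	/* Prefer the DMA engine's native addressing width (64-bit for the
	 * mSGDMA, 32-bit for the SGDMA); fall back to a 32-bit mask if the
	 * platform cannot satisfy it.
	 */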
1437	if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
1438		dma_set_coherent_mask(priv->device,
1439				      DMA_BIT_MASK(priv->dmaops->dmamask));
1440	else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
1441		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
1442	else
1443		goto err_free_netdev;
1444
1445	/* MAC address space */
1446	ret = request_and_map(pdev, "control_port", &control_port,
1447			      (void __iomem **)&priv->mac_dev);
1448	if (ret)
1449		goto err_free_netdev;
1450
1451	/* xSGDMA Rx Dispatcher address space */
1452	ret = request_and_map(pdev, "rx_csr", &dma_res,
1453			      &priv->rx_dma_csr);
1454	if (ret)
1455		goto err_free_netdev;

	/* xSGDMA Tx Dispatcher address space */
1459	ret = request_and_map(pdev, "tx_csr", &dma_res,
1460			      &priv->tx_dma_csr);
1461	if (ret)
1462		goto err_free_netdev;

	/* Rx IRQ */
1466	priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
1467	if (priv->rx_irq == -ENXIO) {
1468		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
1469		ret = -ENXIO;
1470		goto err_free_netdev;
1471	}
1472
1473	/* Tx IRQ */
1474	priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
1475	if (priv->tx_irq == -ENXIO) {
1476		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
1477		ret = -ENXIO;
1478		goto err_free_netdev;
1479	}
1480
1481	/* get FIFO depths from device tree */
1482	if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1483				 &priv->rx_fifo_depth)) {
1484		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
1485		ret = -ENXIO;
1486		goto err_free_netdev;
1487	}
1488
1489	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1490				 &priv->tx_fifo_depth)) {
1491		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1492		ret = -ENXIO;
1493		goto err_free_netdev;
1494	}
1495
1496	/* get hash filter settings for this instance */
1497	priv->hash_filter =
1498		of_property_read_bool(pdev->dev.of_node,
1499				      "altr,has-hash-multicast-filter");
1500
1501	/* Set hash filter to not set for now until the
1502	 * multicast filter receive issue is debugged
1503	 */
1504	priv->hash_filter = 0;
1505
1506	/* get supplemental address settings for this instance */
1507	priv->added_unicast =
1508		of_property_read_bool(pdev->dev.of_node,
1509				      "altr,has-supplementary-unicast");
1510
1511	priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
1512	/* Max MTU is 1500, ETH_DATA_LEN */
1513	priv->dev->max_mtu = ETH_DATA_LEN;
1514
1515	/* Get the max mtu from the device tree. Note that the
1516	 * "max-frame-size" parameter is actually max mtu. Definition
1517	 * in the ePAPR v1.1 spec and usage differ, so go with usage.
1518	 */
1519	of_property_read_u32(pdev->dev.of_node, "max-frame-size",
1520			     &priv->dev->max_mtu);
1521
	/* The DMA buffer size already accounts for an alignment bias
	 * to avoid unaligned access exceptions for the NIOS processor.
	 */
1525	priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
1526
1527	/* get default MAC address from device tree */
1528	macaddr = of_get_mac_address(pdev->dev.of_node);
1529	if (!IS_ERR(macaddr))
1530		ether_addr_copy(ndev->dev_addr, macaddr);
1531	else
1532		eth_hw_addr_random(ndev);
1533
1534	/* get phy addr and create mdio */
1535	ret = altera_tse_phy_get_addr_mdio_create(ndev);
1536
1537	if (ret)
1538		goto err_free_netdev;
1539
1540	/* initialize netdev */
1541	ndev->mem_start = control_port->start;
1542	ndev->mem_end = control_port->end;
1543	ndev->netdev_ops = &altera_tse_netdev_ops;
1544	altera_tse_set_ethtool_ops(ndev);
1545
1546	altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
1547
1548	if (priv->hash_filter)
1549		altera_tse_netdev_ops.ndo_set_rx_mode =
1550			tse_set_rx_mode_hashfilter;
1551
1552	/* Scatter/gather IO is not supported,
1553	 * so it is turned off
1554	 */
1555	ndev->hw_features &= ~NETIF_F_SG;
1556	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
1557
1558	/* VLAN offloading of tagging, stripping and filtering is not
1559	 * supported by hardware, but driver will accommodate the
1560	 * extra 4-byte VLAN tag for processing by upper layers
1561	 */
1562	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1563
1564	/* setup NAPI interface */
1565	netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
1566
1567	spin_lock_init(&priv->mac_cfg_lock);
1568	spin_lock_init(&priv->tx_lock);
1569	spin_lock_init(&priv->rxdma_irq_lock);
1570
1571	netif_carrier_off(ndev);
1572	ret = register_netdev(ndev);
1573	if (ret) {
1574		dev_err(&pdev->dev, "failed to register TSE net device\n");
1575		goto err_register_netdev;
1576	}
1577
1578	platform_set_drvdata(pdev, ndev);
1579
1580	priv->revision = ioread32(&priv->mac_dev->megacore_revision);
1581
1582	if (netif_msg_probe(priv))
1583		dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
1584			 (priv->revision >> 8) & 0xff,
1585			 priv->revision & 0xff,
1586			 (unsigned long) control_port->start, priv->rx_irq,
1587			 priv->tx_irq);
1588
1589	ret = init_phy(ndev);
1590	if (ret != 0) {
1591		netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
1592		goto err_init_phy;
1593	}
1594	return 0;
1595
1596err_init_phy:
1597	unregister_netdev(ndev);
1598err_register_netdev:
1599	netif_napi_del(&priv->napi);
1600	altera_tse_mdio_destroy(ndev);
1601err_free_netdev:
1602	free_netdev(ndev);
1603	return ret;
1604}
1605
1606/* Remove Altera TSE MAC device
1607 */
1608static int altera_tse_remove(struct platform_device *pdev)
1609{
1610	struct net_device *ndev = platform_get_drvdata(pdev);
1611	struct altera_tse_private *priv = netdev_priv(ndev);
1612
1613	if (ndev->phydev) {
1614		phy_disconnect(ndev->phydev);
1615
1616		if (of_phy_is_fixed_link(priv->device->of_node))
1617			of_phy_deregister_fixed_link(priv->device->of_node);
1618	}
1619
1620	platform_set_drvdata(pdev, NULL);
1621	altera_tse_mdio_destroy(ndev);
1622	unregister_netdev(ndev);
1623	free_netdev(ndev);
1624
1625	return 0;
1626}
1627
1628static const struct altera_dmaops altera_dtype_sgdma = {
1629	.altera_dtype = ALTERA_DTYPE_SGDMA,
1630	.dmamask = 32,
1631	.reset_dma = sgdma_reset,
1632	.enable_txirq = sgdma_enable_txirq,
1633	.enable_rxirq = sgdma_enable_rxirq,
1634	.disable_txirq = sgdma_disable_txirq,
1635	.disable_rxirq = sgdma_disable_rxirq,
1636	.clear_txirq = sgdma_clear_txirq,
1637	.clear_rxirq = sgdma_clear_rxirq,
1638	.tx_buffer = sgdma_tx_buffer,
1639	.tx_completions = sgdma_tx_completions,
1640	.add_rx_desc = sgdma_add_rx_desc,
1641	.get_rx_status = sgdma_rx_status,
1642	.init_dma = sgdma_initialize,
1643	.uninit_dma = sgdma_uninitialize,
1644	.start_rxdma = sgdma_start_rxdma,
1645};
1646
1647static const struct altera_dmaops altera_dtype_msgdma = {
1648	.altera_dtype = ALTERA_DTYPE_MSGDMA,
1649	.dmamask = 64,
1650	.reset_dma = msgdma_reset,
1651	.enable_txirq = msgdma_enable_txirq,
1652	.enable_rxirq = msgdma_enable_rxirq,
1653	.disable_txirq = msgdma_disable_txirq,
1654	.disable_rxirq = msgdma_disable_rxirq,
1655	.clear_txirq = msgdma_clear_txirq,
1656	.clear_rxirq = msgdma_clear_rxirq,
1657	.tx_buffer = msgdma_tx_buffer,
1658	.tx_completions = msgdma_tx_completions,
1659	.add_rx_desc = msgdma_add_rx_desc,
1660	.get_rx_status = msgdma_rx_status,
1661	.init_dma = msgdma_initialize,
1662	.uninit_dma = msgdma_uninitialize,
1663	.start_rxdma = msgdma_start_rxdma,
1664};
1665
1666static const struct of_device_id altera_tse_ids[] = {
1667	{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
1668	{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
1669	{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
1670	{},
1671};
1672MODULE_DEVICE_TABLE(of, altera_tse_ids);
1673
1674static struct platform_driver altera_tse_driver = {
1675	.probe		= altera_tse_probe,
1676	.remove		= altera_tse_remove,
1677	.suspend	= NULL,
1678	.resume		= NULL,
1679	.driver		= {
1680		.name	= ALTERA_TSE_RESOURCE_NAME,
1681		.of_match_table = altera_tse_ids,
1682	},
1683};
1684
1685module_platform_driver(altera_tse_driver);
1686
1687MODULE_AUTHOR("Altera Corporation");
1688MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
1689MODULE_LICENSE("GPL v2");