   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Xilinx Axi Ethernet device driver
   4 *
   5 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
   6 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
   7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
   8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
   9 * Copyright (c) 2010 - 2011 PetaLogix
  10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
  11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  12 *
  13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
  14 * and Spartan6.
  15 *
  16 * TODO:
  17 *  - Add Axi Fifo support.
  18 *  - Factor out Axi DMA code into separate driver.
  19 *  - Test and fix basic multicast filtering.
  20 *  - Add support for extended multicast filtering.
  21 *  - Test basic VLAN support.
  22 *  - Add support for extended VLAN support.
  23 */
  24
  25#include <linux/clk.h>
  26#include <linux/delay.h>
  27#include <linux/etherdevice.h>
  28#include <linux/module.h>
  29#include <linux/netdevice.h>
  30#include <linux/of.h>
  31#include <linux/of_mdio.h>
  32#include <linux/of_net.h>
  33#include <linux/of_irq.h>
  34#include <linux/of_address.h>
  35#include <linux/platform_device.h>
  36#include <linux/skbuff.h>
  37#include <linux/math64.h>
  38#include <linux/phy.h>
  39#include <linux/mii.h>
  40#include <linux/ethtool.h>
  41#include <linux/dmaengine.h>
  42#include <linux/dma-mapping.h>
  43#include <linux/dma/xilinx_dma.h>
  44#include <linux/circ_buf.h>
  45#include <net/netdev_queues.h>
  46
  47#include "xilinx_axienet.h"
  48
  49/* Descriptors defines for Tx and Rx DMA */
  50#define TX_BD_NUM_DEFAULT		128
  51#define RX_BD_NUM_DEFAULT		1024
  52#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
  53#define TX_BD_NUM_MAX			4096
  54#define RX_BD_NUM_MAX			4096
  55#define DMA_NUM_APP_WORDS		5
  56#define LEN_APP				4
  57#define RX_BUF_NUM_DEFAULT		128
  58
  59/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
  60#define DRIVER_NAME		"xaxienet"
  61#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
  62#define DRIVER_VERSION		"1.00a"
  63
  64#define AXIENET_REGS_N		40
  65
  66static void axienet_rx_submit_desc(struct net_device *ndev);
  67
  68/* Match table for of_platform binding */
  69static const struct of_device_id axienet_of_match[] = {
  70	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
  71	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
  72	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
  73	{},
  74};
  75
  76MODULE_DEVICE_TABLE(of, axienet_of_match);
  77
  78/* Option table for setting up Axi Ethernet hardware options */
  79static struct axienet_option axienet_options[] = {
  80	/* Turn on jumbo packet support for both Rx and Tx */
  81	{
  82		.opt = XAE_OPTION_JUMBO,
  83		.reg = XAE_TC_OFFSET,
  84		.m_or = XAE_TC_JUM_MASK,
  85	}, {
  86		.opt = XAE_OPTION_JUMBO,
  87		.reg = XAE_RCW1_OFFSET,
  88		.m_or = XAE_RCW1_JUM_MASK,
  89	}, { /* Turn on VLAN packet support for both Rx and Tx */
  90		.opt = XAE_OPTION_VLAN,
  91		.reg = XAE_TC_OFFSET,
  92		.m_or = XAE_TC_VLAN_MASK,
  93	}, {
  94		.opt = XAE_OPTION_VLAN,
  95		.reg = XAE_RCW1_OFFSET,
  96		.m_or = XAE_RCW1_VLAN_MASK,
  97	}, { /* Turn on FCS stripping on receive packets */
  98		.opt = XAE_OPTION_FCS_STRIP,
  99		.reg = XAE_RCW1_OFFSET,
 100		.m_or = XAE_RCW1_FCS_MASK,
 101	}, { /* Turn on FCS insertion on transmit packets */
 102		.opt = XAE_OPTION_FCS_INSERT,
 103		.reg = XAE_TC_OFFSET,
 104		.m_or = XAE_TC_FCS_MASK,
 105	}, { /* Turn off length/type field checking on receive packets */
 106		.opt = XAE_OPTION_LENTYPE_ERR,
 107		.reg = XAE_RCW1_OFFSET,
 108		.m_or = XAE_RCW1_LT_DIS_MASK,
 109	}, { /* Turn on Rx flow control */
 110		.opt = XAE_OPTION_FLOW_CONTROL,
 111		.reg = XAE_FCC_OFFSET,
 112		.m_or = XAE_FCC_FCRX_MASK,
 113	}, { /* Turn on Tx flow control */
 114		.opt = XAE_OPTION_FLOW_CONTROL,
 115		.reg = XAE_FCC_OFFSET,
 116		.m_or = XAE_FCC_FCTX_MASK,
 117	}, { /* Turn on promiscuous frame filtering */
 118		.opt = XAE_OPTION_PROMISC,
 119		.reg = XAE_FMI_OFFSET,
 120		.m_or = XAE_FMI_PM_MASK,
 121	}, { /* Enable transmitter */
 122		.opt = XAE_OPTION_TXEN,
 123		.reg = XAE_TC_OFFSET,
 124		.m_or = XAE_TC_TX_MASK,
 125	}, { /* Enable receiver */
 126		.opt = XAE_OPTION_RXEN,
 127		.reg = XAE_RCW1_OFFSET,
 128		.m_or = XAE_RCW1_RX_MASK,
 129	},
 130	{}
 131};
 132
 133static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
 134{
 135	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
 136}
 137
 138static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
 139{
 140	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
 141}
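/* Illustrative sketch (not part of the driver): the "& (size - 1)" masking
 * in the two ring accessors above is a cheap modulo that is valid only
 * because RX_BUF_NUM_DEFAULT (128) and TX_BD_NUM_MAX (4096) are powers of
 * two. With a hypothetical ring of 8 slots:
 *
 *	unsigned int head = 10;			// free-running index
 *	unsigned int slot = head & (8 - 1);	// == head % 8 == 2
 *
 * A free-running u32 index may eventually wrap, but the mask keeps the slot
 * in range either way, so no explicit wraparound handling is needed.
 */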
 142
 143/**
 144 * axienet_dma_in32 - Memory mapped Axi DMA register read
 145 * @lp:		Pointer to axienet local structure
 146 * @reg:	Address offset from the base address of the Axi DMA core
 147 *
 148 * Return: The contents of the Axi DMA register
 149 *
 150 * This function returns the contents of the corresponding Axi DMA register.
 151 */
 152static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 153{
 154	return ioread32(lp->dma_regs + reg);
 155}
 156
 157static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
 158			       struct axidma_bd *desc)
 159{
 160	desc->phys = lower_32_bits(addr);
 161	if (lp->features & XAE_FEATURE_DMA_64BIT)
 162		desc->phys_msb = upper_32_bits(addr);
 163}
 164
 165static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
 166				     struct axidma_bd *desc)
 167{
 168	dma_addr_t ret = desc->phys;
 169
 170	if (lp->features & XAE_FEATURE_DMA_64BIT)
 171		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
 172
 173	return ret;
 174}
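/* Illustrative sketch (not part of the driver): the double 16-bit shift
 * above avoids shifting by the full width of the type, which is undefined
 * behaviour when dma_addr_t is only 32 bits wide. With an assumed 64-bit
 * address:
 *
 *	addr           = 0x0000000123456780
 *	desc->phys     = lower_32_bits(addr);	// 0x23456780
 *	desc->phys_msb = upper_32_bits(addr);	// 0x00000001
 *	reassembled    = ((dma_addr_t)0x1 << 16) << 16 | 0x23456780;
 */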
 175
 176/**
 177 * axienet_dma_bd_release - Release buffer descriptor rings
 178 * @ndev:	Pointer to the net_device structure
 179 *
 180 * This function is used to release the descriptors allocated in
 181 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi
 182 * Ethernet driver's stop routine is called.
 183 */
 184static void axienet_dma_bd_release(struct net_device *ndev)
 185{
 186	int i;
 187	struct axienet_local *lp = netdev_priv(ndev);
 188
 189	/* If we end up here, tx_bd_v must have been DMA allocated. */
 190	dma_free_coherent(lp->dev,
 191			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 192			  lp->tx_bd_v,
 193			  lp->tx_bd_p);
 194
 195	if (!lp->rx_bd_v)
 196		return;
 197
 198	for (i = 0; i < lp->rx_bd_num; i++) {
 199		dma_addr_t phys;
 200
 201		/* A NULL skb means this descriptor has not been initialised
 202		 * at all.
 203		 */
 204		if (!lp->rx_bd_v[i].skb)
 205			break;
 206
 207		dev_kfree_skb(lp->rx_bd_v[i].skb);
 208
 209		/* For each descriptor, we programmed cntrl with the (non-zero)
 210		 * descriptor size, after it had been successfully allocated.
 211		 * So a non-zero value in there means we need to unmap it.
 212		 */
 213		if (lp->rx_bd_v[i].cntrl) {
 214			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
 215			dma_unmap_single(lp->dev, phys,
 216					 lp->max_frm_size, DMA_FROM_DEVICE);
 217		}
 218	}
 219
 220	dma_free_coherent(lp->dev,
 221			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 222			  lp->rx_bd_v,
 223			  lp->rx_bd_p);
 224}
 225
 226/**
 227 * axienet_usec_to_timer - Calculate IRQ delay timer value
 228 * @lp:		Pointer to the axienet_local structure
 229 * @coalesce_usec: Microseconds to convert into timer value
 230 */
 231static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
 232{
 233	u32 result;
 234	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
 235
 236	if (lp->axi_clk)
 237		clk_rate = clk_get_rate(lp->axi_clk);
 238
 239	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
 240	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
 241					 (u64)125000000);
 242	if (result > 255)
 243		result = 255;
 244
 245	return result;
 246}
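/* Worked example (assumed numbers, not driver code): one hardware timeout
 * interval is 125 SG-clock periods, so with the default 125 MHz clock an
 * interval is exactly 1 us and the programmed value equals the requested
 * microseconds:
 *
 *	coalesce_usec = 50, clk_rate = 125000000
 *	result = (50 * 125000000) / 125000000 = 50
 *
 * With a hypothetical 100 MHz clock the same request rounds to
 * (50 * 100000000) / 125000000 = 40. Results are clamped to 255, the width
 * of the hardware timer field.
 */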
 247
 248/**
 249 * axienet_dma_start - Set up DMA registers and start DMA operation
 250 * @lp:		Pointer to the axienet_local structure
 251 */
 252static void axienet_dma_start(struct axienet_local *lp)
 253{
 254	/* Start updating the Rx channel control register */
 255	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
 256			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 257	/* Only set interrupt delay timer if not generating an interrupt on
 258	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
 259	 */
 260	if (lp->coalesce_count_rx > 1)
 261		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
 262					<< XAXIDMA_DELAY_SHIFT) |
 263				 XAXIDMA_IRQ_DELAY_MASK;
 264	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 265
 266	/* Start updating the Tx channel control register */
 267	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
 268			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 269	/* Only set interrupt delay timer if not generating an interrupt on
 270	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
 271	 */
 272	if (lp->coalesce_count_tx > 1)
 273		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
 274					<< XAXIDMA_DELAY_SHIFT) |
 275				 XAXIDMA_IRQ_DELAY_MASK;
 276	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 277
 278	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 279	 * halted state. This will make the Rx side ready for reception.
 280	 */
 281	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 282	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 283	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 284	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
 285			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
 286
 287	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 288	 * Tx channel is now ready to run. But only after we write to the
 289	 * tail pointer register that the Tx channel will start transmitting.
 290	 */
 291	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 292	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 293	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 294}
 295
 296/**
 297 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 298 * @ndev:	Pointer to the net_device structure
 299 *
 300 * Return: 0 on success; -ENOMEM on failure
 301 *
 302 * This function is called to initialize the Rx and Tx DMA descriptor
 303 * rings. This initializes the descriptors with required default values
 304 * and is called when Axi Ethernet driver reset is called.
 305 */
 306static int axienet_dma_bd_init(struct net_device *ndev)
 307{
 308	int i;
 309	struct sk_buff *skb;
 310	struct axienet_local *lp = netdev_priv(ndev);
 311
 312	/* Reset the indexes which are used for accessing the BDs */
 313	lp->tx_bd_ci = 0;
 314	lp->tx_bd_tail = 0;
 315	lp->rx_bd_ci = 0;
 316
 317	/* Allocate the Tx and Rx buffer descriptors. */
 318	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
 319					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 320					 &lp->tx_bd_p, GFP_KERNEL);
 321	if (!lp->tx_bd_v)
 322		return -ENOMEM;
 323
 324	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
 325					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 326					 &lp->rx_bd_p, GFP_KERNEL);
 327	if (!lp->rx_bd_v)
 328		goto out;
 329
 330	for (i = 0; i < lp->tx_bd_num; i++) {
 331		dma_addr_t addr = lp->tx_bd_p +
 332				  sizeof(*lp->tx_bd_v) *
 333				  ((i + 1) % lp->tx_bd_num);
 334
 335		lp->tx_bd_v[i].next = lower_32_bits(addr);
 336		if (lp->features & XAE_FEATURE_DMA_64BIT)
 337			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
 338	}
 339
 340	for (i = 0; i < lp->rx_bd_num; i++) {
 341		dma_addr_t addr;
 342
 343		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
 344			((i + 1) % lp->rx_bd_num);
 345		lp->rx_bd_v[i].next = lower_32_bits(addr);
 346		if (lp->features & XAE_FEATURE_DMA_64BIT)
 347			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
 348
 349		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 350		if (!skb)
 351			goto out;
 352
 353		lp->rx_bd_v[i].skb = skb;
 354		addr = dma_map_single(lp->dev, skb->data,
 355				      lp->max_frm_size, DMA_FROM_DEVICE);
 356		if (dma_mapping_error(lp->dev, addr)) {
 357			netdev_err(ndev, "DMA mapping error\n");
 358			goto out;
 359		}
 360		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
 361
 362		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 363	}
 364
 365	axienet_dma_start(lp);
 366
 367	return 0;
 368out:
 369	axienet_dma_bd_release(ndev);
 370	return -ENOMEM;
 371}
 372
 373/**
 374 * axienet_set_mac_address - Write the MAC address
 375 * @ndev:	Pointer to the net_device structure
 376 * @address:	6 byte Address to be written as MAC address
 377 *
 378 * This function is called to initialize the MAC address of the Axi Ethernet
 379 * core. It writes to the UAW0 and UAW1 registers of the core.
 380 */
 381static void axienet_set_mac_address(struct net_device *ndev,
 382				    const void *address)
 383{
 384	struct axienet_local *lp = netdev_priv(ndev);
 385
 386	if (address)
 387		eth_hw_addr_set(ndev, address);
 388	if (!is_valid_ether_addr(ndev->dev_addr))
 389		eth_hw_addr_random(ndev);
 390
 391	/* Set up the unicast MAC address filter with the given MAC address */
 392	axienet_iow(lp, XAE_UAW0_OFFSET,
 393		    (ndev->dev_addr[0]) |
 394		    (ndev->dev_addr[1] << 8) |
 395		    (ndev->dev_addr[2] << 16) |
 396		    (ndev->dev_addr[3] << 24));
 397	axienet_iow(lp, XAE_UAW1_OFFSET,
 398		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
 399		      ~XAE_UAW1_UNICASTADDR_MASK) |
 400		     (ndev->dev_addr[4] |
 401		     (ndev->dev_addr[5] << 8))));
 402}
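/* Illustrative sketch (not part of the driver): UAW0 takes the first four
 * MAC bytes in little-endian order and UAW1 keeps its upper bits while
 * taking the last two. For a hypothetical address 00:0a:35:01:02:03 the
 * writes above produce:
 *
 *	UAW0 = 0x01350a00	// bytes 3..0 -> 0x01 0x35 0x0a 0x00
 *	UAW1 = (UAW1 & ~XAE_UAW1_UNICASTADDR_MASK) | 0x0302
 */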
 403
 404/**
 405 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 406 * @ndev:	Pointer to the net_device structure
 407 * @p:		6 byte Address to be written as MAC address
 408 *
 409 * Return: 0 for all conditions. Presently, there is no failure case.
 410 *
 411 * This function is called to initialize the MAC address of the Axi Ethernet
 412 * core. It calls the core specific axienet_set_mac_address. This is the
 413 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 414 */
 415static int netdev_set_mac_address(struct net_device *ndev, void *p)
 416{
 417	struct sockaddr *addr = p;
 418	axienet_set_mac_address(ndev, addr->sa_data);
 419	return 0;
 420}
 421
 422/**
 423 * axienet_set_multicast_list - Prepare the multicast table
 424 * @ndev:	Pointer to the net_device structure
 425 *
 426 * This function is called to initialize the multicast table during
 427 * initialization. The Axi Ethernet basic multicast support has a four-entry
 428 * multicast table which is initialized here. Additionally this function
 429 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 430 * means whenever the multicast table entries need to be updated this
 431 * function gets called.
 432 */
 433static void axienet_set_multicast_list(struct net_device *ndev)
 434{
 435	int i;
 436	u32 reg, af0reg, af1reg;
 437	struct axienet_local *lp = netdev_priv(ndev);
 438
 439	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
 440	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
 441		/* We must make the kernel realize we had to move into
 442		 * promiscuous mode. If it was a promiscuous mode request
 443		 * the flag is already set. If not we set it.
 444		 */
 445		ndev->flags |= IFF_PROMISC;
 446		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 447		reg |= XAE_FMI_PM_MASK;
 448		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 449		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
 450	} else if (!netdev_mc_empty(ndev)) {
 451		struct netdev_hw_addr *ha;
 452
 453		i = 0;
 454		netdev_for_each_mc_addr(ha, ndev) {
 455			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
 456				break;
 457
 458			af0reg = (ha->addr[0]);
 459			af0reg |= (ha->addr[1] << 8);
 460			af0reg |= (ha->addr[2] << 16);
 461			af0reg |= (ha->addr[3] << 24);
 462
 463			af1reg = (ha->addr[4]);
 464			af1reg |= (ha->addr[5] << 8);
 465
 466			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 467			reg |= i;
 468
 469			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 470			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
 471			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
 472			i++;
 473		}
 474	} else {
 475		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 476		reg &= ~XAE_FMI_PM_MASK;
 477
 478		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 479
 480		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
 481			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 482			reg |= i;
 483
 484			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 485			axienet_iow(lp, XAE_AF0_OFFSET, 0);
 486			axienet_iow(lp, XAE_AF1_OFFSET, 0);
 487		}
 488
 489		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
 490	}
 491}
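/* Illustrative sketch (not part of the driver): the address filter CAM is
 * programmed indirectly -- the low byte of FMI selects one of the
 * XAE_MULTICAST_CAM_TABLE_NUM (4) entries and the subsequent AF0/AF1
 * writes fill that entry. Loading hypothetical entry 2 would look like:
 *
 *	reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 *	axienet_iow(lp, XAE_FMI_OFFSET, reg | 2);	// select entry 2
 *	axienet_iow(lp, XAE_AF0_OFFSET, af0reg);	// MAC bytes 0-3
 *	axienet_iow(lp, XAE_AF1_OFFSET, af1reg);	// MAC bytes 4-5
 */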
 492
 493/**
 494 * axienet_setoptions - Set an Axi Ethernet option
 495 * @ndev:	Pointer to the net_device structure
 496 * @options:	Option to be enabled/disabled
 497 *
 498 * The Axi Ethernet core has multiple features which can be selectively turned
 499 * on or off. The typical options could be jumbo frame option, basic VLAN
 500 * option, promiscuous mode option etc. This function is used to set or clear
 501 * these options in the Axi Ethernet hardware. This is done through
 502 * the axienet_option structure.
 503 */
 504static void axienet_setoptions(struct net_device *ndev, u32 options)
 505{
 506	int reg;
 507	struct axienet_local *lp = netdev_priv(ndev);
 508	struct axienet_option *tp = &axienet_options[0];
 509
 510	while (tp->opt) {
 511		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
 512		if (options & tp->opt)
 513			reg |= tp->m_or;
 514		axienet_iow(lp, tp->reg, reg);
 515		tp++;
 516	}
 517
 518	lp->options |= options;
 519}
 520
 521static int __axienet_device_reset(struct axienet_local *lp)
 522{
 523	u32 value;
 524	int ret;
 525
 526	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 527	 * process of Axi DMA takes a while to complete as all pending
 528	 * commands/transfers will be flushed or completed during this
 529	 * reset process.
 530	 * Note that even though both TX and RX have their own reset register,
 531	 * they both reset the entire DMA core, so only one needs to be used.
 532	 */
 533	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
 534	ret = read_poll_timeout(axienet_dma_in32, value,
 535				!(value & XAXIDMA_CR_RESET_MASK),
 536				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 537				XAXIDMA_TX_CR_OFFSET);
 538	if (ret) {
 539		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
 540		return ret;
 541	}
 542
 543	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
 544	ret = read_poll_timeout(axienet_ior, value,
 545				value & XAE_INT_PHYRSTCMPLT_MASK,
 546				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 547				XAE_IS_OFFSET);
 548	if (ret) {
 549		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
 550		return ret;
 551	}
 552
 553	return 0;
 554}
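/* Illustrative sketch (not part of the driver): each read_poll_timeout()
 * above expands to a polling loop roughly equivalent to the following,
 * with the trailing arguments forwarded to the read op:
 *
 *	for (;;) {
 *		value = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 *		if (!(value & XAXIDMA_CR_RESET_MASK))
 *			break;			// reset bit self-cleared
 *		if (elapsed_us > 50000)
 *			return -ETIMEDOUT;	// timeout budget in us
 *		usleep_range(...);		// ~DELAY_OF_ONE_MILLISEC
 *	}
 */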
 555
 556/**
 557 * axienet_dma_stop - Stop DMA operation
 558 * @lp:		Pointer to the axienet_local structure
 559 */
 560static void axienet_dma_stop(struct axienet_local *lp)
 561{
 562	int count;
 563	u32 cr, sr;
 564
 565	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 566	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 567	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 568	synchronize_irq(lp->rx_irq);
 569
 570	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 571	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 572	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 573	synchronize_irq(lp->tx_irq);
 574
 575	/* Give DMAs a chance to halt gracefully */
 576	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 577	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 578		msleep(20);
 579		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 580	}
 581
 582	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 583	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 584		msleep(20);
 585		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 586	}
 587
 588	/* Do a reset to ensure DMA is really stopped */
 589	axienet_lock_mii(lp);
 590	__axienet_device_reset(lp);
 591	axienet_unlock_mii(lp);
 592}
 593
 594/**
 595 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 596 * @ndev:	Pointer to the net_device structure
 597 *
 598 * This function is called to reset and initialize the Axi Ethernet core. This
 599 * is typically called during initialization. It does a reset of the Axi DMA
 600 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 601 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 602 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 603 * core.
 604 * Returns 0 on success or a negative error number otherwise.
 605 */
 606static int axienet_device_reset(struct net_device *ndev)
 607{
 608	u32 axienet_status;
 609	struct axienet_local *lp = netdev_priv(ndev);
 610	int ret;
 611
 612	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 613	lp->options |= XAE_OPTION_VLAN;
 614	lp->options &= (~XAE_OPTION_JUMBO);
 615
 616	if ((ndev->mtu > XAE_MTU) &&
 617	    (ndev->mtu <= XAE_JUMBO_MTU)) {
 618		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
 619					XAE_TRL_SIZE;
 620
 621		if (lp->max_frm_size <= lp->rxmem)
 622			lp->options |= XAE_OPTION_JUMBO;
 623	}
 624
 625	if (!lp->use_dmaengine) {
 626		ret = __axienet_device_reset(lp);
 627		if (ret)
 628			return ret;
 629
 630		ret = axienet_dma_bd_init(ndev);
 631		if (ret) {
 632			netdev_err(ndev, "%s: descriptor allocation failed\n",
 633				   __func__);
 634			return ret;
 635		}
 636	}
 637
 638	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
 639	axienet_status &= ~XAE_RCW1_RX_MASK;
 640	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
 641
 642	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
 643	if (axienet_status & XAE_INT_RXRJECT_MASK)
 644		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
 645	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
 646		    XAE_INT_RECV_ERROR_MASK : 0);
 647
 648	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 649
 650	/* Sync default options with HW but leave receiver and
 651	 * transmitter disabled.
 652	 */
 653	axienet_setoptions(ndev, lp->options &
 654			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 655	axienet_set_mac_address(ndev, NULL);
 656	axienet_set_multicast_list(ndev);
 657	axienet_setoptions(ndev, lp->options);
 658
 659	netif_trans_update(ndev);
 660
 661	return 0;
 662}
 663
 664/**
 665 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 666 * @lp:		Pointer to the axienet_local structure
 667 * @first_bd:	Index of first descriptor to clean up
 668 * @nr_bds:	Max number of descriptors to clean up
 669 * @force:	Whether to clean descriptors even if not complete
 670 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 671 *		in all cleaned-up descriptors. Ignored if NULL.
 672 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 673 *
 674 * Would either be called after a successful transmit operation, or after
 675 * there was an error when setting up the chain.
 676 * Returns the number of descriptors handled.
 677 */
 678static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
 679				 int nr_bds, bool force, u32 *sizep, int budget)
 680{
 681	struct axidma_bd *cur_p;
 682	unsigned int status;
 683	dma_addr_t phys;
 684	int i;
 685
 686	for (i = 0; i < nr_bds; i++) {
 687		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
 688		status = cur_p->status;
 689
 690		/* If force is not specified, clean up only descriptors
 691		 * that have been completed by the MAC.
 692		 */
 693		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 694			break;
 695
 696		/* Ensure we see complete descriptor update */
 697		dma_rmb();
 698		phys = desc_get_phys_addr(lp, cur_p);
 699		dma_unmap_single(lp->dev, phys,
 700				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 701				 DMA_TO_DEVICE);
 702
 703		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
 704			napi_consume_skb(cur_p->skb, budget);
 705
 706		cur_p->app0 = 0;
 707		cur_p->app1 = 0;
 708		cur_p->app2 = 0;
 709		cur_p->app4 = 0;
 710		cur_p->skb = NULL;
 711		/* ensure our transmit path and device don't prematurely see status cleared */
 712		wmb();
 713		cur_p->cntrl = 0;
 714		cur_p->status = 0;
 715
 716		if (sizep)
 717			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 718	}
 719
 720	return i;
 721}
 722
 723/**
 724 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 725 * @lp:		Pointer to the axienet_local structure
 726 * @num_frag:	The number of BDs to check for
 727 *
 728 * Return: 0, on success
 729 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 730 *
 731 * This function is invoked before BDs are allocated and transmission starts.
 732 * This function returns 0 if a BD or group of BDs can be allocated for
 733 * transmission. If the BD or any of the BDs are not free the function
 734 * returns a busy status.
 735 */
 736static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 737					    int num_frag)
 738{
 739	struct axidma_bd *cur_p;
 740
 741	/* Ensure we see all descriptor updates from device or TX polling */
 742	rmb();
 743	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
 744			     lp->tx_bd_num];
 745	if (cur_p->cntrl)
 746		return NETDEV_TX_BUSY;
 747	return 0;
 748}
 749
 750/**
 751 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 752 * @data:       Pointer to the axienet_local structure.
 753 * @result:     error reporting through dmaengine_result.
 754 * This function is called by dmaengine driver for TX channel to notify
 755 * that the transmit is done.
 756 */
 757static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
 758{
 759	struct skbuf_dma_descriptor *skbuf_dma;
 760	struct axienet_local *lp = data;
 761	struct netdev_queue *txq;
 762	int len;
 763
 764	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
 765	len = skbuf_dma->skb->len;
 766	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
 767	u64_stats_update_begin(&lp->tx_stat_sync);
 768	u64_stats_add(&lp->tx_bytes, len);
 769	u64_stats_add(&lp->tx_packets, 1);
 770	u64_stats_update_end(&lp->tx_stat_sync);
 771	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
 772	dev_consume_skb_any(skbuf_dma->skb);
 773	netif_txq_completed_wake(txq, 1, len,
 774				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 775				 2 * MAX_SKB_FRAGS);
 776}
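/* Illustrative sketch (not part of the driver): CIRC_SPACE() used above is
 * the standard <linux/circ_buf.h> helper. With free-running head/tail
 * indices and a power-of-two ring it reports how many slots can still be
 * produced while always leaving one slot unused:
 *
 *	head = 5, tail = 2, size = 4096
 *	CIRC_SPACE(head, tail, size) == (tail - (head + 1)) & (size - 1)
 *				     == (2 - 6) & 4095 == 4092
 */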
 777
 778/**
 779 * axienet_start_xmit_dmaengine - Starts the transmission.
 780 * @skb:        sk_buff pointer that contains data to be Txed.
 781 * @ndev:       Pointer to net_device structure.
 782 *
 783 * Return: NETDEV_TX_OK on success or for any non-space error.
 784 *         NETDEV_TX_BUSY when no free element is available in the
 785 *         TX skb ring buffer.
 786 *
 787 * This function is invoked to initiate transmission. The
 788 * function sets up the skb, registers the DMA callback and submits
 789 * the DMA transaction.
 790 * Additionally if checksum offloading is supported,
 791 * it populates AXI Stream Control fields with appropriate values.
 792 */
 793static netdev_tx_t
 794axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
 795{
 796	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
 797	struct axienet_local *lp = netdev_priv(ndev);
 798	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
 799	struct skbuf_dma_descriptor *skbuf_dma;
 800	struct dma_device *dma_dev;
 801	struct netdev_queue *txq;
 802	u32 csum_start_off;
 803	u32 csum_index_off;
 804	int sg_len;
 805	int ret;
 806
 807	dma_dev = lp->tx_chan->device;
 808	sg_len = skb_shinfo(skb)->nr_frags + 1;
 809	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
 810		netif_stop_queue(ndev);
 811		if (net_ratelimit())
 812			netdev_warn(ndev, "TX ring unexpectedly full\n");
 813		return NETDEV_TX_BUSY;
 814	}
 815
 816	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
 817	if (!skbuf_dma)
 818		goto xmit_error_drop_skb;
 819
 820	lp->tx_ring_head++;
 821	sg_init_table(skbuf_dma->sgl, sg_len);
 822	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
 823	if (ret < 0)
 824		goto xmit_error_drop_skb;
 825
 826	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
 827	if (!ret)
 828		goto xmit_error_drop_skb;
 829
 830	/* Fill up app fields for checksum */
 831	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 832		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 833			/* Tx Full Checksum Offload Enabled */
 834			app_metadata[0] |= 2;
 835		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
 836			csum_start_off = skb_transport_offset(skb);
 837			csum_index_off = csum_start_off + skb->csum_offset;
 838			/* Tx Partial Checksum Offload Enabled */
 839			app_metadata[0] |= 1;
 840			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
 841		}
 842	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 843		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
 844	}
 845
 846	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
 847			sg_len, DMA_MEM_TO_DEV,
 848			DMA_PREP_INTERRUPT, (void *)app_metadata);
 849	if (!dma_tx_desc)
 850		goto xmit_error_unmap_sg;
 851
 852	skbuf_dma->skb = skb;
 853	skbuf_dma->sg_len = sg_len;
 854	dma_tx_desc->callback_param = lp;
 855	dma_tx_desc->callback_result = axienet_dma_tx_cb;
 856	dmaengine_submit(dma_tx_desc);
 857	dma_async_issue_pending(lp->tx_chan);
 858	txq = skb_get_tx_queue(lp->ndev, skb);
 859	netdev_tx_sent_queue(txq, skb->len);
 860	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 861			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
 862
 863	return NETDEV_TX_OK;
 864
 865xmit_error_unmap_sg:
 866	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
 867xmit_error_drop_skb:
 868	dev_kfree_skb_any(skb);
 869	return NETDEV_TX_OK;
 870}
 871
 872/**
 873 * axienet_tx_poll - Invoked once a transmit is completed by the
 874 * Axi DMA Tx channel.
 875 * @napi:	Pointer to NAPI structure.
 876 * @budget:	Max number of TX packets to process.
 877 *
 878 * Return: Number of TX packets processed.
 879 *
 880 * This function is invoked from the NAPI processing to notify the completion
 881 * of transmit operation. It clears fields in the corresponding Tx BDs and
 882 * unmaps the corresponding buffer so that CPU can regain ownership of the
 883 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 884 * required.
 885 */
 886static int axienet_tx_poll(struct napi_struct *napi, int budget)
 887{
 888	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
 889	struct net_device *ndev = lp->ndev;
 890	u32 size = 0;
 891	int packets;
 892
 893	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
 894
 895	if (packets) {
 896		lp->tx_bd_ci += packets;
 897		if (lp->tx_bd_ci >= lp->tx_bd_num)
 898			lp->tx_bd_ci %= lp->tx_bd_num;
 899
 900		u64_stats_update_begin(&lp->tx_stat_sync);
 901		u64_stats_add(&lp->tx_packets, packets);
 902		u64_stats_add(&lp->tx_bytes, size);
 903		u64_stats_update_end(&lp->tx_stat_sync);
 904
 905		/* Matches barrier in axienet_start_xmit */
 906		smp_mb();
 907
 908		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
 909			netif_wake_queue(ndev);
 910	}
 911
 912	if (packets < budget && napi_complete_done(napi, packets)) {
 913		/* Re-enable TX completion interrupts. This should
 914		 * cause an immediate interrupt if any TX packets are
 915		 * already pending.
 916		 */
 917		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 918	}
 919	return packets;
 920}
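/* Illustrative sketch (not part of the driver): the smp_mb() above pairs
 * with the one in axienet_start_xmit() to close the classic stop/wake race:
 *
 *	xmit path			poll path
 *	-------------------------	-------------------------
 *	see ring full, stop queue	free descriptors
 *	smp_mb()			smp_mb()
 *	recheck space, maybe wake	check space, maybe wake
 *
 * Whichever side runs second is guaranteed to observe the other's update,
 * so the queue cannot stay stopped while free descriptors exist.
 */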
 921
 922/**
 923 * axienet_start_xmit - Starts the transmission.
 924 * @skb:	sk_buff pointer that contains data to be Txed.
 925 * @ndev:	Pointer to net_device structure.
 926 *
 927 * Return: NETDEV_TX_OK, on success
 928 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 929 *
 930 * This function is invoked from upper layers to initiate transmission. The
 931 * function uses the next available free BDs and populates their fields to
 932 * start the transmission. Additionally if checksum offloading is supported,
 933 * it populates AXI Stream Control fields with appropriate values.
 934 */
 935static netdev_tx_t
 936axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 937{
 938	u32 ii;
 939	u32 num_frag;
 940	u32 csum_start_off;
 941	u32 csum_index_off;
 942	skb_frag_t *frag;
 943	dma_addr_t tail_p, phys;
 944	u32 orig_tail_ptr, new_tail_ptr;
 945	struct axienet_local *lp = netdev_priv(ndev);
 946	struct axidma_bd *cur_p;
 947
 948	orig_tail_ptr = lp->tx_bd_tail;
 949	new_tail_ptr = orig_tail_ptr;
 950
 951	num_frag = skb_shinfo(skb)->nr_frags;
 952	cur_p = &lp->tx_bd_v[orig_tail_ptr];
 953
 954	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
 955		/* Should not happen as last start_xmit call should have
 956		 * checked for sufficient space and queue should only be
 957		 * woken when sufficient space is available.
 958		 */
 959		netif_stop_queue(ndev);
 960		if (net_ratelimit())
 961			netdev_warn(ndev, "TX ring unexpectedly full\n");
 962		return NETDEV_TX_BUSY;
 963	}
 964
 965	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 966		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 967			/* Tx Full Checksum Offload Enabled */
 968			cur_p->app0 |= 2;
 969		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
 970			csum_start_off = skb_transport_offset(skb);
 971			csum_index_off = csum_start_off + skb->csum_offset;
 972			/* Tx Partial Checksum Offload Enabled */
 973			cur_p->app0 |= 1;
 974			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
 975		}
 976	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 977		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
 978	}
 979
 980	phys = dma_map_single(lp->dev, skb->data,
 981			      skb_headlen(skb), DMA_TO_DEVICE);
 982	if (unlikely(dma_mapping_error(lp->dev, phys))) {
 983		if (net_ratelimit())
 984			netdev_err(ndev, "TX DMA mapping error\n");
 985		ndev->stats.tx_dropped++;
 986		return NETDEV_TX_OK;
 987	}
 988	desc_set_phys_addr(lp, phys, cur_p);
 989	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 990
 991	for (ii = 0; ii < num_frag; ii++) {
 992		if (++new_tail_ptr >= lp->tx_bd_num)
 993			new_tail_ptr = 0;
 994		cur_p = &lp->tx_bd_v[new_tail_ptr];
 995		frag = &skb_shinfo(skb)->frags[ii];
 996		phys = dma_map_single(lp->dev,
 997				      skb_frag_address(frag),
 998				      skb_frag_size(frag),
 999				      DMA_TO_DEVICE);
1000		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1001			if (net_ratelimit())
1002				netdev_err(ndev, "TX DMA mapping error\n");
1003			ndev->stats.tx_dropped++;
1004			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1005					      true, NULL, 0);
1006			return NETDEV_TX_OK;
1007		}
1008		desc_set_phys_addr(lp, phys, cur_p);
1009		cur_p->cntrl = skb_frag_size(frag);
1010	}
1011
1012	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1013	cur_p->skb = skb;
1014
1015	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1016	if (++new_tail_ptr >= lp->tx_bd_num)
1017		new_tail_ptr = 0;
1018	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1019
1020	/* Start the transfer */
1021	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1022
1023	/* Stop queue if next transmit may not have space */
1024	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1025		netif_stop_queue(ndev);
1026
1027		/* Matches barrier in axienet_tx_poll */
1028		smp_mb();
1029
1030		/* Space might have just been freed - check again */
1031		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1032			netif_wake_queue(ndev);
1033	}
1034
1035	return NETDEV_TX_OK;
1036}
1037
1038/**
1039 * axienet_dma_rx_cb - DMA engine callback for RX channel.
1040 * @data:       Pointer to the skbuf_dma_descriptor structure.
1041 * @result:     error reporting through dmaengine_result.
1042 * This function is called by dmaengine driver for RX channel to notify
1043 * that the packet is received.
1044 */
1045static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1046{
1047	struct skbuf_dma_descriptor *skbuf_dma;
1048	size_t meta_len, meta_max_len, rx_len;
1049	struct axienet_local *lp = data;
1050	struct sk_buff *skb;
1051	u32 *app_metadata;
1052
1053	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1054	skb = skbuf_dma->skb;
1055	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1056						       &meta_max_len);
1057	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1058			 DMA_FROM_DEVICE);
1059	/* TODO: Derive app word index programmatically */
1060	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1061	skb_put(skb, rx_len);
1062	skb->protocol = eth_type_trans(skb, lp->ndev);
1063	skb->ip_summed = CHECKSUM_NONE;
1064
1065	__netif_rx(skb);
1066	u64_stats_update_begin(&lp->rx_stat_sync);
1067	u64_stats_add(&lp->rx_packets, 1);
1068	u64_stats_add(&lp->rx_bytes, rx_len);
1069	u64_stats_update_end(&lp->rx_stat_sync);
1070	axienet_rx_submit_desc(lp->ndev);
1071	dma_async_issue_pending(lp->rx_chan);
1072}
1073
1074/**
1075 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1076 * @napi:	Pointer to NAPI structure.
1077 * @budget:	Max number of RX packets to process.
1078 *
1079 * Return: Number of RX packets processed.
1080 */
1081static int axienet_rx_poll(struct napi_struct *napi, int budget)
1082{
1083	u32 length;
1084	u32 csumstatus;
1085	u32 size = 0;
1086	int packets = 0;
1087	dma_addr_t tail_p = 0;
1088	struct axidma_bd *cur_p;
1089	struct sk_buff *skb, *new_skb;
1090	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1091
1092	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1093
1094	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1095		dma_addr_t phys;
1096
1097		/* Ensure we see complete descriptor update */
1098		dma_rmb();
1099
1100		skb = cur_p->skb;
1101		cur_p->skb = NULL;
1102
1103		/* skb could be NULL if a previous pass already received the
1104		 * packet for this slot in the ring, but failed to refill it
1105		 * with a newly allocated buffer. In this case, don't try to
1106		 * receive it again.
1107		 */
1108		if (likely(skb)) {
1109			length = cur_p->app4 & 0x0000FFFF;
1110
1111			phys = desc_get_phys_addr(lp, cur_p);
1112			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1113					 DMA_FROM_DEVICE);
1114
1115			skb_put(skb, length);
1116			skb->protocol = eth_type_trans(skb, lp->ndev);
1117			/*skb_checksum_none_assert(skb);*/
1118			skb->ip_summed = CHECKSUM_NONE;
1119
1120			/* if we're doing Rx csum offload, set it up */
1121			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1122				csumstatus = (cur_p->app2 &
1123					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
1124				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1125				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1126					skb->ip_summed = CHECKSUM_UNNECESSARY;
1127				}
1128			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
1129				   skb->protocol == htons(ETH_P_IP) &&
1130				   skb->len > 64) {
1131				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1132				skb->ip_summed = CHECKSUM_COMPLETE;
1133			}
1134
1135			napi_gro_receive(napi, skb);
1136
1137			size += length;
1138			packets++;
1139		}
1140
1141		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1142		if (!new_skb)
1143			break;
1144
1145		phys = dma_map_single(lp->dev, new_skb->data,
1146				      lp->max_frm_size,
1147				      DMA_FROM_DEVICE);
1148		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1149			if (net_ratelimit())
1150				netdev_err(lp->ndev, "RX DMA mapping error\n");
1151			dev_kfree_skb(new_skb);
1152			break;
1153		}
1154		desc_set_phys_addr(lp, phys, cur_p);
1155
1156		cur_p->cntrl = lp->max_frm_size;
1157		cur_p->status = 0;
1158		cur_p->skb = new_skb;
1159
1160		/* Only update tail_p to mark this slot as usable after it has
1161		 * been successfully refilled.
1162		 */
1163		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1164
1165		if (++lp->rx_bd_ci >= lp->rx_bd_num)
1166			lp->rx_bd_ci = 0;
1167		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1168	}
1169
1170	u64_stats_update_begin(&lp->rx_stat_sync);
1171	u64_stats_add(&lp->rx_packets, packets);
1172	u64_stats_add(&lp->rx_bytes, size);
1173	u64_stats_update_end(&lp->rx_stat_sync);
1174
1175	if (tail_p)
1176		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1177
1178	if (packets < budget && napi_complete_done(napi, packets)) {
1179		/* Re-enable RX completion interrupts. This should
1180		 * cause an immediate interrupt if any RX packets are
1181		 * already pending.
1182		 */
1183		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1184	}
1185	return packets;
1186}
1187
1188/**
1189 * axienet_tx_irq - Tx Done Isr.
1190 * @irq:	irq number
1191 * @_ndev:	net_device pointer
1192 *
1193 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1194 *
1195 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1196 * TX BD processing.
1197 */
1198static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1199{
1200	unsigned int status;
1201	struct net_device *ndev = _ndev;
1202	struct axienet_local *lp = netdev_priv(ndev);
1203
1204	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1205
1206	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1207		return IRQ_NONE;
1208
1209	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1210
1211	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1212		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1213		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1214			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1215			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1216		schedule_work(&lp->dma_err_task);
1217	} else {
1218		/* Disable further TX completion interrupts and schedule
1219		 * NAPI to handle the completions.
1220		 */
1221		u32 cr = lp->tx_dma_cr;
1222
1223		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1224		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1225
1226		napi_schedule(&lp->napi_tx);
1227	}
1228
1229	return IRQ_HANDLED;
1230}
1231
1232/**
1233 * axienet_rx_irq - Rx Isr.
1234 * @irq:	irq number
1235 * @_ndev:	net_device pointer
1236 *
1237 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1238 *
1239 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1240 * processing.
1241 */
1242static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1243{
1244	unsigned int status;
1245	struct net_device *ndev = _ndev;
1246	struct axienet_local *lp = netdev_priv(ndev);
1247
1248	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1249
1250	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1251		return IRQ_NONE;
1252
1253	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1254
1255	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1256		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1257		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1258			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1259			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1260		schedule_work(&lp->dma_err_task);
1261	} else {
1262		/* Disable further RX completion interrupts and schedule
1263		 * NAPI receive.
1264		 */
1265		u32 cr = lp->rx_dma_cr;
1266
1267		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1268		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1269
1270		napi_schedule(&lp->napi_rx);
1271	}
1272
1273	return IRQ_HANDLED;
1274}
1275
1276/**
1277 * axienet_eth_irq - Ethernet core Isr.
1278 * @irq:	irq number
1279 * @_ndev:	net_device pointer
1280 *
1281 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1282 *
1283 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1284 */
1285static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1286{
1287	struct net_device *ndev = _ndev;
1288	struct axienet_local *lp = netdev_priv(ndev);
1289	unsigned int pending;
1290
1291	pending = axienet_ior(lp, XAE_IP_OFFSET);
1292	if (!pending)
1293		return IRQ_NONE;
1294
1295	if (pending & XAE_INT_RXFIFOOVR_MASK)
1296		ndev->stats.rx_missed_errors++;
1297
1298	if (pending & XAE_INT_RXRJECT_MASK)
1299		ndev->stats.rx_frame_errors++;
1300
1301	axienet_iow(lp, XAE_IS_OFFSET, pending);
1302	return IRQ_HANDLED;
1303}
1304
1305static void axienet_dma_err_handler(struct work_struct *work);
1306
1307/**
1308 * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
1309 * Allocate an skbuff, map it into the scatterlist, obtain a descriptor
1310 * and then add the callback information and submit the descriptor.
1311 *
1312 * @ndev:	net_device pointer
1313 *
1314 */
1315static void axienet_rx_submit_desc(struct net_device *ndev)
1316{
1317	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1318	struct axienet_local *lp = netdev_priv(ndev);
1319	struct skbuf_dma_descriptor *skbuf_dma;
1320	struct sk_buff *skb;
1321	dma_addr_t addr;
1322
1323	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1324	if (!skbuf_dma)
1325		return;
1326
1327	lp->rx_ring_head++;
1328	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1329	if (!skb)
1330		return;
1331
1332	sg_init_table(skbuf_dma->sgl, 1);
1333	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1334	if (unlikely(dma_mapping_error(lp->dev, addr))) {
1335		if (net_ratelimit())
1336			netdev_err(ndev, "DMA mapping error\n");
1337		goto rx_submit_err_free_skb;
1338	}
1339	sg_dma_address(skbuf_dma->sgl) = addr;
1340	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1341	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1342					      1, DMA_DEV_TO_MEM,
1343					      DMA_PREP_INTERRUPT);
1344	if (!dma_rx_desc)
1345		goto rx_submit_err_unmap_skb;
1346
1347	skbuf_dma->skb = skb;
1348	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1349	skbuf_dma->desc = dma_rx_desc;
1350	dma_rx_desc->callback_param = lp;
1351	dma_rx_desc->callback_result = axienet_dma_rx_cb;
1352	dmaengine_submit(dma_rx_desc);
1353
1354	return;
1355
1356rx_submit_err_unmap_skb:
1357	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1358rx_submit_err_free_skb:
1359	dev_kfree_skb(skb);
1360}
1361
1362/**
1363 * axienet_init_dmaengine - init the dmaengine code.
1364 * @ndev:       Pointer to net_device structure
1365 *
1366 * Return: 0, on success.
1367 *          non-zero error value on failure
1368 *
1369 * This is the dmaengine initialization code.
1370 */
1371static int axienet_init_dmaengine(struct net_device *ndev)
1372{
1373	struct axienet_local *lp = netdev_priv(ndev);
1374	struct skbuf_dma_descriptor *skbuf_dma;
1375	int i, ret;
1376
1377	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1378	if (IS_ERR(lp->tx_chan)) {
1379		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1380		return PTR_ERR(lp->tx_chan);
1381	}
1382
1383	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1384	if (IS_ERR(lp->rx_chan)) {
1385		ret = PTR_ERR(lp->rx_chan);
1386		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1387		goto err_dma_release_tx;
1388	}
1389
1390	lp->tx_ring_tail = 0;
1391	lp->tx_ring_head = 0;
1392	lp->rx_ring_tail = 0;
1393	lp->rx_ring_head = 0;
1394	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1395				  GFP_KERNEL);
1396	if (!lp->tx_skb_ring) {
1397		ret = -ENOMEM;
1398		goto err_dma_release_rx;
1399	}
1400	for (i = 0; i < TX_BD_NUM_MAX; i++) {
1401		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1402		if (!skbuf_dma) {
1403			ret = -ENOMEM;
1404			goto err_free_tx_skb_ring;
1405		}
1406		lp->tx_skb_ring[i] = skbuf_dma;
1407	}
1408
1409	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1410				  GFP_KERNEL);
1411	if (!lp->rx_skb_ring) {
1412		ret = -ENOMEM;
1413		goto err_free_tx_skb_ring;
1414	}
1415	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1416		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1417		if (!skbuf_dma) {
1418			ret = -ENOMEM;
1419			goto err_free_rx_skb_ring;
1420		}
1421		lp->rx_skb_ring[i] = skbuf_dma;
1422	}
1423	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1424	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1425		axienet_rx_submit_desc(ndev);
1426	dma_async_issue_pending(lp->rx_chan);
1427
1428	return 0;
1429
1430err_free_rx_skb_ring:
1431	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1432		kfree(lp->rx_skb_ring[i]);
1433	kfree(lp->rx_skb_ring);
1434err_free_tx_skb_ring:
1435	for (i = 0; i < TX_BD_NUM_MAX; i++)
1436		kfree(lp->tx_skb_ring[i]);
1437	kfree(lp->tx_skb_ring);
1438err_dma_release_rx:
1439	dma_release_channel(lp->rx_chan);
1440err_dma_release_tx:
1441	dma_release_channel(lp->tx_chan);
1442	return ret;
1443}
1444
1445/**
1446 * axienet_init_legacy_dma - init the dma legacy code.
1447 * @ndev:       Pointer to net_device structure
1448 *
1449 * Return: 0, on success.
1450 *          non-zero error value on failure
1451 *
1452 * This is the legacy DMA initialization code. It also registers the
1453 * interrupt service routines and enables the interrupt lines.
1454 *
1455 */
1456static int axienet_init_legacy_dma(struct net_device *ndev)
1457{
1458	int ret;
1459	struct axienet_local *lp = netdev_priv(ndev);
1460
1461	/* Enable worker thread for Axi DMA error handling */
1462	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1463
1464	napi_enable(&lp->napi_rx);
1465	napi_enable(&lp->napi_tx);
1466
1467	/* Enable interrupts for Axi DMA Tx */
1468	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1469			  ndev->name, ndev);
1470	if (ret)
1471		goto err_tx_irq;
1472	/* Enable interrupts for Axi DMA Rx */
1473	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1474			  ndev->name, ndev);
1475	if (ret)
1476		goto err_rx_irq;
1477	/* Enable interrupts for Axi Ethernet core (if defined) */
1478	if (lp->eth_irq > 0) {
1479		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1480				  ndev->name, ndev);
1481		if (ret)
1482			goto err_eth_irq;
1483	}
1484
1485	return 0;
1486
1487err_eth_irq:
1488	free_irq(lp->rx_irq, ndev);
1489err_rx_irq:
1490	free_irq(lp->tx_irq, ndev);
1491err_tx_irq:
1492	napi_disable(&lp->napi_tx);
1493	napi_disable(&lp->napi_rx);
1494	cancel_work_sync(&lp->dma_err_task);
1495	dev_err(lp->dev, "request_irq() failed\n");
1496	return ret;
1497}
1498
1499/**
1500 * axienet_open - Driver open routine.
1501 * @ndev:	Pointer to net_device structure
1502 *
1503 * Return: 0, on success.
1504 *	    non-zero error value on failure
1505 *
1506 * This is the driver open routine. It calls phylink_start to start the
1507 * PHY device.
1508 * It also allocates interrupt service routines, enables the interrupt lines
1509 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1510 * descriptors are initialized.
1511 */
1512static int axienet_open(struct net_device *ndev)
1513{
1514	int ret;
1515	struct axienet_local *lp = netdev_priv(ndev);
1516
1517	dev_dbg(&ndev->dev, "%s\n", __func__);
1518
1519	/* When we do an Axi Ethernet reset, it resets the complete core
1520	 * including the MDIO. MDIO must be disabled before resetting.
1521	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1522	 */
1523	axienet_lock_mii(lp);
1524	ret = axienet_device_reset(ndev);
1525	axienet_unlock_mii(lp);
1526
1527	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1528	if (ret) {
1529		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1530		return ret;
1531	}
1532
1533	phylink_start(lp->phylink);
1534
1535	if (lp->use_dmaengine) {
1536		/* Enable interrupts for Axi Ethernet core (if defined) */
1537		if (lp->eth_irq > 0) {
1538			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1539					  ndev->name, ndev);
1540			if (ret)
1541				goto err_phy;
1542		}
1543
1544		ret = axienet_init_dmaengine(ndev);
1545		if (ret < 0)
1546			goto err_free_eth_irq;
1547	} else {
1548		ret = axienet_init_legacy_dma(ndev);
1549		if (ret)
1550			goto err_phy;
1551	}
1552
1553	return 0;
1554
1555err_free_eth_irq:
1556	if (lp->eth_irq > 0)
1557		free_irq(lp->eth_irq, ndev);
1558err_phy:
1559	phylink_stop(lp->phylink);
1560	phylink_disconnect_phy(lp->phylink);
1561	return ret;
1562}
1563
1564/**
1565 * axienet_stop - Driver stop routine.
1566 * @ndev:	Pointer to net_device structure
1567 *
1568 * Return: 0, on success.
1569 *
1570 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1571 * device. It also removes the interrupt handlers and disables the interrupts.
1572 * The Axi DMA Tx/Rx BDs are released.
1573 */
1574static int axienet_stop(struct net_device *ndev)
1575{
1576	struct axienet_local *lp = netdev_priv(ndev);
1577	int i;
1578
1579	dev_dbg(&ndev->dev, "%s\n", __func__);
1580
1581	if (!lp->use_dmaengine) {
1582		napi_disable(&lp->napi_tx);
1583		napi_disable(&lp->napi_rx);
1584	}
1585
1586	phylink_stop(lp->phylink);
1587	phylink_disconnect_phy(lp->phylink);
1588
1589	axienet_setoptions(ndev, lp->options &
1590			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1591
1592	if (!lp->use_dmaengine) {
1593		axienet_dma_stop(lp);
1594		cancel_work_sync(&lp->dma_err_task);
1595		free_irq(lp->tx_irq, ndev);
1596		free_irq(lp->rx_irq, ndev);
1597		axienet_dma_bd_release(ndev);
1598	} else {
1599		dmaengine_terminate_sync(lp->tx_chan);
1600		dmaengine_synchronize(lp->tx_chan);
1601		dmaengine_terminate_sync(lp->rx_chan);
1602		dmaengine_synchronize(lp->rx_chan);
1603
1604		for (i = 0; i < TX_BD_NUM_MAX; i++)
1605			kfree(lp->tx_skb_ring[i]);
1606		kfree(lp->tx_skb_ring);
1607		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1608			kfree(lp->rx_skb_ring[i]);
1609		kfree(lp->rx_skb_ring);
1610
1611		dma_release_channel(lp->rx_chan);
1612		dma_release_channel(lp->tx_chan);
1613	}
1614
1615	axienet_iow(lp, XAE_IE_OFFSET, 0);
1616
1617	if (lp->eth_irq > 0)
1618		free_irq(lp->eth_irq, ndev);
1619	return 0;
1620}
1621
1622/**
1623 * axienet_change_mtu - Driver change mtu routine.
1624 * @ndev:	Pointer to net_device structure
1625 * @new_mtu:	New mtu value to be applied
1626 *
1627 * Return: 0 on success, -EBUSY if the device is up, or -EINVAL if the new frame size does not fit in the receive memory.
1628 *
1629 * This is the change mtu driver routine. It checks if the Axi Ethernet
1630 * hardware supports jumbo frames before changing the mtu. This can be
1631 * called only when the device is not up.
1632 */
1633static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1634{
1635	struct axienet_local *lp = netdev_priv(ndev);
1636
1637	if (netif_running(ndev))
1638		return -EBUSY;
1639
1640	if ((new_mtu + VLAN_ETH_HLEN +
1641		XAE_TRL_SIZE) > lp->rxmem)
1642		return -EINVAL;
1643
1644	ndev->mtu = new_mtu;
1645
1646	return 0;
1647}
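/* Worked example (assumed numbers, not driver code): the check above sizes
 * the worst-case frame as new_mtu + VLAN_ETH_HLEN (14-byte Ethernet header
 * plus 4-byte VLAN tag) + XAE_TRL_SIZE (4-byte FCS). A standard MTU of 1500
 * therefore needs 1522 bytes of receive buffering and a jumbo MTU of 9000
 * needs 9022, which must not exceed the lp->rxmem configured for the core.
 */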
1648
1649#ifdef CONFIG_NET_POLL_CONTROLLER
1650/**
1651 * axienet_poll_controller - Axi Ethernet poll mechanism.
1652 * @ndev:	Pointer to net_device structure
1653 *
1654 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1655 * to polling the ISRs and are enabled back after the polling is done.
1656 */
1657static void axienet_poll_controller(struct net_device *ndev)
1658{
1659	struct axienet_local *lp = netdev_priv(ndev);
1660	disable_irq(lp->tx_irq);
1661	disable_irq(lp->rx_irq);
1662	axienet_rx_irq(lp->rx_irq, ndev);
1663	axienet_tx_irq(lp->tx_irq, ndev);
1664	enable_irq(lp->tx_irq);
1665	enable_irq(lp->rx_irq);
1666}
1667#endif
1668
1669static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1670{
1671	struct axienet_local *lp = netdev_priv(dev);
1672
1673	if (!netif_running(dev))
1674		return -EINVAL;
1675
1676	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1677}
1678
1679static void
1680axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1681{
1682	struct axienet_local *lp = netdev_priv(dev);
1683	unsigned int start;
1684
1685	netdev_stats_to_stats64(stats, &dev->stats);
1686
1687	do {
1688		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1689		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1690		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1691	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1692
1693	do {
1694		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1695		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1696		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1697	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1698}
1699
1700static const struct net_device_ops axienet_netdev_ops = {
1701	.ndo_open = axienet_open,
1702	.ndo_stop = axienet_stop,
1703	.ndo_start_xmit = axienet_start_xmit,
1704	.ndo_get_stats64 = axienet_get_stats64,
1705	.ndo_change_mtu	= axienet_change_mtu,
1706	.ndo_set_mac_address = netdev_set_mac_address,
1707	.ndo_validate_addr = eth_validate_addr,
1708	.ndo_eth_ioctl = axienet_ioctl,
1709	.ndo_set_rx_mode = axienet_set_multicast_list,
1710#ifdef CONFIG_NET_POLL_CONTROLLER
1711	.ndo_poll_controller = axienet_poll_controller,
1712#endif
1713};
1714
1715static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1716	.ndo_open = axienet_open,
1717	.ndo_stop = axienet_stop,
1718	.ndo_start_xmit = axienet_start_xmit_dmaengine,
1719	.ndo_get_stats64 = axienet_get_stats64,
1720	.ndo_change_mtu	= axienet_change_mtu,
1721	.ndo_set_mac_address = netdev_set_mac_address,
1722	.ndo_validate_addr = eth_validate_addr,
1723	.ndo_eth_ioctl = axienet_ioctl,
1724	.ndo_set_rx_mode = axienet_set_multicast_list,
1725};
1726
1727/**
1728 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1729 * @ndev:	Pointer to net_device structure
1730 * @ed:		Pointer to ethtool_drvinfo structure
1731 *
1732 * This implements ethtool command for getting the driver information.
1733 * Issue "ethtool -i ethX" to execute this function.
1734 */
1735static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1736					 struct ethtool_drvinfo *ed)
1737{
1738	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1739	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1740}
1741
1742/**
1743 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1744 *				   AxiEthernet core.
1745 * @ndev:	Pointer to net_device structure
1746 *
1747 * This implements ethtool command for getting the total register length
1748 * information.
1749 *
1750 * Return: the total regs length
1751 */
1752static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1753{
1754	return sizeof(u32) * AXIENET_REGS_N;
1755}
1756
1757/**
1758 * axienet_ethtools_get_regs - Dump the contents of all registers present
1759 *			       in AxiEthernet core.
1760 * @ndev:	Pointer to net_device structure
1761 * @regs:	Pointer to ethtool_regs structure
1762 * @ret:	Void pointer used to return the contents of the registers.
1763 *
1764 * This implements ethtool command for getting the Axi Ethernet register dump.
1765 * Issue "ethtool -d ethX" to execute this function.
1766 */
1767static void axienet_ethtools_get_regs(struct net_device *ndev,
1768				      struct ethtool_regs *regs, void *ret)
1769{
1770	u32 *data = (u32 *)ret;
1771	size_t len = sizeof(u32) * AXIENET_REGS_N;
1772	struct axienet_local *lp = netdev_priv(ndev);
1773
1774	regs->version = 0;
1775	regs->len = len;
1776
1777	memset(data, 0, len);
1778	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1779	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1780	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1781	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1782	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1783	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1784	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1785	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1786	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1787	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1788	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1789	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1790	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1791	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1792	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1793	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1794	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1795	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1796	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1797	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1798	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1799	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1800	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
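	/* data[23] through data[26] are intentionally left at zero (cleared
	 * by the memset above); no registers are dumped into those slots.
	 */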
1801	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1802	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1803	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1804	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1805	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
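	/* Words 32-39 of the dump cover the Axi DMA registers; they are only
	 * read when this driver programs the DMA directly and stay zero when
	 * a separate dmaengine driver owns the DMA (lp->use_dmaengine).
	 */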
1806	if (!lp->use_dmaengine) {
1807		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1808		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1809		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1810		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1811		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1812		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1813		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1814		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1815	}
1816}
1817
1818static void
1819axienet_ethtools_get_ringparam(struct net_device *ndev,
1820			       struct ethtool_ringparam *ering,
1821			       struct kernel_ethtool_ringparam *kernel_ering,
1822			       struct netlink_ext_ack *extack)
1823{
1824	struct axienet_local *lp = netdev_priv(ndev);
1825
1826	ering->rx_max_pending = RX_BD_NUM_MAX;
1827	ering->rx_mini_max_pending = 0;
1828	ering->rx_jumbo_max_pending = 0;
1829	ering->tx_max_pending = TX_BD_NUM_MAX;
1830	ering->rx_pending = lp->rx_bd_num;
1831	ering->rx_mini_pending = 0;
1832	ering->rx_jumbo_pending = 0;
1833	ering->tx_pending = lp->tx_bd_num;
1834}
1835
1836static int
1837axienet_ethtools_set_ringparam(struct net_device *ndev,
1838			       struct ethtool_ringparam *ering,
1839			       struct kernel_ethtool_ringparam *kernel_ering,
1840			       struct netlink_ext_ack *extack)
1841{
1842	struct axienet_local *lp = netdev_priv(ndev);
1843
1844	if (ering->rx_pending > RX_BD_NUM_MAX ||
1845	    ering->rx_mini_pending ||
1846	    ering->rx_jumbo_pending ||
1847	    ering->tx_pending < TX_BD_NUM_MIN ||
1848	    ering->tx_pending > TX_BD_NUM_MAX)
1849		return -EINVAL;
1850
1851	if (netif_running(ndev))
1852		return -EBUSY;
1853
1854	lp->rx_bd_num = ering->rx_pending;
1855	lp->tx_bd_num = ering->tx_pending;
1856	return 0;
1857}
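/* Example: "ethtool -G ethX rx 512 tx 128" lands here. The request is
 * rejected with -EINVAL unless TX_BD_NUM_MIN <= tx <= TX_BD_NUM_MAX and
 * rx <= RX_BD_NUM_MAX, with -EBUSY while the interface is up; the new ring
 * sizes take effect the next time the interface is opened.
 */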
1858
1859/**
1860 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1861 *				     Tx and Rx paths.
1862 * @ndev:	Pointer to net_device structure
1863 * @epauseparm:	Pointer to ethtool_pauseparam structure.
1864 *
1865 * This implements ethtool command for getting the Axi Ethernet pause
1866 * frame setting. Issue "ethtool -a ethX" to execute this function.
1867 */
1868static void
1869axienet_ethtools_get_pauseparam(struct net_device *ndev,
1870				struct ethtool_pauseparam *epauseparm)
1871{
1872	struct axienet_local *lp = netdev_priv(ndev);
1873
1874	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1875}
1876
1877/**
1878 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1879 *				     settings.
1880 * @ndev:	Pointer to net_device structure
1881 * @epauseparm:	Pointer to ethtool_pauseparam structure
1882 *
1883 * This implements ethtool command for enabling flow control on Rx and Tx
1884 * paths. Issue "ethtool -A ethX tx on|off" to execute this function.
1885 *
1886 * Return: 0 on success, or a negative error code returned by phylink on
1887 *	   failure.
1888 */
1889static int
1890axienet_ethtools_set_pauseparam(struct net_device *ndev,
1891				struct ethtool_pauseparam *epauseparm)
1892{
1893	struct axienet_local *lp = netdev_priv(ndev);
1894
1895	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
1896}
1897
1898/**
1899 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
1900 * @ndev:	Pointer to net_device structure
1901 * @ecoalesce:	Pointer to ethtool_coalesce structure
1902 * @kernel_coal: ethtool CQE mode setting structure
1903 * @extack:	extack for reporting error messages
1904 *
1905 * This implements ethtool command for getting the DMA interrupt coalescing
1906 * count on Tx and Rx paths. Issue "ethtool -c ethX" to execute this
1907 * function.
1908 *
1909 * Return: 0 always
1910 */
1911static int
1912axienet_ethtools_get_coalesce(struct net_device *ndev,
1913			      struct ethtool_coalesce *ecoalesce,
1914			      struct kernel_ethtool_coalesce *kernel_coal,
1915			      struct netlink_ext_ack *extack)
1916{
1917	struct axienet_local *lp = netdev_priv(ndev);
1918
1919	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
1920	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
1921	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
1922	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
1923	return 0;
1924}
1925
1926/**
1927 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
1928 * @ndev:	Pointer to net_device structure
1929 * @ecoalesce:	Pointer to ethtool_coalesce structure
1930 * @kernel_coal: ethtool CQE mode setting structure
1931 * @extack:	extack for reporting error messages
1932 *
1933 * This implements ethtool command for setting the DMA interrupt coalescing
1934 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" to execute
1935 * this function.
1936 *
1937 * Return: 0 on success, or a non-zero error value on failure.
1938 */
1939static int
1940axienet_ethtools_set_coalesce(struct net_device *ndev,
1941			      struct ethtool_coalesce *ecoalesce,
1942			      struct kernel_ethtool_coalesce *kernel_coal,
1943			      struct netlink_ext_ack *extack)
1944{
1945	struct axienet_local *lp = netdev_priv(ndev);
1946
1947	if (netif_running(ndev)) {
1948		netdev_err(ndev,
1949			   "Please stop the interface before applying configuration\n");
1950		return -EBUSY;
1951	}
1952
1953	if (ecoalesce->rx_max_coalesced_frames)
1954		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
1955	if (ecoalesce->rx_coalesce_usecs)
1956		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
1957	if (ecoalesce->tx_max_coalesced_frames)
1958		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
1959	if (ecoalesce->tx_coalesce_usecs)
1960		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
1961
1962	return 0;
1963}
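/* Example: "ethtool -C ethX rx-frames 16 rx-usecs 50" lands here while the
 * interface is down. Only non-zero fields overwrite the stored settings, so
 * parameters omitted from the command keep their previous values.
 */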
1964
1965static int
1966axienet_ethtools_get_link_ksettings(struct net_device *ndev,
1967				    struct ethtool_link_ksettings *cmd)
1968{
1969	struct axienet_local *lp = netdev_priv(ndev);
1970
1971	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
1972}
1973
1974static int
1975axienet_ethtools_set_link_ksettings(struct net_device *ndev,
1976				    const struct ethtool_link_ksettings *cmd)
1977{
1978	struct axienet_local *lp = netdev_priv(ndev);
1979
1980	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
1981}
1982
1983static int axienet_ethtools_nway_reset(struct net_device *dev)
1984{
1985	struct axienet_local *lp = netdev_priv(dev);
1986
1987	return phylink_ethtool_nway_reset(lp->phylink);
1988}
1989
1990static const struct ethtool_ops axienet_ethtool_ops = {
1991	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
1992				     ETHTOOL_COALESCE_USECS,
1993	.get_drvinfo    = axienet_ethtools_get_drvinfo,
1994	.get_regs_len   = axienet_ethtools_get_regs_len,
1995	.get_regs       = axienet_ethtools_get_regs,
1996	.get_link       = ethtool_op_get_link,
1997	.get_ringparam	= axienet_ethtools_get_ringparam,
1998	.set_ringparam	= axienet_ethtools_set_ringparam,
1999	.get_pauseparam = axienet_ethtools_get_pauseparam,
2000	.set_pauseparam = axienet_ethtools_set_pauseparam,
2001	.get_coalesce   = axienet_ethtools_get_coalesce,
2002	.set_coalesce   = axienet_ethtools_set_coalesce,
2003	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
2004	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
2005	.nway_reset	= axienet_ethtools_nway_reset,
2006};
2007
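/* Recover the enclosing axienet_local from its embedded phylink_pcs member;
 * a container_of() back-pointer used by the pcs_ops callbacks below.
 */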
2008static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2009{
2010	return container_of(pcs, struct axienet_local, pcs);
2011}
2012
2013static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2014				  struct phylink_link_state *state)
2015{
2016	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2017
2018	phylink_mii_c22_pcs_get_state(pcs_phy, state);
2019}
2020
2021static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2022{
2023	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2024
2025	phylink_mii_c22_pcs_an_restart(pcs_phy);
2026}
2027
2028static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2029			      phy_interface_t interface,
2030			      const unsigned long *advertising,
2031			      bool permit_pause_to_mac)
2032{
2033	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2034	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2035	struct axienet_local *lp = netdev_priv(ndev);
2036	int ret;
2037
2038	if (lp->switch_x_sgmii) {
2039		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2040				    interface == PHY_INTERFACE_MODE_SGMII ?
2041					XLNX_MII_STD_SELECT_SGMII : 0);
2042		if (ret < 0) {
2043			netdev_warn(ndev,
2044				    "Failed to switch PHY interface: %d\n",
2045				    ret);
2046			return ret;
2047		}
2048	}
2049
2050	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2051					 neg_mode);
2052	if (ret < 0)
2053		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2054
2055	return ret;
2056}
2057
2058static const struct phylink_pcs_ops axienet_pcs_ops = {
2059	.pcs_get_state = axienet_pcs_get_state,
2060	.pcs_config = axienet_pcs_config,
2061	.pcs_an_restart = axienet_pcs_an_restart,
2062};
2063
2064static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2065						  phy_interface_t interface)
2066{
2067	struct net_device *ndev = to_net_dev(config->dev);
2068	struct axienet_local *lp = netdev_priv(ndev);
2069
2070	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2071	    interface == PHY_INTERFACE_MODE_SGMII)
2072		return &lp->pcs;
2073
2074	return NULL;
2075}
2076
2077static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2078			       const struct phylink_link_state *state)
2079{
2080	/* nothing meaningful to do */
2081}
2082
2083static void axienet_mac_link_down(struct phylink_config *config,
2084				  unsigned int mode,
2085				  phy_interface_t interface)
2086{
2087	/* nothing meaningful to do */
2088}
2089
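/**
 * axienet_mac_link_up - Program MAC speed and flow control on link up
 * @config:	Pointer to phylink_config structure
 * @phy:	Pointer to the attached phy_device, if any
 * @mode:	Link autonegotiation mode
 * @interface:	PHY interface mode
 * @speed:	Negotiated link speed
 * @duplex:	Negotiated duplex
 * @tx_pause:	Whether Tx pause was negotiated
 * @rx_pause:	Whether Rx pause was negotiated
 *
 * Writes the negotiated speed into the EMMC speed field and the pause
 * results into FCC. For example, a 1000 Mbps link with symmetric pause
 * ends up with XAE_EMMC_LINKSPD_1000 in EMMC and both XAE_FCC_FCTX_MASK
 * and XAE_FCC_FCRX_MASK set in FCC.
 */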
2090static void axienet_mac_link_up(struct phylink_config *config,
2091				struct phy_device *phy,
2092				unsigned int mode, phy_interface_t interface,
2093				int speed, int duplex,
2094				bool tx_pause, bool rx_pause)
2095{
2096	struct net_device *ndev = to_net_dev(config->dev);
2097	struct axienet_local *lp = netdev_priv(ndev);
2098	u32 emmc_reg, fcc_reg;
2099
2100	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2101	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2102
2103	switch (speed) {
2104	case SPEED_1000:
2105		emmc_reg |= XAE_EMMC_LINKSPD_1000;
2106		break;
2107	case SPEED_100:
2108		emmc_reg |= XAE_EMMC_LINKSPD_100;
2109		break;
2110	case SPEED_10:
2111		emmc_reg |= XAE_EMMC_LINKSPD_10;
2112		break;
2113	default:
2114		dev_err(&ndev->dev,
2115			"Speeds other than 10, 100 or 1000 Mbps are not supported\n");
2116		break;
2117	}
2118
2119	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2120
2121	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2122	if (tx_pause)
2123		fcc_reg |= XAE_FCC_FCTX_MASK;
2124	else
2125		fcc_reg &= ~XAE_FCC_FCTX_MASK;
2126	if (rx_pause)
2127		fcc_reg |= XAE_FCC_FCRX_MASK;
2128	else
2129		fcc_reg &= ~XAE_FCC_FCRX_MASK;
2130	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2131}
2132
2133static const struct phylink_mac_ops axienet_phylink_ops = {
2134	.mac_select_pcs = axienet_mac_select_pcs,
2135	.mac_config = axienet_mac_config,
2136	.mac_link_down = axienet_mac_link_down,
2137	.mac_link_up = axienet_mac_link_up,
2138};
2139
2140/**
2141 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2142 * @work:	pointer to work_struct
2143 *
2144 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2145 * Tx/Rx BDs.
2146 */
2147static void axienet_dma_err_handler(struct work_struct *work)
2148{
2149	u32 i;
2150	u32 axienet_status;
2151	struct axidma_bd *cur_p;
2152	struct axienet_local *lp = container_of(work, struct axienet_local,
2153						dma_err_task);
2154	struct net_device *ndev = lp->ndev;
2155
2156	napi_disable(&lp->napi_tx);
2157	napi_disable(&lp->napi_rx);
2158
2159	axienet_setoptions(ndev, lp->options &
2160			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2161
2162	axienet_dma_stop(lp);
2163
2164	for (i = 0; i < lp->tx_bd_num; i++) {
2165		cur_p = &lp->tx_bd_v[i];
2166		if (cur_p->cntrl) {
2167			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2168
2169			dma_unmap_single(lp->dev, addr,
2170					 (cur_p->cntrl &
2171					  XAXIDMA_BD_CTRL_LENGTH_MASK),
2172					 DMA_TO_DEVICE);
2173		}
2174		if (cur_p->skb)
2175			dev_kfree_skb_irq(cur_p->skb);
2176		cur_p->phys = 0;
2177		cur_p->phys_msb = 0;
2178		cur_p->cntrl = 0;
2179		cur_p->status = 0;
2180		cur_p->app0 = 0;
2181		cur_p->app1 = 0;
2182		cur_p->app2 = 0;
2183		cur_p->app3 = 0;
2184		cur_p->app4 = 0;
2185		cur_p->skb = NULL;
2186	}
2187
2188	for (i = 0; i < lp->rx_bd_num; i++) {
2189		cur_p = &lp->rx_bd_v[i];
2190		cur_p->status = 0;
2191		cur_p->app0 = 0;
2192		cur_p->app1 = 0;
2193		cur_p->app2 = 0;
2194		cur_p->app3 = 0;
2195		cur_p->app4 = 0;
2196	}
2197
2198	lp->tx_bd_ci = 0;
2199	lp->tx_bd_tail = 0;
2200	lp->rx_bd_ci = 0;
2201
2202	axienet_dma_start(lp);
2203
2204	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2205	axienet_status &= ~XAE_RCW1_RX_MASK;
2206	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2207
2208	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2209	if (axienet_status & XAE_INT_RXRJECT_MASK)
2210		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2211	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2212		    XAE_INT_RECV_ERROR_MASK : 0);
2213	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2214
2215	/* Sync default options with HW but leave receiver and
2216	 * transmitter disabled.
2217	 */
2218	axienet_setoptions(ndev, lp->options &
2219			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2220	axienet_set_mac_address(ndev, NULL);
2221	axienet_set_multicast_list(ndev);
2222	axienet_setoptions(ndev, lp->options);
2223	napi_enable(&lp->napi_rx);
2224	napi_enable(&lp->napi_tx);
2225}
2226
2227/**
2228 * axienet_probe - Axi Ethernet probe function.
2229 * @pdev:	Pointer to platform device structure.
2230 *
2231 * Return: 0 on success
2232 *	    Non-zero error value on failure.
2233 *
2234 * This is the probe routine for the Axi Ethernet driver. It is called before
2235 * any other driver routines are invoked. It allocates and sets up the
2236 * Ethernet device, parses the device tree to populate the fields of
2237 * axienet_local, and registers the Ethernet device.
2238 */
2239static int axienet_probe(struct platform_device *pdev)
2240{
2241	int ret;
2242	struct device_node *np;
2243	struct axienet_local *lp;
2244	struct net_device *ndev;
2245	struct resource *ethres;
2246	u8 mac_addr[ETH_ALEN];
2247	int addr_width = 32;
2248	u32 value;
2249
2250	ndev = alloc_etherdev(sizeof(*lp));
2251	if (!ndev)
2252		return -ENOMEM;
2253
2254	platform_set_drvdata(pdev, ndev);
2255
2256	SET_NETDEV_DEV(ndev, &pdev->dev);
2257	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
2258	ndev->features = NETIF_F_SG;
2259	ndev->ethtool_ops = &axienet_ethtool_ops;
2260
2261	/* MTU range: 64 - 9000 */
2262	ndev->min_mtu = 64;
2263	ndev->max_mtu = XAE_JUMBO_MTU;
2264
2265	lp = netdev_priv(ndev);
2266	lp->ndev = ndev;
2267	lp->dev = &pdev->dev;
2268	lp->options = XAE_OPTION_DEFAULTS;
2269	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2270	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2271
2272	u64_stats_init(&lp->rx_stat_sync);
2273	u64_stats_init(&lp->tx_stat_sync);
2274
2275	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2276	if (!lp->axi_clk) {
2277		/* For backward compatibility, if named AXI clock is not present,
2278		 * treat the first clock specified as the AXI clock.
2279		 */
2280		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2281	}
2282	if (IS_ERR(lp->axi_clk)) {
2283		ret = PTR_ERR(lp->axi_clk);
2284		goto free_netdev;
2285	}
2286	ret = clk_prepare_enable(lp->axi_clk);
2287	if (ret) {
2288		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2289		goto free_netdev;
2290	}
2291
2292	lp->misc_clks[0].id = "axis_clk";
2293	lp->misc_clks[1].id = "ref_clk";
2294	lp->misc_clks[2].id = "mgt_clk";
2295
2296	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2297	if (ret)
2298		goto cleanup_clk;
2299
2300	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2301	if (ret)
2302		goto cleanup_clk;
2303
2304	/* Map device registers */
2305	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2306	if (IS_ERR(lp->regs)) {
2307		ret = PTR_ERR(lp->regs);
2308		goto cleanup_clk;
2309	}
2310	lp->regs_start = ethres->start;
2311
2312	/* Setup checksum offload, but default to off if not specified */
2313	lp->features = 0;
2314
2315	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2316	if (!ret) {
2317		switch (value) {
2318		case 1:
2319			lp->csum_offload_on_tx_path =
2320				XAE_FEATURE_PARTIAL_TX_CSUM;
2321			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2322			/* Can checksum TCP/UDP over IPv4. */
2323			ndev->features |= NETIF_F_IP_CSUM;
2324			break;
2325		case 2:
2326			lp->csum_offload_on_tx_path =
2327				XAE_FEATURE_FULL_TX_CSUM;
2328			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2329			/* Can checksum TCP/UDP over IPv4. */
2330			ndev->features |= NETIF_F_IP_CSUM;
2331			break;
2332		default:
2333			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
2334		}
2335	}
2336	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2337	if (!ret) {
2338		switch (value) {
2339		case 1:
2340			lp->csum_offload_on_rx_path =
2341				XAE_FEATURE_PARTIAL_RX_CSUM;
2342			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2343			break;
2344		case 2:
2345			lp->csum_offload_on_rx_path =
2346				XAE_FEATURE_FULL_RX_CSUM;
2347			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2348			break;
2349		default:
2350			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
2351		}
2352	}
2353	/* For supporting jumbo frames, the Axi Ethernet hardware must have
2354	 * a larger Rx/Tx Memory. Typically, the size must be large enough to
2355	 * hold a jumbo frame before the jumbo option can be enabled. Here we
2356	 * read the Rx/Tx memory size configured in the hardware from the
2357	 * device tree and set the flags accordingly.
2358	 */
2359	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2360
2361	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2362						   "xlnx,switch-x-sgmii");
2363
2364	/* Start with the proprietary (and broken) phy_type property */
2365	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2366	if (!ret) {
2367		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode\n");
2368		switch (value) {
2369		case XAE_PHY_TYPE_MII:
2370			lp->phy_mode = PHY_INTERFACE_MODE_MII;
2371			break;
2372		case XAE_PHY_TYPE_GMII:
2373			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2374			break;
2375		case XAE_PHY_TYPE_RGMII_2_0:
2376			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2377			break;
2378		case XAE_PHY_TYPE_SGMII:
2379			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2380			break;
2381		case XAE_PHY_TYPE_1000BASE_X:
2382			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2383			break;
2384		default:
2385			ret = -EINVAL;
2386			goto cleanup_clk;
2387		}
2388	} else {
2389		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2390		if (ret)
2391			goto cleanup_clk;
2392	}
2393	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2394	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2395		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2396		ret = -EINVAL;
2397		goto cleanup_clk;
2398	}
2399
2400	if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
2401		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2402		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2403
2404		if (np) {
2405			struct resource dmares;
2406
2407			ret = of_address_to_resource(np, 0, &dmares);
2408			if (ret) {
2409				dev_err(&pdev->dev,
2410					"unable to get DMA resource\n");
2411				of_node_put(np);
2412				goto cleanup_clk;
2413			}
2414			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2415							     &dmares);
2416			lp->rx_irq = irq_of_parse_and_map(np, 1);
2417			lp->tx_irq = irq_of_parse_and_map(np, 0);
2418			of_node_put(np);
2419			lp->eth_irq = platform_get_irq_optional(pdev, 0);
2420		} else {
2421			/* Check for these resources directly on the Ethernet node. */
2422			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2423			lp->rx_irq = platform_get_irq(pdev, 1);
2424			lp->tx_irq = platform_get_irq(pdev, 0);
2425			lp->eth_irq = platform_get_irq_optional(pdev, 2);
2426		}
2427		if (IS_ERR(lp->dma_regs)) {
2428			dev_err(&pdev->dev, "could not map DMA regs\n");
2429			ret = PTR_ERR(lp->dma_regs);
2430			goto cleanup_clk;
2431		}
2432		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2433			dev_err(&pdev->dev, "could not determine irqs\n");
2434			ret = -ENOMEM;
2435			goto cleanup_clk;
2436		}
2437
2438		/* Reset core now that clocks are enabled, prior to accessing MDIO */
2439		ret = __axienet_device_reset(lp);
2440		if (ret)
2441			goto cleanup_clk;
2442
2443		/* Autodetect the need for 64-bit DMA pointers.
2444		 * When the IP is configured for a bus width bigger than 32 bits,
2445		 * writing the MSB registers is mandatory, even if they are all 0.
2446		 * We can detect this case by writing all 1's to one such register
2447		 * and seeing if that sticks: when the IP is configured for 32 bits
2448		 * only, those registers are RES0.
2449		 * Those MSB registers were introduced in IP v7.1, which we check first.
2450		 */
2451		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2452			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2453
2454			iowrite32(0x0, desc);
2455			if (ioread32(desc) == 0) {	/* sanity check */
2456				iowrite32(0xffffffff, desc);
2457				if (ioread32(desc) > 0) {
2458					lp->features |= XAE_FEATURE_DMA_64BIT;
2459					addr_width = 64;
2460					dev_info(&pdev->dev,
2461						 "autodetected 64-bit DMA range\n");
2462				}
2463				iowrite32(0x0, desc);
2464			}
2465		}
2466		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2467			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2468			ret = -EINVAL;
2469			goto cleanup_clk;
2470		}
2471
2472		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2473		if (ret) {
2474			dev_err(&pdev->dev, "No suitable DMA available\n");
2475			goto cleanup_clk;
2476		}
2477		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2478		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2479	} else {
2480		struct xilinx_vdma_config cfg;
2481		struct dma_chan *tx_chan;
2482
2483		lp->eth_irq = platform_get_irq_optional(pdev, 0);
2484		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2485			ret = lp->eth_irq;
2486			goto cleanup_clk;
2487		}
2488		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2489		if (IS_ERR(tx_chan)) {
2490			ret = PTR_ERR(tx_chan);
2491			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2492			goto cleanup_clk;
2493		}
2494
2495		cfg.reset = 1;
2496		/* Despite the "VDMA" name, this config also supports DMA channel reset */
2497		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2498		if (ret < 0) {
2499			dev_err(&pdev->dev, "Reset channel failed\n");
2500			dma_release_channel(tx_chan);
2501			goto cleanup_clk;
2502		}
2503
2504		dma_release_channel(tx_chan);
2505		lp->use_dmaengine = 1;
2506	}
2507
2508	if (lp->use_dmaengine)
2509		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2510	else
2511		ndev->netdev_ops = &axienet_netdev_ops;
2512	/* Check for Ethernet core IRQ (optional) */
2513	if (lp->eth_irq <= 0)
2514		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2515
2516	/* Retrieve the MAC address */
2517	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2518	if (!ret) {
2519		axienet_set_mac_address(ndev, mac_addr);
2520	} else {
2521		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2522			 ret);
2523		axienet_set_mac_address(ndev, NULL);
2524	}
2525
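	/* Default coalescing settings; these are what "ethtool -c" reports
	 * until userspace overrides them via axienet_ethtools_set_coalesce().
	 */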
2526	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2527	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2528	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2529	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2530
2531	ret = axienet_mdio_setup(lp);
2532	if (ret)
2533		dev_warn(&pdev->dev,
2534			 "error registering MDIO bus: %d\n", ret);
2535
2536	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2537	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2538		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2539		if (!np) {
2540			/* Deprecated: Always use "pcs-handle" for pcs_phy.
2541			 * Falling back to "phy-handle" here is only for
2542			 * backward compatibility with old device trees.
2543			 */
2544			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2545		}
2546		if (!np) {
2547			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2548			ret = -EINVAL;
2549			goto cleanup_mdio;
2550		}
2551		lp->pcs_phy = of_mdio_find_device(np);
2552		if (!lp->pcs_phy) {
2553			ret = -EPROBE_DEFER;
2554			of_node_put(np);
2555			goto cleanup_mdio;
2556		}
2557		of_node_put(np);
2558		lp->pcs.ops = &axienet_pcs_ops;
2559		lp->pcs.neg_mode = true;
2560		lp->pcs.poll = true;
2561	}
2562
2563	lp->phylink_config.dev = &ndev->dev;
2564	lp->phylink_config.type = PHYLINK_NETDEV;
2565	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2566		MAC_10FD | MAC_100FD | MAC_1000FD;
2567
2568	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2569	if (lp->switch_x_sgmii) {
2570		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2571			  lp->phylink_config.supported_interfaces);
2572		__set_bit(PHY_INTERFACE_MODE_SGMII,
2573			  lp->phylink_config.supported_interfaces);
2574	}
2575
2576	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2577				     lp->phy_mode,
2578				     &axienet_phylink_ops);
2579	if (IS_ERR(lp->phylink)) {
2580		ret = PTR_ERR(lp->phylink);
2581		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2582		goto cleanup_mdio;
2583	}
2584
2585	ret = register_netdev(lp->ndev);
2586	if (ret) {
2587		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2588		goto cleanup_phylink;
2589	}
2590
2591	return 0;
2592
2593cleanup_phylink:
2594	phylink_destroy(lp->phylink);
2595
2596cleanup_mdio:
2597	if (lp->pcs_phy)
2598		put_device(&lp->pcs_phy->dev);
2599	if (lp->mii_bus)
2600		axienet_mdio_teardown(lp);
2601cleanup_clk:
2602	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2603	clk_disable_unprepare(lp->axi_clk);
2604
2605free_netdev:
2606	free_netdev(ndev);
2607
2608	return ret;
2609}
2610
2611static void axienet_remove(struct platform_device *pdev)
2612{
2613	struct net_device *ndev = platform_get_drvdata(pdev);
2614	struct axienet_local *lp = netdev_priv(ndev);
2615
2616	unregister_netdev(ndev);
2617
2618	if (lp->phylink)
2619		phylink_destroy(lp->phylink);
2620
2621	if (lp->pcs_phy)
2622		put_device(&lp->pcs_phy->dev);
2623
2624	axienet_mdio_teardown(lp);
2625
2626	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2627	clk_disable_unprepare(lp->axi_clk);
2628
2629	free_netdev(ndev);
2630}
2631
2632static void axienet_shutdown(struct platform_device *pdev)
2633{
2634	struct net_device *ndev = platform_get_drvdata(pdev);
2635
2636	rtnl_lock();
2637	netif_device_detach(ndev);
2638
2639	if (netif_running(ndev))
2640		dev_close(ndev);
2641
2642	rtnl_unlock();
2643}
2644
2645static int axienet_suspend(struct device *dev)
2646{
2647	struct net_device *ndev = dev_get_drvdata(dev);
2648
2649	if (!netif_running(ndev))
2650		return 0;
2651
2652	netif_device_detach(ndev);
2653
2654	rtnl_lock();
2655	axienet_stop(ndev);
2656	rtnl_unlock();
2657
2658	return 0;
2659}
2660
2661static int axienet_resume(struct device *dev)
2662{
2663	struct net_device *ndev = dev_get_drvdata(dev);
2664
2665	if (!netif_running(ndev))
2666		return 0;
2667
2668	rtnl_lock();
2669	axienet_open(ndev);
2670	rtnl_unlock();
2671
2672	netif_device_attach(ndev);
2673
2674	return 0;
2675}
2676
2677static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
2678				axienet_suspend, axienet_resume);
2679
2680static struct platform_driver axienet_driver = {
2681	.probe = axienet_probe,
2682	.remove_new = axienet_remove,
2683	.shutdown = axienet_shutdown,
2684	.driver = {
2685		 .name = "xilinx_axienet",
2686		 .pm = &axienet_pm_ops,
2687		 .of_match_table = axienet_of_match,
2688	},
2689};
2690
2691module_platform_driver(axienet_driver);
2692
2693MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
2694MODULE_AUTHOR("Xilinx");
2695MODULE_LICENSE("GPL");
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Xilinx Axi Ethernet device driver
   4 *
   5 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
   6 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
   7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
   8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
   9 * Copyright (c) 2010 - 2011 PetaLogix
  10 * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
  11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  12 *
  13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
  14 * and Spartan6.
  15 *
  16 * TODO:
  17 *  - Add Axi Fifo support.
  18 *  - Factor out Axi DMA code into separate driver.
  19 *  - Test and fix basic multicast filtering.
  20 *  - Add support for extended multicast filtering.
  21 *  - Test basic VLAN support.
  22 *  - Add support for extended VLAN support.
  23 */
  24
  25#include <linux/clk.h>
  26#include <linux/delay.h>
  27#include <linux/etherdevice.h>
  28#include <linux/module.h>
  29#include <linux/netdevice.h>
 
  30#include <linux/of_mdio.h>
  31#include <linux/of_net.h>
  32#include <linux/of_platform.h>
  33#include <linux/of_irq.h>
  34#include <linux/of_address.h>
 
  35#include <linux/skbuff.h>
  36#include <linux/spinlock.h>
  37#include <linux/phy.h>
  38#include <linux/mii.h>
  39#include <linux/ethtool.h>
 
 
 
 
 
  40
  41#include "xilinx_axienet.h"
  42
  43/* Descriptors defines for Tx and Rx DMA */
  44#define TX_BD_NUM_DEFAULT		64
  45#define RX_BD_NUM_DEFAULT		1024
 
  46#define TX_BD_NUM_MAX			4096
  47#define RX_BD_NUM_MAX			4096
 
 
 
  48
  49/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
  50#define DRIVER_NAME		"xaxienet"
  51#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
  52#define DRIVER_VERSION		"1.00a"
  53
  54#define AXIENET_REGS_N		40
  55
 
 
  56/* Match table for of_platform binding */
  57static const struct of_device_id axienet_of_match[] = {
  58	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
  59	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
  60	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
  61	{},
  62};
  63
  64MODULE_DEVICE_TABLE(of, axienet_of_match);
  65
  66/* Option table for setting up Axi Ethernet hardware options */
  67static struct axienet_option axienet_options[] = {
  68	/* Turn on jumbo packet support for both Rx and Tx */
  69	{
  70		.opt = XAE_OPTION_JUMBO,
  71		.reg = XAE_TC_OFFSET,
  72		.m_or = XAE_TC_JUM_MASK,
  73	}, {
  74		.opt = XAE_OPTION_JUMBO,
  75		.reg = XAE_RCW1_OFFSET,
  76		.m_or = XAE_RCW1_JUM_MASK,
  77	}, { /* Turn on VLAN packet support for both Rx and Tx */
  78		.opt = XAE_OPTION_VLAN,
  79		.reg = XAE_TC_OFFSET,
  80		.m_or = XAE_TC_VLAN_MASK,
  81	}, {
  82		.opt = XAE_OPTION_VLAN,
  83		.reg = XAE_RCW1_OFFSET,
  84		.m_or = XAE_RCW1_VLAN_MASK,
  85	}, { /* Turn on FCS stripping on receive packets */
  86		.opt = XAE_OPTION_FCS_STRIP,
  87		.reg = XAE_RCW1_OFFSET,
  88		.m_or = XAE_RCW1_FCS_MASK,
  89	}, { /* Turn on FCS insertion on transmit packets */
  90		.opt = XAE_OPTION_FCS_INSERT,
  91		.reg = XAE_TC_OFFSET,
  92		.m_or = XAE_TC_FCS_MASK,
  93	}, { /* Turn off length/type field checking on receive packets */
  94		.opt = XAE_OPTION_LENTYPE_ERR,
  95		.reg = XAE_RCW1_OFFSET,
  96		.m_or = XAE_RCW1_LT_DIS_MASK,
  97	}, { /* Turn on Rx flow control */
  98		.opt = XAE_OPTION_FLOW_CONTROL,
  99		.reg = XAE_FCC_OFFSET,
 100		.m_or = XAE_FCC_FCRX_MASK,
 101	}, { /* Turn on Tx flow control */
 102		.opt = XAE_OPTION_FLOW_CONTROL,
 103		.reg = XAE_FCC_OFFSET,
 104		.m_or = XAE_FCC_FCTX_MASK,
 105	}, { /* Turn on promiscuous frame filtering */
 106		.opt = XAE_OPTION_PROMISC,
 107		.reg = XAE_FMI_OFFSET,
 108		.m_or = XAE_FMI_PM_MASK,
 109	}, { /* Enable transmitter */
 110		.opt = XAE_OPTION_TXEN,
 111		.reg = XAE_TC_OFFSET,
 112		.m_or = XAE_TC_TX_MASK,
 113	}, { /* Enable receiver */
 114		.opt = XAE_OPTION_RXEN,
 115		.reg = XAE_RCW1_OFFSET,
 116		.m_or = XAE_RCW1_RX_MASK,
 117	},
 118	{}
 119};
 120
 
 
 
 
 
 
 
 
 
 
 121/**
 122 * axienet_dma_in32 - Memory mapped Axi DMA register read
 123 * @lp:		Pointer to axienet local structure
 124 * @reg:	Address offset from the base address of the Axi DMA core
 125 *
 126 * Return: The contents of the Axi DMA register
 127 *
 128 * This function returns the contents of the corresponding Axi DMA register.
 129 */
 130static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 131{
 132	return ioread32(lp->dma_regs + reg);
 133}
 134
 135/**
 136 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 137 * @lp:		Pointer to axienet local structure
 138 * @reg:	Address offset from the base address of the Axi DMA core
 139 * @value:	Value to be written into the Axi DMA register
 140 *
 141 * This function writes the desired value into the corresponding Axi DMA
 142 * register.
 143 */
 144static inline void axienet_dma_out32(struct axienet_local *lp,
 145				     off_t reg, u32 value)
 146{
 147	iowrite32(value, lp->dma_regs + reg);
 148}
 149
 150static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
 151				 dma_addr_t addr)
 152{
 153	axienet_dma_out32(lp, reg, lower_32_bits(addr));
 154
 155	if (lp->features & XAE_FEATURE_DMA_64BIT)
 156		axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
 157}
 158
 159static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
 160			       struct axidma_bd *desc)
 161{
 162	desc->phys = lower_32_bits(addr);
 163	if (lp->features & XAE_FEATURE_DMA_64BIT)
 164		desc->phys_msb = upper_32_bits(addr);
 165}
 166
 167static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
 168				     struct axidma_bd *desc)
 169{
 170	dma_addr_t ret = desc->phys;
 171
 172	if (lp->features & XAE_FEATURE_DMA_64BIT)
 173		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
 174
 175	return ret;
 176}
 177
 178/**
 179 * axienet_dma_bd_release - Release buffer descriptor rings
 180 * @ndev:	Pointer to the net_device structure
 181 *
 182 * This function is used to release the descriptors allocated in
 183 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 184 * driver stop api is called.
 185 */
 186static void axienet_dma_bd_release(struct net_device *ndev)
 187{
 188	int i;
 189	struct axienet_local *lp = netdev_priv(ndev);
 190
 191	/* If we end up here, tx_bd_v must have been DMA allocated. */
 192	dma_free_coherent(ndev->dev.parent,
 193			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 194			  lp->tx_bd_v,
 195			  lp->tx_bd_p);
 196
 197	if (!lp->rx_bd_v)
 198		return;
 199
 200	for (i = 0; i < lp->rx_bd_num; i++) {
 201		dma_addr_t phys;
 202
 203		/* A NULL skb means this descriptor has not been initialised
 204		 * at all.
 205		 */
 206		if (!lp->rx_bd_v[i].skb)
 207			break;
 208
 209		dev_kfree_skb(lp->rx_bd_v[i].skb);
 210
 211		/* For each descriptor, we programmed cntrl with the (non-zero)
 212		 * descriptor size, after it had been successfully allocated.
 213		 * So a non-zero value in there means we need to unmap it.
 214		 */
 215		if (lp->rx_bd_v[i].cntrl) {
 216			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
 217			dma_unmap_single(ndev->dev.parent, phys,
 218					 lp->max_frm_size, DMA_FROM_DEVICE);
 219		}
 220	}
 221
 222	dma_free_coherent(ndev->dev.parent,
 223			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 224			  lp->rx_bd_v,
 225			  lp->rx_bd_p);
 226}
 227
 228/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 229 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 230 * @ndev:	Pointer to the net_device structure
 231 *
 232 * Return: 0, on success -ENOMEM, on failure
 233 *
 234 * This function is called to initialize the Rx and Tx DMA descriptor
 235 * rings. This initializes the descriptors with required default values
 236 * and is called when Axi Ethernet driver reset is called.
 237 */
 238static int axienet_dma_bd_init(struct net_device *ndev)
 239{
 240	u32 cr;
 241	int i;
 242	struct sk_buff *skb;
 243	struct axienet_local *lp = netdev_priv(ndev);
 244
 245	/* Reset the indexes which are used for accessing the BDs */
 246	lp->tx_bd_ci = 0;
 247	lp->tx_bd_tail = 0;
 248	lp->rx_bd_ci = 0;
 249
 250	/* Allocate the Tx and Rx buffer descriptors. */
 251	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 252					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 253					 &lp->tx_bd_p, GFP_KERNEL);
 254	if (!lp->tx_bd_v)
 255		return -ENOMEM;
 256
 257	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 258					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 259					 &lp->rx_bd_p, GFP_KERNEL);
 260	if (!lp->rx_bd_v)
 261		goto out;
 262
 263	for (i = 0; i < lp->tx_bd_num; i++) {
 264		dma_addr_t addr = lp->tx_bd_p +
 265				  sizeof(*lp->tx_bd_v) *
 266				  ((i + 1) % lp->tx_bd_num);
 267
 268		lp->tx_bd_v[i].next = lower_32_bits(addr);
 269		if (lp->features & XAE_FEATURE_DMA_64BIT)
 270			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
 271	}
 272
 273	for (i = 0; i < lp->rx_bd_num; i++) {
 274		dma_addr_t addr;
 275
 276		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
 277			((i + 1) % lp->rx_bd_num);
 278		lp->rx_bd_v[i].next = lower_32_bits(addr);
 279		if (lp->features & XAE_FEATURE_DMA_64BIT)
 280			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
 281
 282		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 283		if (!skb)
 284			goto out;
 285
 286		lp->rx_bd_v[i].skb = skb;
 287		addr = dma_map_single(ndev->dev.parent, skb->data,
 288				      lp->max_frm_size, DMA_FROM_DEVICE);
 289		if (dma_mapping_error(ndev->dev.parent, addr)) {
 290			netdev_err(ndev, "DMA mapping error\n");
 291			goto out;
 292		}
 293		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
 294
 295		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 296	}
 297
 298	/* Start updating the Rx channel control register */
 299	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 300	/* Update the interrupt coalesce count */
 301	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
 302	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
 303	/* Update the delay timer count */
 304	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
 305	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
 306	/* Enable coalesce, delay timer and error interrupts */
 307	cr |= XAXIDMA_IRQ_ALL_MASK;
 308	/* Write to the Rx channel control register */
 309	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 310
 311	/* Start updating the Tx channel control register */
 312	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 313	/* Update the interrupt coalesce count */
 314	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
 315	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
 316	/* Update the delay timer count */
 317	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
 318	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
 319	/* Enable coalesce, delay timer and error interrupts */
 320	cr |= XAXIDMA_IRQ_ALL_MASK;
 321	/* Write to the Tx channel control register */
 322	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 323
 324	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 325	 * halted state. This will make the Rx side ready for reception.
 326	 */
 327	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 328	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 329	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
 330			  cr | XAXIDMA_CR_RUNSTOP_MASK);
 331	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
 332			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
 333
 334	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 335	 * Tx channel is now ready to run. But only after we write to the
 336	 * tail pointer register that the Tx channel will start transmitting.
 337	 */
 338	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 339	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 340	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
 341			  cr | XAXIDMA_CR_RUNSTOP_MASK);
 342
 343	return 0;
 344out:
 345	axienet_dma_bd_release(ndev);
 346	return -ENOMEM;
 347}
 348
 349/**
 350 * axienet_set_mac_address - Write the MAC address
 351 * @ndev:	Pointer to the net_device structure
 352 * @address:	6 byte Address to be written as MAC address
 353 *
 354 * This function is called to initialize the MAC address of the Axi Ethernet
 355 * core. It writes to the UAW0 and UAW1 registers of the core.
 356 */
 357static void axienet_set_mac_address(struct net_device *ndev,
 358				    const void *address)
 359{
 360	struct axienet_local *lp = netdev_priv(ndev);
 361
 362	if (address)
 363		memcpy(ndev->dev_addr, address, ETH_ALEN);
 364	if (!is_valid_ether_addr(ndev->dev_addr))
 365		eth_hw_addr_random(ndev);
 366
 367	/* Set up unicast MAC address filter set its mac address */
 368	axienet_iow(lp, XAE_UAW0_OFFSET,
 369		    (ndev->dev_addr[0]) |
 370		    (ndev->dev_addr[1] << 8) |
 371		    (ndev->dev_addr[2] << 16) |
 372		    (ndev->dev_addr[3] << 24));
 373	axienet_iow(lp, XAE_UAW1_OFFSET,
 374		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
 375		      ~XAE_UAW1_UNICASTADDR_MASK) |
 376		     (ndev->dev_addr[4] |
 377		     (ndev->dev_addr[5] << 8))));
 378}
 379
 380/**
 381 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 382 * @ndev:	Pointer to the net_device structure
 383 * @p:		6 byte Address to be written as MAC address
 384 *
 385 * Return: 0 for all conditions. Presently, there is no failure case.
 386 *
 387 * This function is called to initialize the MAC address of the Axi Ethernet
 388 * core. It calls the core specific axienet_set_mac_address. This is the
 389 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 390 */
 391static int netdev_set_mac_address(struct net_device *ndev, void *p)
 392{
 393	struct sockaddr *addr = p;
 394	axienet_set_mac_address(ndev, addr->sa_data);
 395	return 0;
 396}
 397
 398/**
 399 * axienet_set_multicast_list - Prepare the multicast table
 400 * @ndev:	Pointer to the net_device structure
 401 *
 402 * This function is called to initialize the multicast table during
 403 * initialization. The Axi Ethernet basic multicast support has a four-entry
 404 * multicast table which is initialized here. Additionally this function
 405 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 406 * means whenever the multicast table entries need to be updated this
 407 * function gets called.
 408 */
 409static void axienet_set_multicast_list(struct net_device *ndev)
 410{
 411	int i;
 412	u32 reg, af0reg, af1reg;
 413	struct axienet_local *lp = netdev_priv(ndev);
 414
 415	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
 416	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
 417		/* We must make the kernel realize we had to move into
 418		 * promiscuous mode. If it was a promiscuous mode request
 419		 * the flag is already set. If not we set it.
 420		 */
 421		ndev->flags |= IFF_PROMISC;
 422		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 423		reg |= XAE_FMI_PM_MASK;
 424		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 425		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
 426	} else if (!netdev_mc_empty(ndev)) {
 427		struct netdev_hw_addr *ha;
 428
 429		i = 0;
 430		netdev_for_each_mc_addr(ha, ndev) {
 431			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
 432				break;
 433
 434			af0reg = (ha->addr[0]);
 435			af0reg |= (ha->addr[1] << 8);
 436			af0reg |= (ha->addr[2] << 16);
 437			af0reg |= (ha->addr[3] << 24);
 438
 439			af1reg = (ha->addr[4]);
 440			af1reg |= (ha->addr[5] << 8);
 441
 442			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 443			reg |= i;
 444
 445			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 446			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
 447			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
 448			i++;
 449		}
 450	} else {
 451		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 452		reg &= ~XAE_FMI_PM_MASK;
 453
 454		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 455
 456		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
 457			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 458			reg |= i;
 459
 460			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 461			axienet_iow(lp, XAE_AF0_OFFSET, 0);
 462			axienet_iow(lp, XAE_AF1_OFFSET, 0);
 463		}
 464
 465		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
 466	}
 467}
 468
 469/**
 470 * axienet_setoptions - Set an Axi Ethernet option
 471 * @ndev:	Pointer to the net_device structure
 472 * @options:	Option to be enabled/disabled
 473 *
 474 * The Axi Ethernet core has multiple features which can be selectively turned
 475 * on or off. The typical options could be jumbo frame option, basic VLAN
 476 * option, promiscuous mode option etc. This function is used to set or clear
 477 * these options in the Axi Ethernet hardware. This is done through
 478 * axienet_option structure .
 479 */
 480static void axienet_setoptions(struct net_device *ndev, u32 options)
 481{
 482	int reg;
 483	struct axienet_local *lp = netdev_priv(ndev);
 484	struct axienet_option *tp = &axienet_options[0];
 485
 486	while (tp->opt) {
 487		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
 488		if (options & tp->opt)
 489			reg |= tp->m_or;
 490		axienet_iow(lp, tp->reg, reg);
 491		tp++;
 492	}
 493
 494	lp->options |= options;
 495}
 496
 497static int __axienet_device_reset(struct axienet_local *lp)
 498{
 499	u32 timeout;
 
 500
 501	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 502	 * process of Axi DMA takes a while to complete as all pending
 503	 * commands/transfers will be flushed or completed during this
 504	 * reset process.
 505	 * Note that even though both TX and RX have their own reset register,
 506	 * they both reset the entire DMA core, so only one needs to be used.
 507	 */
 508	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
 509	timeout = DELAY_OF_ONE_MILLISEC;
 510	while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
 511				XAXIDMA_CR_RESET_MASK) {
 512		udelay(1);
 513		if (--timeout == 0) {
 514			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
 515				   __func__);
 516			return -ETIMEDOUT;
 517		}
 
 
 
 
 
 
 
 
 518	}
 519
 520	return 0;
 521}
 522
 523/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 524 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 525 * @ndev:	Pointer to the net_device structure
 526 *
 527 * This function is called to reset and initialize the Axi Ethernet core. This
 528 * is typically called during initialization. It does a reset of the Axi DMA
 529 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 530 * areconnected to Axi Ethernet reset lines, this in turn resets the Axi
 531 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 532 * core.
 533 * Returns 0 on success or a negative error number otherwise.
 534 */
 535static int axienet_device_reset(struct net_device *ndev)
 536{
 537	u32 axienet_status;
 538	struct axienet_local *lp = netdev_priv(ndev);
 539	int ret;
 540
 541	ret = __axienet_device_reset(lp);
 542	if (ret)
 543		return ret;
 544
 545	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 546	lp->options |= XAE_OPTION_VLAN;
 547	lp->options &= (~XAE_OPTION_JUMBO);
 548
 549	if ((ndev->mtu > XAE_MTU) &&
 550		(ndev->mtu <= XAE_JUMBO_MTU)) {
 551		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
 552					XAE_TRL_SIZE;
 553
 554		if (lp->max_frm_size <= lp->rxmem)
 555			lp->options |= XAE_OPTION_JUMBO;
 556	}
 557
 558	ret = axienet_dma_bd_init(ndev);
 559	if (ret) {
 560		netdev_err(ndev, "%s: descriptor allocation failed\n",
 561			   __func__);
 562		return ret;
 
 
 
 
 
 
 563	}
 564
 565	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
 566	axienet_status &= ~XAE_RCW1_RX_MASK;
 567	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
 568
 569	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
 570	if (axienet_status & XAE_INT_RXRJECT_MASK)
 571		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
 572	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
 573		    XAE_INT_RECV_ERROR_MASK : 0);
 574
 575	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 576
 577	/* Sync default options with HW but leave receiver and
 578	 * transmitter disabled.
 579	 */
 580	axienet_setoptions(ndev, lp->options &
 581			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 582	axienet_set_mac_address(ndev, NULL);
 583	axienet_set_multicast_list(ndev);
 584	axienet_setoptions(ndev, lp->options);
 585
 586	netif_trans_update(ndev);
 587
 588	return 0;
 589}
 590
 591/**
 592 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 593 * @ndev:	Pointer to the net_device structure
 594 * @first_bd:	Index of first descriptor to clean up
 595 * @nr_bds:	Number of descriptors to clean up, can be -1 if unknown.
 
 596 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 597 * 		in all cleaned-up descriptors. Ignored if NULL.
 
 598 *
 599 * Would either be called after a successful transmit operation, or after
 600 * there was an error when setting up the chain.
 601 * Returns the number of descriptors handled.
 602 */
 603static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 604				 int nr_bds, u32 *sizep)
 605{
 606	struct axienet_local *lp = netdev_priv(ndev);
 607	struct axidma_bd *cur_p;
 608	int max_bds = nr_bds;
 609	unsigned int status;
 610	dma_addr_t phys;
 611	int i;
 612
 613	if (max_bds == -1)
 614		max_bds = lp->tx_bd_num;
 615
 616	for (i = 0; i < max_bds; i++) {
 617		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
 618		status = cur_p->status;
 619
 620		/* If no number is given, clean up *all* descriptors that have
 621		 * been completed by the MAC.
 622		 */
 623		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 624			break;
 625
 
 
 626		phys = desc_get_phys_addr(lp, cur_p);
 627		dma_unmap_single(ndev->dev.parent, phys,
 628				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 629				 DMA_TO_DEVICE);
 630
 631		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
 632			dev_consume_skb_irq(cur_p->skb);
 633
 634		cur_p->cntrl = 0;
 635		cur_p->app0 = 0;
 636		cur_p->app1 = 0;
 637		cur_p->app2 = 0;
 638		cur_p->app4 = 0;
 
 
 
 
 639		cur_p->status = 0;
 640		cur_p->skb = NULL;
 641
 642		if (sizep)
 643			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 644	}
 645
 646	return i;
 647}
 648
 649/**
 650 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 651 * Axi DMA Tx channel.
 652 * @ndev:	Pointer to the net_device structure
 653 *
 654 * This function is invoked from the Axi DMA Tx isr to notify the completion
 655 * of transmit operation. It clears fields in the corresponding Tx BDs and
 656 * unmaps the corresponding buffer so that CPU can regain ownership of the
 657 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 658 * required.
 659 */
 660static void axienet_start_xmit_done(struct net_device *ndev)
 661{
 662	struct axienet_local *lp = netdev_priv(ndev);
 663	u32 packets = 0;
 664	u32 size = 0;
 665
 666	packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);
 667
 668	lp->tx_bd_ci += packets;
 669	if (lp->tx_bd_ci >= lp->tx_bd_num)
 670		lp->tx_bd_ci -= lp->tx_bd_num;
 671
 672	ndev->stats.tx_packets += packets;
 673	ndev->stats.tx_bytes += size;
 674
 675	/* Matches barrier in axienet_start_xmit */
 676	smp_mb();
 677
 678	netif_wake_queue(ndev);
 679}
 680
 681/**
 682 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 683 * @lp:		Pointer to the axienet_local structure
 684 * @num_frag:	The number of BDs to check for
 685 *
 686 * Return: 0, on success
 687 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 688 *
 689 * This function is invoked before BDs are allocated and transmission starts.
 690 * This function returns 0 if a BD or group of BDs can be allocated for
 691 * transmission. If the BD or any of the BDs are not free the function
 692 * returns a busy status. This is invoked from axienet_start_xmit.
 693 */
 694static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 695					    int num_frag)
 696{
 697	struct axidma_bd *cur_p;
 698	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
 699	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
 700		return NETDEV_TX_BUSY;
 701	return 0;
 702}
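/*
 * A minimal sketch of the lockless stop/wake protocol built on the check
 * above, mirroring what axienet_start_xmit() and axienet_start_xmit_done()
 * below actually do:
 *
 *	xmit path:				completion path:
 *	if (!space) {				free completed BDs;
 *		netif_stop_queue();		smp_mb();
 *		smp_mb();			netif_wake_queue();
 *		if (still no space)
 *			return NETDEV_TX_BUSY;
 *		netif_wake_queue();
 *	}
 *
 * The paired barriers guarantee that either the xmit path's re-check sees
 * the freed descriptors, or the completion path's wake sees the stopped
 * queue, so the queue can never stall in the stopped state.
 */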
 703
 704/**
 705 * axienet_start_xmit - Starts the transmission.
 706 * @skb:	sk_buff pointer that contains data to be Txed.
 707 * @ndev:	Pointer to net_device structure.
 708 *
 709 * Return: NETDEV_TX_OK, on success
 710 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 711 *
 712 * This function is invoked from upper layers to initiate transmission. The
 713 * function uses the next available free BDs and populates their fields to
 714 * start the transmission. Additionally if checksum offloading is supported,
 715 * it populates AXI Stream Control fields with appropriate values.
 716 */
 717static netdev_tx_t
 718axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 719{
 720	u32 ii;
 721	u32 num_frag;
 722	u32 csum_start_off;
 723	u32 csum_index_off;
 724	skb_frag_t *frag;
 725	dma_addr_t tail_p, phys;
 726	struct axienet_local *lp = netdev_priv(ndev);
 727	struct axidma_bd *cur_p;
 728	u32 orig_tail_ptr = lp->tx_bd_tail;
 729
 730	num_frag = skb_shinfo(skb)->nr_frags;
 731	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 732
 733	if (axienet_check_tx_bd_space(lp, num_frag)) {
 734		if (netif_queue_stopped(ndev))
 735			return NETDEV_TX_BUSY;
 736
 737		netif_stop_queue(ndev);
 738
 739		/* Matches barrier in axienet_start_xmit_done */
 740		smp_mb();
 741
 742		/* Space might have just been freed - check again */
 743		if (axienet_check_tx_bd_space(lp, num_frag))
 744			return NETDEV_TX_BUSY;
 745
 746		netif_wake_queue(ndev);
 747	}
 748
 749	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 750		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 751			/* Tx Full Checksum Offload Enabled */
 752			cur_p->app0 |= 2;
 753		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
 754			csum_start_off = skb_transport_offset(skb);
 755			csum_index_off = csum_start_off + skb->csum_offset;
 756			/* Tx Partial Checksum Offload Enabled */
 757			cur_p->app0 |= 1;
 758			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
 759		}
 760	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 761		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
 762	}
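	/* Example: for a TCP/IPv4 packet whose transport header starts at
	 * byte 34 and whose csum_offset is 16, app1 above encodes
	 * (34 << 16) | 50: start checksumming at offset 34 and store the
	 * result at offset 50.
	 */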
 763
 764	phys = dma_map_single(ndev->dev.parent, skb->data,
 765			      skb_headlen(skb), DMA_TO_DEVICE);
 766	if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
 767		if (net_ratelimit())
 768			netdev_err(ndev, "TX DMA mapping error\n");
 769		ndev->stats.tx_dropped++;
		/* Returning NETDEV_TX_OK means we own the skb, so free it */
		dev_kfree_skb_any(skb);
 770		return NETDEV_TX_OK;
 771	}
 772	desc_set_phys_addr(lp, phys, cur_p);
 773	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 774
 775	for (ii = 0; ii < num_frag; ii++) {
 776		if (++lp->tx_bd_tail >= lp->tx_bd_num)
 777			lp->tx_bd_tail = 0;
 778		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 779		frag = &skb_shinfo(skb)->frags[ii];
 780		phys = dma_map_single(ndev->dev.parent,
 781				      skb_frag_address(frag),
 782				      skb_frag_size(frag),
 783				      DMA_TO_DEVICE);
 784		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
 785			if (net_ratelimit())
 786				netdev_err(ndev, "TX DMA mapping error\n");
 787			ndev->stats.tx_dropped++;
 788			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
 789					      NULL);
 790			lp->tx_bd_tail = orig_tail_ptr;
			/* Returning NETDEV_TX_OK means we own the skb, so free it */
			dev_kfree_skb_any(skb);
 791
 792			return NETDEV_TX_OK;
 793		}
 794		desc_set_phys_addr(lp, phys, cur_p);
 795		cur_p->cntrl = skb_frag_size(frag);
 796	}
 797
 798	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
 799	cur_p->skb = skb;
 800
 801	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
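	/* Writing the tail pointer below hands every BD up to and including
	 * this one to the DMA engine, which fetches and transmits them in
	 * order.
	 */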
 802	/* Start the transfer */
 803	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
 804	if (++lp->tx_bd_tail >= lp->tx_bd_num)
 805		lp->tx_bd_tail = 0;
 806
 807	return NETDEV_TX_OK;
 808}
 809
 810/**
 811 * axienet_recv - Called from the Axi DMA Rx Isr to complete the received
 812 *		  BD processing.
 813 * @ndev:	Pointer to net_device structure.
 814 *
 815 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 816 * does minimal processing and invokes "netif_rx" to complete further
 817 * processing.
 818 */
 819static void axienet_recv(struct net_device *ndev)
 820{
 821	u32 length;
 822	u32 csumstatus;
 823	u32 size = 0;
 824	u32 packets = 0;
 825	dma_addr_t tail_p = 0;
 826	struct axienet_local *lp = netdev_priv(ndev);
 827	struct sk_buff *skb, *new_skb;
 828	struct axidma_bd *cur_p;
 829
 830	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 831
 832	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
 833		dma_addr_t phys;
 834
 835		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
 836
 837		phys = desc_get_phys_addr(lp, cur_p);
 838		dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
 839				 DMA_FROM_DEVICE);
 840
 841		skb = cur_p->skb;
 842		cur_p->skb = NULL;
 843		length = cur_p->app4 & 0x0000FFFF;
 844
 845		skb_put(skb, length);
 846		skb->protocol = eth_type_trans(skb, ndev);
 847		/*skb_checksum_none_assert(skb);*/
 848		skb->ip_summed = CHECKSUM_NONE;
 849
 850		/* if we're doing Rx csum offload, set it up */
 851		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
 852			csumstatus = (cur_p->app2 &
 853				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
 854			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
 855			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
 856				skb->ip_summed = CHECKSUM_UNNECESSARY;
 857			}
 858		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
 859			   skb->protocol == htons(ETH_P_IP) &&
 860			   skb->len > 64) {
 861			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
 862			skb->ip_summed = CHECKSUM_COMPLETE;
 863		}
 864
 865		netif_rx(skb);
 866
 867		size += length;
 868		packets++;
 869
 870		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 871		if (!new_skb)
 872			return;
 873
 874		phys = dma_map_single(ndev->dev.parent, new_skb->data,
 875				      lp->max_frm_size,
 876				      DMA_FROM_DEVICE);
 877		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
 878			if (net_ratelimit())
 879				netdev_err(ndev, "RX DMA mapping error\n");
 880			dev_kfree_skb(new_skb);
 881			return;
 882		}
 883		desc_set_phys_addr(lp, phys, cur_p);
 884
 885		cur_p->cntrl = lp->max_frm_size;
 886		cur_p->status = 0;
 887		cur_p->skb = new_skb;
 888
 889		if (++lp->rx_bd_ci >= lp->rx_bd_num)
 890			lp->rx_bd_ci = 0;
 891		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 892	}
 893
 894	ndev->stats.rx_packets += packets;
 895	ndev->stats.rx_bytes += size;
 896
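	/* Re-arm the Rx channel: advancing the tail pointer past the last
	 * BD we processed tells the DMA engine those descriptors may be
	 * reused. tail_p stays zero when no BD completed, so skip the write.
	 */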
 897	if (tail_p)
 898		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
 899}
 900
 901/**
 902 * axienet_tx_irq - Tx Done Isr.
 903 * @irq:	irq number
 904 * @_ndev:	net_device pointer
 905 *
 906 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 907 *
 908 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 909 * to complete the BD processing.
 910 */
 911static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
 912{
 913	u32 cr;
 914	unsigned int status;
 915	struct net_device *ndev = _ndev;
 916	struct axienet_local *lp = netdev_priv(ndev);
 917
 918	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 919	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
 920		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
 921		axienet_start_xmit_done(lp->ndev);
 922		goto out;
 923	}
 924	if (!(status & XAXIDMA_IRQ_ALL_MASK))
 925		return IRQ_NONE;
 926	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 927		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
 928		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
 929			(lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
 930			(lp->tx_bd_v[lp->tx_bd_ci]).phys);
 931
 932		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 933		/* Disable coalesce, delay timer and error interrupts */
 934		cr &= (~XAXIDMA_IRQ_ALL_MASK);
 935		/* Write to the Tx channel control register */
 936		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 937
 938		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 939		/* Disable coalesce, delay timer and error interrupts */
 940		cr &= (~XAXIDMA_IRQ_ALL_MASK);
 941		/* Write to the Rx channel control register */
 942		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 943
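		/* A full recovery (reset plus BD ring reinitialization)
		 * cannot be done in interrupt context, so defer it to the
		 * dma_err_task worker.
		 */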
 944		schedule_work(&lp->dma_err_task);
 945		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
 946	}
 947out:
 948	return IRQ_HANDLED;
 949}
 950
 951/**
 952 * axienet_rx_irq - Rx Isr.
 953 * @irq:	irq number
 954 * @_ndev:	net_device pointer
 955 *
 956 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 957 *
 958 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 959 * processing.
 960 */
 961static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
 962{
 963	u32 cr;
 964	unsigned int status;
 965	struct net_device *ndev = _ndev;
 966	struct axienet_local *lp = netdev_priv(ndev);
 967
 968	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 969	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
 970		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
 971		axienet_recv(lp->ndev);
 972		goto out;
 973	}
 974	if (!(status & XAXIDMA_IRQ_ALL_MASK))
 975		return IRQ_NONE;
 976	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 977		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
 978		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
 979			(lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
 980			(lp->rx_bd_v[lp->rx_bd_ci]).phys);
 981
 982		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 983		/* Disable coalesce, delay timer and error interrupts */
 984		cr &= (~XAXIDMA_IRQ_ALL_MASK);
 985		/* Finally write to the Tx channel control register */
 986		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 987
 988		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 989		/* Disable coalesce, delay timer and error interrupts */
 990		cr &= (~XAXIDMA_IRQ_ALL_MASK);
 991		/* write to the Rx channel control register */
 992		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 993
 994		schedule_work(&lp->dma_err_task);
 995		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
 996	}
 997out:
 998	return IRQ_HANDLED;
 999}
1000
1001/**
1002 * axienet_eth_irq - Ethernet core Isr.
1003 * @irq:	irq number
1004 * @_ndev:	net_device pointer
1005 *
1006 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1007 *
1008 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1009 */
1010static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1011{
1012	struct net_device *ndev = _ndev;
1013	struct axienet_local *lp = netdev_priv(ndev);
1014	unsigned int pending;
1015
1016	pending = axienet_ior(lp, XAE_IP_OFFSET);
1017	if (!pending)
1018		return IRQ_NONE;
1019
1020	if (pending & XAE_INT_RXFIFOOVR_MASK)
1021		ndev->stats.rx_missed_errors++;
1022
1023	if (pending & XAE_INT_RXRJECT_MASK)
1024		ndev->stats.rx_frame_errors++;
1025
1026	axienet_iow(lp, XAE_IS_OFFSET, pending);
1027	return IRQ_HANDLED;
1028}
1029
1030static void axienet_dma_err_handler(struct work_struct *work);
1031
1032/**
1033 * axienet_open - Driver open routine.
1034 * @ndev:	Pointer to net_device structure
1035 *
1036 * Return: 0, on success.
1037 *	    non-zero error value on failure
1038 *
1039 * This is the driver open routine. It calls phylink_start to start the
1040 * PHY device.
1041 * It also allocates interrupt service routines, enables the interrupt lines
1042 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1043 * descriptors are initialized.
1044 */
1045static int axienet_open(struct net_device *ndev)
1046{
1047	int ret;
1048	struct axienet_local *lp = netdev_priv(ndev);
1049
1050	dev_dbg(&ndev->dev, "axienet_open()\n");
1051
1052	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
1053	 * When we do an Axi Ethernet reset, it resets the complete core
1054	 * including the MDIO. MDIO must be disabled before resetting
1055	 * and re-enabled afterwards.
1056	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1057	 */
1058	mutex_lock(&lp->mii_bus->mdio_lock);
1059	axienet_mdio_disable(lp);
1060	ret = axienet_device_reset(ndev);
1061	if (ret == 0)
1062		ret = axienet_mdio_enable(lp);
1063	mutex_unlock(&lp->mii_bus->mdio_lock);
1064	if (ret < 0)
1065		return ret;
1066
1067	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1068	if (ret) {
1069		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1070		return ret;
1071	}
1072
1073	phylink_start(lp->phylink);
1074
1075	/* Enable worker thread for Axi DMA error handling */
1076	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1077
1078	/* Enable interrupts for Axi DMA Tx */
1079	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1080			  ndev->name, ndev);
1081	if (ret)
1082		goto err_tx_irq;
1083	/* Enable interrupts for Axi DMA Rx */
1084	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1085			  ndev->name, ndev);
1086	if (ret)
1087		goto err_rx_irq;
1088	/* Enable interrupts for Axi Ethernet core (if defined) */
1089	if (lp->eth_irq > 0) {
1090		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1091				  ndev->name, ndev);
1092		if (ret)
1093			goto err_eth_irq;
1094	}
1095
1096	return 0;
1097
1098err_eth_irq:
1099	free_irq(lp->rx_irq, ndev);
1100err_rx_irq:
1101	free_irq(lp->tx_irq, ndev);
1102err_tx_irq:
1103	phylink_stop(lp->phylink);
1104	phylink_disconnect_phy(lp->phylink);
1105	cancel_work_sync(&lp->dma_err_task);
1106	dev_err(lp->dev, "request_irq() failed\n");
1107	return ret;
1108}
1109
1110/**
1111 * axienet_stop - Driver stop routine.
1112 * @ndev:	Pointer to net_device structure
1113 *
1114 * Return: 0, on success.
1115 *
1116 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1117 * device. It also removes the interrupt handlers and disables the interrupts.
1118 * The Axi DMA Tx/Rx BDs are released.
1119 */
1120static int axienet_stop(struct net_device *ndev)
1121{
1122	u32 cr, sr;
1123	int count;
1124	struct axienet_local *lp = netdev_priv(ndev);
1125
1126	dev_dbg(&ndev->dev, "axienet_close()\n");
1127
1128	phylink_stop(lp->phylink);
1129	phylink_disconnect_phy(lp->phylink);
1130
1131	axienet_setoptions(ndev, lp->options &
1132			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1133
1134	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1135	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
1136	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1137
1138	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1139	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
1140	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1141
1142	axienet_iow(lp, XAE_IE_OFFSET, 0);
1143
1144	/* Give DMAs a chance to halt gracefully */
1145	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1146	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
1147		msleep(20);
1148		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1149	}
1150
1151	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1152	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
1153		msleep(20);
1154		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1155	}
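	/* Worst case the two loops above poll for ~100 ms per channel; if a
	 * DMA engine still has not halted by then, the reset below stops it
	 * unconditionally.
	 */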
1156
1157	/* Do a reset to ensure DMA is really stopped */
1158	mutex_lock(&lp->mii_bus->mdio_lock);
1159	axienet_mdio_disable(lp);
1160	__axienet_device_reset(lp);
1161	axienet_mdio_enable(lp);
1162	mutex_unlock(&lp->mii_bus->mdio_lock);
1163
1164	cancel_work_sync(&lp->dma_err_task);
1165
1166	if (lp->eth_irq > 0)
1167		free_irq(lp->eth_irq, ndev);
1168	free_irq(lp->tx_irq, ndev);
1169	free_irq(lp->rx_irq, ndev);
1170
1171	axienet_dma_bd_release(ndev);
1172	return 0;
1173}
1174
1175/**
1176 * axienet_change_mtu - Driver change mtu routine.
1177 * @ndev:	Pointer to net_device structure
1178 * @new_mtu:	New mtu value to be applied
1179 *
1180 * Return: 0 on success, -EBUSY if running, -EINVAL if frame won't fit.
1181 *
1182 * This is the change mtu driver routine. It checks if the Axi Ethernet
1183 * hardware supports jumbo frames before changing the mtu. This can be
1184 * called only when the device is not up.
1185 */
1186static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1187{
1188	struct axienet_local *lp = netdev_priv(ndev);
1189
1190	if (netif_running(ndev))
1191		return -EBUSY;
1192
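	/* The new frame (MTU + VLAN Ethernet header + frame trailer) must
	 * fit in the Rx memory the core was synthesized with; e.g. an MTU
	 * of 9000 needs rxmem of at least 9000 + 18 + 4 = 9022 bytes, with
	 * XAE_TRL_SIZE being the 4-byte frame trailer.
	 */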
1193	if (new_mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE > lp->rxmem)
1195		return -EINVAL;
1196
1197	ndev->mtu = new_mtu;
1198
1199	return 0;
1200}
1201
1202#ifdef CONFIG_NET_POLL_CONTROLLER
1203/**
1204 * axienet_poll_controller - Axi Ethernet poll mechanism.
1205 * @ndev:	Pointer to net_device structure
1206 *
1207 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1208 * to polling the ISRs and are enabled back after the polling is done.
1209 */
1210static void axienet_poll_controller(struct net_device *ndev)
1211{
1212	struct axienet_local *lp = netdev_priv(ndev);

1213	disable_irq(lp->tx_irq);
1214	disable_irq(lp->rx_irq);
1215	axienet_rx_irq(lp->rx_irq, ndev);
1216	axienet_tx_irq(lp->tx_irq, ndev);
1217	enable_irq(lp->tx_irq);
1218	enable_irq(lp->rx_irq);
1219}
1220#endif
1221
1222static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1223{
1224	struct axienet_local *lp = netdev_priv(dev);
1225
1226	if (!netif_running(dev))
1227		return -EINVAL;
1228
1229	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1230}
1231
1232static const struct net_device_ops axienet_netdev_ops = {
1233	.ndo_open = axienet_open,
1234	.ndo_stop = axienet_stop,
1235	.ndo_start_xmit = axienet_start_xmit,
1236	.ndo_change_mtu	= axienet_change_mtu,
1237	.ndo_set_mac_address = netdev_set_mac_address,
1238	.ndo_validate_addr = eth_validate_addr,
1239	.ndo_do_ioctl = axienet_ioctl,
1240	.ndo_set_rx_mode = axienet_set_multicast_list,
1241#ifdef CONFIG_NET_POLL_CONTROLLER
1242	.ndo_poll_controller = axienet_poll_controller,
1243#endif
1244};
1245
1246/**
1247 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1248 * @ndev:	Pointer to net_device structure
1249 * @ed:		Pointer to ethtool_drvinfo structure
1250 *
1251 * This implements ethtool command for getting the driver information.
1252 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1253 */
1254static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1255					 struct ethtool_drvinfo *ed)
1256{
1257	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1258	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1259}
1260
1261/**
1262 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1263 *				   AxiEthernet core.
1264 * @ndev:	Pointer to net_device structure
1265 *
1266 * This implements ethtool command for getting the total register length
1267 * information.
1268 *
1269 * Return: the total regs length
1270 */
1271static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1272{
1273	return sizeof(u32) * AXIENET_REGS_N;
1274}
1275
1276/**
1277 * axienet_ethtools_get_regs - Dump the contents of all registers present
1278 *			       in AxiEthernet core.
1279 * @ndev:	Pointer to net_device structure
1280 * @regs:	Pointer to ethtool_regs structure
1281 * @ret:	Void pointer used to return the contents of the registers.
1282 *
1283 * This implements ethtool command for getting the Axi Ethernet register dump.
1284 * Issue "ethtool -d ethX" to execute this function.
1285 */
1286static void axienet_ethtools_get_regs(struct net_device *ndev,
1287				      struct ethtool_regs *regs, void *ret)
1288{
1289	u32 *data = (u32 *) ret;
1290	size_t len = sizeof(u32) * AXIENET_REGS_N;
1291	struct axienet_local *lp = netdev_priv(ndev);
1292
1293	regs->version = 0;
1294	regs->len = len;
1295
1296	memset(data, 0, len);
1297	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1298	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1299	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1300	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1301	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1302	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1303	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1304	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1305	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1306	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1307	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1308	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1309	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1310	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1311	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1312	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1313	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1314	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1315	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1316	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1317	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1318	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1319	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1320	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1321	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1322	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1323	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1324	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1325	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1326	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1327	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1328	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1329	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1330	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1331	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1332	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1333}
1334
1335static void axienet_ethtools_get_ringparam(struct net_device *ndev,
1336					   struct ethtool_ringparam *ering)
1337{
1338	struct axienet_local *lp = netdev_priv(ndev);
1339
1340	ering->rx_max_pending = RX_BD_NUM_MAX;
1341	ering->rx_mini_max_pending = 0;
1342	ering->rx_jumbo_max_pending = 0;
1343	ering->tx_max_pending = TX_BD_NUM_MAX;
1344	ering->rx_pending = lp->rx_bd_num;
1345	ering->rx_mini_pending = 0;
1346	ering->rx_jumbo_pending = 0;
1347	ering->tx_pending = lp->tx_bd_num;
1348}
1349
1350static int axienet_ethtools_set_ringparam(struct net_device *ndev,
1351					  struct ethtool_ringparam *ering)
1352{
1353	struct axienet_local *lp = netdev_priv(ndev);
1354
1355	if (ering->rx_pending > RX_BD_NUM_MAX ||
1356	    ering->rx_mini_pending ||
1357	    ering->rx_jumbo_pending ||
1358	    ering->tx_pending < TX_BD_NUM_MIN ||
	    ering->tx_pending > TX_BD_NUM_MAX)
1359		return -EINVAL;
1360
1361	if (netif_running(ndev))
1362		return -EBUSY;
1363
1364	lp->rx_bd_num = ering->rx_pending;
1365	lp->tx_bd_num = ering->tx_pending;
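	/* The new ring sizes take effect the next time the interface is
	 * opened and the buffer descriptor rings are reallocated.
	 */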
1366	return 0;
1367}
1368
1369/**
1370 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1371 *				     Tx and Rx paths.
1372 * @ndev:	Pointer to net_device structure
1373 * @epauseparm:	Pointer to ethtool_pauseparam structure.
1374 *
1375 * This implements ethtool command for getting axi ethernet pause frame
1376 * setting. Issue "ethtool -a ethX" to execute this function.
1377 */
1378static void
1379axienet_ethtools_get_pauseparam(struct net_device *ndev,
1380				struct ethtool_pauseparam *epauseparm)
1381{
1382	struct axienet_local *lp = netdev_priv(ndev);
1383
1384	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1385}
1386
1387/**
1388 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1389 *				     settings.
1390 * @ndev:	Pointer to net_device structure
1391 * @epauseparm:	Pointer to ethtool_pauseparam structure
1392 *
1393 * This implements ethtool command for enabling flow control on Rx and Tx
1394 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1395 * function.
1396 *
1397 * Return: 0 on success, or a negative error code from phylink on failure
1398 */
1399static int
1400axienet_ethtools_set_pauseparam(struct net_device *ndev,
1401				struct ethtool_pauseparam *epauseparm)
1402{
1403	struct axienet_local *lp = netdev_priv(ndev);
1404
1405	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
1406}
1407
1408/**
1409 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
1410 * @ndev:	Pointer to net_device structure
1411 * @ecoalesce:	Pointer to ethtool_coalesce structure
1412 *
1413 * This implements ethtool command for getting the DMA interrupt coalescing
1414 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
1415 * execute this function.
1416 *
1417 * Return: 0 always
1418 */
1419static int axienet_ethtools_get_coalesce(struct net_device *ndev,
1420					 struct ethtool_coalesce *ecoalesce)
1421{
1422	u32 regval = 0;
1423	struct axienet_local *lp = netdev_priv(ndev);

1424	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1425	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
1426					     >> XAXIDMA_COALESCE_SHIFT;
1427	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1428	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
1429					     >> XAXIDMA_COALESCE_SHIFT;
1430	return 0;
1431}
1432
1433/**
1434 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
1435 * @ndev:	Pointer to net_device structure
1436 * @ecoalesce:	Pointer to ethtool_coalesce structure
1437 *
1438 * This implements ethtool command for setting the DMA interrupt coalescing
1439 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
1440 * prompt to execute this function.
1441 *
1442 * Return: 0, on success, Non-zero error value on failure.
1443 */
1444static int axienet_ethtools_set_coalesce(struct net_device *ndev,
1445					 struct ethtool_coalesce *ecoalesce)
1446{
1447	struct axienet_local *lp = netdev_priv(ndev);
1448
1449	if (netif_running(ndev)) {
1450		netdev_err(ndev,
1451			   "Please stop the interface before changing coalescing\n");
1452		return -EBUSY;
1453	}
1454
1455	if (ecoalesce->rx_max_coalesced_frames)
1456		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
1457	if (ecoalesce->tx_max_coalesced_frames)
1458		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
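	/* A zero value leaves the current count untouched. The new counts
	 * are programmed into the DMA control registers the next time the
	 * interface is opened.
	 */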
1459
1460	return 0;
1461}
1462
1463static int
1464axienet_ethtools_get_link_ksettings(struct net_device *ndev,
1465				    struct ethtool_link_ksettings *cmd)
1466{
1467	struct axienet_local *lp = netdev_priv(ndev);
1468
1469	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
1470}
1471
1472static int
1473axienet_ethtools_set_link_ksettings(struct net_device *ndev,
1474				    const struct ethtool_link_ksettings *cmd)
1475{
1476	struct axienet_local *lp = netdev_priv(ndev);
1477
1478	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
1479}
1480
1481static const struct ethtool_ops axienet_ethtool_ops = {
1482	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
1483	.get_drvinfo    = axienet_ethtools_get_drvinfo,
1484	.get_regs_len   = axienet_ethtools_get_regs_len,
1485	.get_regs       = axienet_ethtools_get_regs,
1486	.get_link       = ethtool_op_get_link,
1487	.get_ringparam	= axienet_ethtools_get_ringparam,
1488	.set_ringparam	= axienet_ethtools_set_ringparam,
1489	.get_pauseparam = axienet_ethtools_get_pauseparam,
1490	.set_pauseparam = axienet_ethtools_set_pauseparam,
1491	.get_coalesce   = axienet_ethtools_get_coalesce,
1492	.set_coalesce   = axienet_ethtools_set_coalesce,
1493	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
1494	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
1495};
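/*
 * For reference, a hypothetical userspace probe of the coalesce settings
 * exposed above, equivalent to "ethtool -c ethX" (error handling omitted;
 * "eth0" is a placeholder interface name; needs linux/ethtool.h,
 * linux/sockios.h and net/if.h):
 *
 *	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&ec;
 *	if (!ioctl(fd, SIOCETHTOOL, &ifr))
 *		printf("rx-frames %u tx-frames %u\n",
 *		       ec.rx_max_coalesced_frames,
 *		       ec.tx_max_coalesced_frames);
 */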
1496
1497static void axienet_validate(struct phylink_config *config,
1498			     unsigned long *supported,
1499			     struct phylink_link_state *state)
1500{
1501	struct net_device *ndev = to_net_dev(config->dev);
1502	struct axienet_local *lp = netdev_priv(ndev);
1503	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1504
1505	/* Only support the mode we are configured for */
1506	if (state->interface != PHY_INTERFACE_MODE_NA &&
1507	    state->interface != lp->phy_mode) {
1508		netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
1509			    phy_modes(state->interface),
1510			    phy_modes(lp->phy_mode));
1511		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1512		return;
1513	}
1514
1515	phylink_set(mask, Autoneg);
1516	phylink_set_port_modes(mask);
1517
1518	phylink_set(mask, Asym_Pause);
1519	phylink_set(mask, Pause);
1520	phylink_set(mask, 1000baseX_Full);
1521	phylink_set(mask, 10baseT_Full);
1522	phylink_set(mask, 100baseT_Full);
1523	phylink_set(mask, 1000baseT_Full);
1524
1525	bitmap_and(supported, supported, mask,
1526		   __ETHTOOL_LINK_MODE_MASK_NBITS);
1527	bitmap_and(state->advertising, state->advertising, mask,
1528		   __ETHTOOL_LINK_MODE_MASK_NBITS);
1529}
1530
1531static void axienet_mac_pcs_get_state(struct phylink_config *config,
1532				      struct phylink_link_state *state)
1533{
1534	struct net_device *ndev = to_net_dev(config->dev);
1535	struct axienet_local *lp = netdev_priv(ndev);
1536	u32 emmc_reg, fcc_reg;
1537
1538	state->interface = lp->phy_mode;
1539
1540	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
1541	if (emmc_reg & XAE_EMMC_LINKSPD_1000)
1542		state->speed = SPEED_1000;
1543	else if (emmc_reg & XAE_EMMC_LINKSPD_100)
1544		state->speed = SPEED_100;
1545	else
1546		state->speed = SPEED_10;
1547
1548	state->pause = 0;
1549	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
1550	if (fcc_reg & XAE_FCC_FCTX_MASK)
1551		state->pause |= MLO_PAUSE_TX;
1552	if (fcc_reg & XAE_FCC_FCRX_MASK)
1553		state->pause |= MLO_PAUSE_RX;
1554
1555	state->an_complete = 0;
1556	state->duplex = 1;
1557}
1558
1559static void axienet_mac_an_restart(struct phylink_config *config)
1560{
1561	/* Unsupported, do nothing */
1562}
1563
1564static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
1565			       const struct phylink_link_state *state)
1566{
1567	/* nothing meaningful to do */
1568}
1569
1570static void axienet_mac_link_down(struct phylink_config *config,
1571				  unsigned int mode,
1572				  phy_interface_t interface)
1573{
1574	/* nothing meaningful to do */
1575}
1576
1577static void axienet_mac_link_up(struct phylink_config *config,
1578				struct phy_device *phy,
1579				unsigned int mode, phy_interface_t interface,
1580				int speed, int duplex,
1581				bool tx_pause, bool rx_pause)
1582{
1583	struct net_device *ndev = to_net_dev(config->dev);
1584	struct axienet_local *lp = netdev_priv(ndev);
1585	u32 emmc_reg, fcc_reg;
1586
1587	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
1588	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
1589
1590	switch (speed) {
1591	case SPEED_1000:
1592		emmc_reg |= XAE_EMMC_LINKSPD_1000;
1593		break;
1594	case SPEED_100:
1595		emmc_reg |= XAE_EMMC_LINKSPD_100;
1596		break;
1597	case SPEED_10:
1598		emmc_reg |= XAE_EMMC_LINKSPD_10;
1599		break;
1600	default:
1601		dev_err(&ndev->dev,
1602			"Speed other than 10, 100 or 1Gbps is not supported\n");
1603		break;
1604	}
1605
1606	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
1607
1608	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
1609	if (tx_pause)
1610		fcc_reg |= XAE_FCC_FCTX_MASK;
1611	else
1612		fcc_reg &= ~XAE_FCC_FCTX_MASK;
1613	if (rx_pause)
1614		fcc_reg |= XAE_FCC_FCRX_MASK;
1615	else
1616		fcc_reg &= ~XAE_FCC_FCRX_MASK;
1617	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
1618}
1619
1620static const struct phylink_mac_ops axienet_phylink_ops = {
1621	.validate = axienet_validate,
1622	.mac_pcs_get_state = axienet_mac_pcs_get_state,
1623	.mac_an_restart = axienet_mac_an_restart,
1624	.mac_config = axienet_mac_config,
1625	.mac_link_down = axienet_mac_link_down,
1626	.mac_link_up = axienet_mac_link_up,
1627};
1628
1629/**
1630 * axienet_dma_err_handler - Work queue task for Axi DMA Error
1631 * @work:	pointer to work_struct
1632 *
1633 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
1634 * Tx/Rx BDs.
1635 */
1636static void axienet_dma_err_handler(struct work_struct *work)
1637{
1638	u32 axienet_status;
1639	u32 cr, i;
1640	struct axienet_local *lp = container_of(work, struct axienet_local,
1641						dma_err_task);
1642	struct net_device *ndev = lp->ndev;
1643	struct axidma_bd *cur_p;
1644
1645	axienet_setoptions(ndev, lp->options &
1646			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1647	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
1648	 * When we do an Axi Ethernet reset, it resets the complete core
1649	 * including the MDIO. MDIO must be disabled before resetting
1650	 * and re-enabled afterwards.
1651	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1652	 */
1653	mutex_lock(&lp->mii_bus->mdio_lock);
1654	axienet_mdio_disable(lp);
1655	__axienet_device_reset(lp);
1656	axienet_mdio_enable(lp);
1657	mutex_unlock(&lp->mii_bus->mdio_lock);
1658
1659	for (i = 0; i < lp->tx_bd_num; i++) {
1660		cur_p = &lp->tx_bd_v[i];
1661		if (cur_p->cntrl) {
1662			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
1663
1664			dma_unmap_single(ndev->dev.parent, addr,
1665					 (cur_p->cntrl &
1666					  XAXIDMA_BD_CTRL_LENGTH_MASK),
1667					 DMA_TO_DEVICE);
1668		}
1669		if (cur_p->skb)
1670			dev_kfree_skb_irq(cur_p->skb);
1671		cur_p->phys = 0;
1672		cur_p->phys_msb = 0;
1673		cur_p->cntrl = 0;
1674		cur_p->status = 0;
1675		cur_p->app0 = 0;
1676		cur_p->app1 = 0;
1677		cur_p->app2 = 0;
1678		cur_p->app3 = 0;
1679		cur_p->app4 = 0;
1680		cur_p->skb = NULL;
1681	}
1682
1683	for (i = 0; i < lp->rx_bd_num; i++) {
1684		cur_p = &lp->rx_bd_v[i];
1685		cur_p->status = 0;
1686		cur_p->app0 = 0;
1687		cur_p->app1 = 0;
1688		cur_p->app2 = 0;
1689		cur_p->app3 = 0;
1690		cur_p->app4 = 0;
1691	}
1692
1693	lp->tx_bd_ci = 0;
1694	lp->tx_bd_tail = 0;
1695	lp->rx_bd_ci = 0;
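	/* The rings are clean again; reprogram both channels from index 0
	 * with the same coalesce/delay settings as the initial bring-up.
	 */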
1696
1697	/* Start updating the Rx channel control register */
1698	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1699	/* Update the interrupt coalesce count */
1700	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
1701	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1702	/* Update the delay timer count */
1703	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
1704	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1705	/* Enable coalesce, delay timer and error interrupts */
1706	cr |= XAXIDMA_IRQ_ALL_MASK;
1707	/* Finally write to the Rx channel control register */
1708	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1709
1710	/* Start updating the Tx channel control register */
1711	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1712	/* Update the interrupt coalesce count */
1713	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
1714	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1715	/* Update the delay timer count */
1716	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
1717	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1718	/* Enable coalesce, delay timer and error interrupts */
1719	cr |= XAXIDMA_IRQ_ALL_MASK;
1720	/* Finally write to the Tx channel control register */
1721	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1722
1723	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
1724	 * halted state. This will make the Rx side ready for reception.
1725	 */
1726	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
1727	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1728	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
1729			  cr | XAXIDMA_CR_RUNSTOP_MASK);
1730	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
1731			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
1732
1733	/* Write to the RS (Run-stop) bit in the Tx channel control register.
1734	 * The Tx channel is now ready to run, but it will only start
1735	 * transmitting once the tail pointer register is written.
1736	 */
1737	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
1738	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1739	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
1740			  cr | XAXIDMA_CR_RUNSTOP_MASK);
1741
1742	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
1743	axienet_status &= ~XAE_RCW1_RX_MASK;
1744	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
1745
1746	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
1747	if (axienet_status & XAE_INT_RXRJECT_MASK)
1748		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
1749	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
1750		    XAE_INT_RECV_ERROR_MASK : 0);
1751	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
1752
1753	/* Sync default options with HW but leave receiver and
1754	 * transmitter disabled.
1755	 */
1756	axienet_setoptions(ndev, lp->options &
1757			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1758	axienet_set_mac_address(ndev, NULL);
1759	axienet_set_multicast_list(ndev);
1760	axienet_setoptions(ndev, lp->options);
1761}
1762
1763/**
1764 * axienet_probe - Axi Ethernet probe function.
1765 * @pdev:	Pointer to platform device structure.
1766 *
1767 * Return: 0, on success
1768 *	    Non-zero error value on failure.
1769 *
1770 * This is the probe routine for Axi Ethernet driver. This is called before
1771 * any other driver routines are invoked. It allocates and sets up the Ethernet
1772 * device. Parses through device tree and populates fields of
1773 * axienet_local. It registers the Ethernet device.
1774 */
1775static int axienet_probe(struct platform_device *pdev)
1776{
1777	int ret;
1778	struct device_node *np;
1779	struct axienet_local *lp;
1780	struct net_device *ndev;
1781	const void *mac_addr;
1782	struct resource *ethres;
1783	int addr_width = 32;
1784	u32 value;
1785
1786	ndev = alloc_etherdev(sizeof(*lp));
1787	if (!ndev)
1788		return -ENOMEM;
1789
1790	platform_set_drvdata(pdev, ndev);
1791
1792	SET_NETDEV_DEV(ndev, &pdev->dev);
1793	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
1794	ndev->features = NETIF_F_SG;
1795	ndev->netdev_ops = &axienet_netdev_ops;
1796	ndev->ethtool_ops = &axienet_ethtool_ops;
1797
1798	/* MTU range: 64 - 9000 */
1799	ndev->min_mtu = 64;
1800	ndev->max_mtu = XAE_JUMBO_MTU;
1801
1802	lp = netdev_priv(ndev);
1803	lp->ndev = ndev;
1804	lp->dev = &pdev->dev;
1805	lp->options = XAE_OPTION_DEFAULTS;
1806	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
1807	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
1808	/* Map device registers */
1809	ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1810	lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
1811	if (IS_ERR(lp->regs)) {
1812		dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
1813		ret = PTR_ERR(lp->regs);
1814		goto free_netdev;
1815	}
1816	lp->regs_start = ethres->start;
1817
1818	/* Setup checksum offload, but default to off if not specified */
1819	lp->features = 0;
1820
1821	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
1822	if (!ret) {
1823		switch (value) {
1824		case 1:
1825			lp->csum_offload_on_tx_path =
1826				XAE_FEATURE_PARTIAL_TX_CSUM;
1827			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
1828			/* Can checksum TCP/UDP over IPv4. */
1829			ndev->features |= NETIF_F_IP_CSUM;
1830			break;
1831		case 2:
1832			lp->csum_offload_on_tx_path =
1833				XAE_FEATURE_FULL_TX_CSUM;
1834			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
1835			/* Can checksum TCP/UDP over IPv4. */
1836			ndev->features |= NETIF_F_IP_CSUM;
1837			break;
1838		default:
1839			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
1840		}
1841	}
1842	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
1843	if (!ret) {
1844		switch (value) {
1845		case 1:
1846			lp->csum_offload_on_rx_path =
1847				XAE_FEATURE_PARTIAL_RX_CSUM;
1848			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
1849			break;
1850		case 2:
1851			lp->csum_offload_on_rx_path =
1852				XAE_FEATURE_FULL_RX_CSUM;
1853			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
1854			break;
1855		default:
1856			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
1857		}
1858	}
1859	/* To support jumbo frames, the Axi Ethernet hardware must have
1860	 * been synthesized with enough Rx/Tx memory to hold a complete
1861	 * jumbo frame; only then can the jumbo option be enabled.
1862	 * Read the Rx/Tx memory size provisioned in the hardware from the
1863	 * device tree and set the flags accordingly.
1864	 */
1865	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
1866
1867	/* Start with the proprietary, and broken phy_type */
1868	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
1869	if (!ret) {
1870		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode\n");
1871		switch (value) {
1872		case XAE_PHY_TYPE_MII:
1873			lp->phy_mode = PHY_INTERFACE_MODE_MII;
1874			break;
1875		case XAE_PHY_TYPE_GMII:
1876			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
1877			break;
1878		case XAE_PHY_TYPE_RGMII_2_0:
1879			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
1880			break;
1881		case XAE_PHY_TYPE_SGMII:
1882			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
1883			break;
1884		case XAE_PHY_TYPE_1000BASE_X:
1885			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
1886			break;
1887		default:
1888			ret = -EINVAL;
1889			goto free_netdev;
1890		}
1891	} else {
1892		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
1893		if (ret)
1894			goto free_netdev;
1895	}
1896
1897	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
1898	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
1899	if (np) {
1900		struct resource dmares;
1901
1902		ret = of_address_to_resource(np, 0, &dmares);
1903		if (ret) {
1904			dev_err(&pdev->dev,
1905				"unable to get DMA resource\n");
1906			of_node_put(np);
1907			goto free_netdev;
1908		}
1909		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
1910						     &dmares);
1911		lp->rx_irq = irq_of_parse_and_map(np, 1);
1912		lp->tx_irq = irq_of_parse_and_map(np, 0);
1913		of_node_put(np);
1914		lp->eth_irq = platform_get_irq_optional(pdev, 0);
1915	} else {
1916		/* Check for these resources directly on the Ethernet node. */
1917		struct resource *res = platform_get_resource(pdev,
1918							     IORESOURCE_MEM, 1);
1919		lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
1920		lp->rx_irq = platform_get_irq(pdev, 1);
1921		lp->tx_irq = platform_get_irq(pdev, 0);
1922		lp->eth_irq = platform_get_irq_optional(pdev, 2);
1923	}
1924	if (IS_ERR(lp->dma_regs)) {
1925		dev_err(&pdev->dev, "could not map DMA regs\n");
1926		ret = PTR_ERR(lp->dma_regs);
1927		goto free_netdev;
1928	}
1929	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
1930		dev_err(&pdev->dev, "could not determine irqs\n");
1931		ret = -ENOMEM;
1932		goto free_netdev;
1933	}
1934
1935	/* Autodetect the need for 64-bit DMA pointers.
1936	 * When the IP is configured for a bus width bigger than 32 bits,
1937	 * writing the MSB registers is mandatory, even if they are all 0.
1938	 * We can detect this case by writing all 1's to one such register
1939	 * and seeing if that sticks: when the IP is configured for 32 bits
1940	 * only, those registers are RES0.
1941	 * Those MSB registers were introduced in IP v7.1, which we check first.
1942	 */
1943	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
1944		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
1945
1946		iowrite32(0x0, desc);
1947		if (ioread32(desc) == 0) {	/* sanity check */
1948			iowrite32(0xffffffff, desc);
1949			if (ioread32(desc) > 0) {
1950				lp->features |= XAE_FEATURE_DMA_64BIT;
1951				addr_width = 64;
1952				dev_info(&pdev->dev,
1953					 "autodetected 64-bit DMA range\n");
1954			}
1955			iowrite32(0x0, desc);
1956		}
1957	}
1958
1959	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
1960	if (ret) {
1961		dev_err(&pdev->dev, "No suitable DMA available\n");
1962		goto free_netdev;
1963	}
1964
1965	/* Check for Ethernet core IRQ (optional) */
1966	if (lp->eth_irq <= 0)
1967		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
1968
1969	/* Retrieve the MAC address */
1970	mac_addr = of_get_mac_address(pdev->dev.of_node);
1971	if (IS_ERR(mac_addr)) {
1972		dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
1973			 PTR_ERR(mac_addr));
1974		mac_addr = NULL;
1975	}
1976	axienet_set_mac_address(ndev, mac_addr);
1977
1978	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
1979	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
1980
1981	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1982	if (lp->phy_node) {
1983		lp->clk = devm_clk_get(&pdev->dev, NULL);
1984		if (IS_ERR(lp->clk)) {
1985			dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
1986				 PTR_ERR(lp->clk));
1987			lp->clk = NULL;
1988		} else {
1989			ret = clk_prepare_enable(lp->clk);
1990			if (ret) {
1991				dev_err(&pdev->dev, "Unable to enable clock: %d\n",
1992					ret);
1993				goto free_netdev;
1994			}
1995		}
1996
1997		ret = axienet_mdio_setup(lp);
1998		if (ret)
1999			dev_warn(&pdev->dev,
2000				 "error registering MDIO bus: %d\n", ret);
2001	}
2002
2003	lp->phylink_config.dev = &ndev->dev;
2004	lp->phylink_config.type = PHYLINK_NETDEV;
2005
2006	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2007				     lp->phy_mode,
2008				     &axienet_phylink_ops);
2009	if (IS_ERR(lp->phylink)) {
2010		ret = PTR_ERR(lp->phylink);
2011		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2012		goto free_netdev;
2013	}
2014
2015	ret = register_netdev(lp->ndev);
2016	if (ret) {
2017		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2018		goto free_netdev;
2019	}
2020
2021	return 0;
2022
2023free_netdev:
2024	free_netdev(ndev);
2025
2026	return ret;
2027}
2028
2029static int axienet_remove(struct platform_device *pdev)
2030{
2031	struct net_device *ndev = platform_get_drvdata(pdev);
2032	struct axienet_local *lp = netdev_priv(ndev);
2033
2034	unregister_netdev(ndev);
2035
2036	if (lp->phylink)
2037		phylink_destroy(lp->phylink);
2038
2039	axienet_mdio_teardown(lp);
2040
2041	if (lp->clk)
2042		clk_disable_unprepare(lp->clk);
2043
2044	of_node_put(lp->phy_node);
2045	lp->phy_node = NULL;
2046
2047	free_netdev(ndev);
2048
2049	return 0;
2050}
2051
2052static void axienet_shutdown(struct platform_device *pdev)
2053{
2054	struct net_device *ndev = platform_get_drvdata(pdev);
2055
2056	rtnl_lock();
2057	netif_device_detach(ndev);
2058
2059	if (netif_running(ndev))
2060		dev_close(ndev);
2061
2062	rtnl_unlock();
2063}
2064
2065static struct platform_driver axienet_driver = {
2066	.probe = axienet_probe,
2067	.remove = axienet_remove,
2068	.shutdown = axienet_shutdown,
2069	.driver = {
2070		 .name = "xilinx_axienet",
2071		 .of_match_table = axienet_of_match,
2072	},
2073};
2074
2075module_platform_driver(axienet_driver);
2076
2077MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
2078MODULE_AUTHOR("Xilinx");
2079MODULE_LICENSE("GPL");