   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Xilinx Axi Ethernet device driver
   4 *
   5 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
   6 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
   7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
   8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
   9 * Copyright (c) 2010 - 2011 PetaLogix
  10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
  11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  12 *
  13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
  14 * and Spartan6.
  15 *
  16 * TODO:
  17 *  - Add Axi Fifo support.
  18 *  - Factor out Axi DMA code into separate driver.
  19 *  - Test and fix basic multicast filtering.
  20 *  - Add support for extended multicast filtering.
  21 *  - Test basic VLAN support.
  22 *  - Add support for extended VLAN support.
  23 */
  24
  25#include <linux/clk.h>
  26#include <linux/delay.h>
  27#include <linux/etherdevice.h>
  28#include <linux/module.h>
  29#include <linux/netdevice.h>
  30#include <linux/of.h>
  31#include <linux/of_mdio.h>
  32#include <linux/of_net.h>
  33#include <linux/of_irq.h>
  34#include <linux/of_address.h>
  35#include <linux/platform_device.h>
  36#include <linux/skbuff.h>
  37#include <linux/math64.h>
  38#include <linux/phy.h>
  39#include <linux/mii.h>
  40#include <linux/ethtool.h>
  41#include <linux/dmaengine.h>
  42#include <linux/dma-mapping.h>
  43#include <linux/dma/xilinx_dma.h>
  44#include <linux/circ_buf.h>
  45#include <net/netdev_queues.h>
  46
  47#include "xilinx_axienet.h"
  48
  49/* Descriptor defines for Tx and Rx DMA */
  50#define TX_BD_NUM_DEFAULT		128
  51#define RX_BD_NUM_DEFAULT		1024
  52#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
  53#define TX_BD_NUM_MAX			4096
  54#define RX_BD_NUM_MAX			4096
  55#define DMA_NUM_APP_WORDS		5
  56#define LEN_APP				4
  57#define RX_BUF_NUM_DEFAULT		128
  58
  59/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
  60#define DRIVER_NAME		"xaxienet"
  61#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
  62#define DRIVER_VERSION		"1.00a"
  63
  64#define AXIENET_REGS_N		40
  65
  66static void axienet_rx_submit_desc(struct net_device *ndev);
  67
  68/* Match table for of_platform binding */
  69static const struct of_device_id axienet_of_match[] = {
  70	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
  71	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
  72	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
  73	{},
  74};
  75
  76MODULE_DEVICE_TABLE(of, axienet_of_match);
  77
  78/* Option table for setting up Axi Ethernet hardware options */
  79static struct axienet_option axienet_options[] = {
  80	/* Turn on jumbo packet support for both Rx and Tx */
  81	{
  82		.opt = XAE_OPTION_JUMBO,
  83		.reg = XAE_TC_OFFSET,
  84		.m_or = XAE_TC_JUM_MASK,
  85	}, {
  86		.opt = XAE_OPTION_JUMBO,
  87		.reg = XAE_RCW1_OFFSET,
  88		.m_or = XAE_RCW1_JUM_MASK,
  89	}, { /* Turn on VLAN packet support for both Rx and Tx */
  90		.opt = XAE_OPTION_VLAN,
  91		.reg = XAE_TC_OFFSET,
  92		.m_or = XAE_TC_VLAN_MASK,
  93	}, {
  94		.opt = XAE_OPTION_VLAN,
  95		.reg = XAE_RCW1_OFFSET,
  96		.m_or = XAE_RCW1_VLAN_MASK,
  97	}, { /* Turn on FCS stripping on receive packets */
  98		.opt = XAE_OPTION_FCS_STRIP,
  99		.reg = XAE_RCW1_OFFSET,
 100		.m_or = XAE_RCW1_FCS_MASK,
 101	}, { /* Turn on FCS insertion on transmit packets */
 102		.opt = XAE_OPTION_FCS_INSERT,
 103		.reg = XAE_TC_OFFSET,
 104		.m_or = XAE_TC_FCS_MASK,
 105	}, { /* Turn off length/type field checking on receive packets */
 106		.opt = XAE_OPTION_LENTYPE_ERR,
 107		.reg = XAE_RCW1_OFFSET,
 108		.m_or = XAE_RCW1_LT_DIS_MASK,
 109	}, { /* Turn on Rx flow control */
 110		.opt = XAE_OPTION_FLOW_CONTROL,
 111		.reg = XAE_FCC_OFFSET,
 112		.m_or = XAE_FCC_FCRX_MASK,
 113	}, { /* Turn on Tx flow control */
 114		.opt = XAE_OPTION_FLOW_CONTROL,
 115		.reg = XAE_FCC_OFFSET,
 116		.m_or = XAE_FCC_FCTX_MASK,
 117	}, { /* Turn on promiscuous frame filtering */
 118		.opt = XAE_OPTION_PROMISC,
 119		.reg = XAE_FMI_OFFSET,
 120		.m_or = XAE_FMI_PM_MASK,
 121	}, { /* Enable transmitter */
 122		.opt = XAE_OPTION_TXEN,
 123		.reg = XAE_TC_OFFSET,
 124		.m_or = XAE_TC_TX_MASK,
 125	}, { /* Enable receiver */
 126		.opt = XAE_OPTION_RXEN,
 127		.reg = XAE_RCW1_OFFSET,
 128		.m_or = XAE_RCW1_RX_MASK,
 129	},
 130	{}
 131};
 132
 133static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
 134{
 135	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
 136}
 137
 138static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
 139{
 140	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
 141}
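
/* Both ring-index helpers above rely on RX_BUF_NUM_DEFAULT (128) and
 * TX_BD_NUM_MAX (4096) being powers of two, so the bitwise AND implements
 * the ring wrap-around without a modulo operation.
 */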
 142
 143/**
 144 * axienet_dma_in32 - Memory mapped Axi DMA register read
 145 * @lp:		Pointer to axienet local structure
 146 * @reg:	Address offset from the base address of the Axi DMA core
 147 *
 148 * Return: The contents of the Axi DMA register
 149 *
 150 * This function returns the contents of the corresponding Axi DMA register.
 151 */
 152static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 153{
 154	return ioread32(lp->dma_regs + reg);
 155}
 156
 157static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
 158			       struct axidma_bd *desc)
 159{
 160	desc->phys = lower_32_bits(addr);
 161	if (lp->features & XAE_FEATURE_DMA_64BIT)
 162		desc->phys_msb = upper_32_bits(addr);
 163}
 164
 165static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
 166				     struct axidma_bd *desc)
 167{
 168	dma_addr_t ret = desc->phys;
 169
 170	if (lp->features & XAE_FEATURE_DMA_64BIT)
 171		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
 172
 173	return ret;
 174}
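
/* The MSB half is merged with two 16-bit shifts instead of one 32-bit shift.
 * This keeps the expression well defined when dma_addr_t is only 32 bits
 * wide (a shift by the full type width is undefined in C); the MSB word then
 * simply shifts out to zero.
 */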
 175
 176/**
 177 * axienet_dma_bd_release - Release buffer descriptor rings
 178 * @ndev:	Pointer to the net_device structure
 179 *
 180 * This function is used to release the descriptors allocated in
 181 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 182 * driver's stop routine is called.
 183 */
 184static void axienet_dma_bd_release(struct net_device *ndev)
 185{
 186	int i;
 187	struct axienet_local *lp = netdev_priv(ndev);
 188
 189	/* If we end up here, tx_bd_v must have been DMA allocated. */
 190	dma_free_coherent(lp->dev,
 191			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 192			  lp->tx_bd_v,
 193			  lp->tx_bd_p);
 194
 195	if (!lp->rx_bd_v)
 196		return;
 197
 198	for (i = 0; i < lp->rx_bd_num; i++) {
 199		dma_addr_t phys;
 200
 201		/* A NULL skb means this descriptor has not been initialised
 202		 * at all.
 203		 */
 204		if (!lp->rx_bd_v[i].skb)
 205			break;
 206
 207		dev_kfree_skb(lp->rx_bd_v[i].skb);
 208
 209		/* For each descriptor, we programmed cntrl with the (non-zero)
 210		 * descriptor size, after it had been successfully allocated.
 211		 * So a non-zero value in there means we need to unmap it.
 212		 */
 213		if (lp->rx_bd_v[i].cntrl) {
 214			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
 215			dma_unmap_single(lp->dev, phys,
 216					 lp->max_frm_size, DMA_FROM_DEVICE);
 217		}
 218	}
 219
 220	dma_free_coherent(lp->dev,
 221			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 222			  lp->rx_bd_v,
 223			  lp->rx_bd_p);
 224}
 225
 226/**
 227 * axienet_usec_to_timer - Calculate IRQ delay timer value
 228 * @lp:		Pointer to the axienet_local structure
 229 * @coalesce_usec: Microseconds to convert into timer value
 230 */
 231static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
 232{
 233	u32 result;
 234	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
 235
 236	if (lp->axi_clk)
 237		clk_rate = clk_get_rate(lp->axi_clk);
 238
 239	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
 240	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
 241					 (u64)125000000);
 242	if (result > 255)
 243		result = 255;
 244
 245	return result;
 246}
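
/* With the 125 MHz default above, one 125-cycle timeout interval is exactly
 * 1 us, so the programmed value equals coalesce_usec, clamped to the 8-bit
 * maximum of 255. For example, 50 us at 125 MHz gives
 * 50 * 125000000 / 125000000 = 50.
 */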
 247
 248/**
 249 * axienet_dma_start - Set up DMA registers and start DMA operation
 250 * @lp:		Pointer to the axienet_local structure
 251 */
 252static void axienet_dma_start(struct axienet_local *lp)
 253{
 254	/* Start updating the Rx channel control register */
 255	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
 256			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 257	/* Only set interrupt delay timer if not generating an interrupt on
 258	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
 259	 */
 260	if (lp->coalesce_count_rx > 1)
 261		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
 262					<< XAXIDMA_DELAY_SHIFT) |
 263				 XAXIDMA_IRQ_DELAY_MASK;
 264	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 265
 266	/* Start updating the Tx channel control register */
 267	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
 268			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 269	/* Only set interrupt delay timer if not generating an interrupt on
 270	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
 271	 */
 272	if (lp->coalesce_count_tx > 1)
 273		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
 274					<< XAXIDMA_DELAY_SHIFT) |
 275				 XAXIDMA_IRQ_DELAY_MASK;
 276	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 277
 278	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 279	 * halted state. This will make the Rx side ready for reception.
 280	 */
 281	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 282	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 283	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 284	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
 285			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
 286
 287	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 288	 * Tx channel is now ready to run. But only after we write to the
 289	 * tail pointer register that the Tx channel will start transmitting.
 290	 */
 291	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 292	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 293	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 294}
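
/* The control register values are cached in lp->rx_dma_cr and lp->tx_dma_cr
 * so that the interrupt handlers and NAPI poll routines can mask and later
 * re-enable the completion interrupts without re-reading the hardware
 * registers.
 */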
 295
 296/**
 297 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 298 * @ndev:	Pointer to the net_device structure
 299 *
 300 * Return: 0 on success, -ENOMEM on failure
 301 *
 302 * This function is called to initialize the Rx and Tx DMA descriptor
 303 * rings. This initializes the descriptors with required default values
 304 * and is called when Axi Ethernet driver reset is called.
 305 */
 306static int axienet_dma_bd_init(struct net_device *ndev)
 307{
 308	int i;
 309	struct sk_buff *skb;
 310	struct axienet_local *lp = netdev_priv(ndev);
 311
 312	/* Reset the indexes which are used for accessing the BDs */
 313	lp->tx_bd_ci = 0;
 314	lp->tx_bd_tail = 0;
 315	lp->rx_bd_ci = 0;
 316
 317	/* Allocate the Tx and Rx buffer descriptors. */
 318	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
 319					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 320					 &lp->tx_bd_p, GFP_KERNEL);
 321	if (!lp->tx_bd_v)
 322		return -ENOMEM;
 323
 324	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
 325					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 326					 &lp->rx_bd_p, GFP_KERNEL);
 327	if (!lp->rx_bd_v)
 328		goto out;
 329
 330	for (i = 0; i < lp->tx_bd_num; i++) {
 331		dma_addr_t addr = lp->tx_bd_p +
 332				  sizeof(*lp->tx_bd_v) *
 333				  ((i + 1) % lp->tx_bd_num);
 334
 335		lp->tx_bd_v[i].next = lower_32_bits(addr);
 336		if (lp->features & XAE_FEATURE_DMA_64BIT)
 337			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
 338	}
 339
 340	for (i = 0; i < lp->rx_bd_num; i++) {
 341		dma_addr_t addr;
 342
 343		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
 344			((i + 1) % lp->rx_bd_num);
 345		lp->rx_bd_v[i].next = lower_32_bits(addr);
 346		if (lp->features & XAE_FEATURE_DMA_64BIT)
 347			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
 348
 349		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 350		if (!skb)
 351			goto out;
 352
 353		lp->rx_bd_v[i].skb = skb;
 354		addr = dma_map_single(lp->dev, skb->data,
 355				      lp->max_frm_size, DMA_FROM_DEVICE);
 356		if (dma_mapping_error(lp->dev, addr)) {
 357			netdev_err(ndev, "DMA mapping error\n");
 358			goto out;
 359		}
 360		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
 361
 362		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 363	}
 364
 365	axienet_dma_start(lp);
 366
 367	return 0;
 368out:
 369	axienet_dma_bd_release(ndev);
 370	return -ENOMEM;
 371}
 372
 373/**
 374 * axienet_set_mac_address - Write the MAC address
 375 * @ndev:	Pointer to the net_device structure
 376 * @address:	6 byte Address to be written as MAC address
 377 *
 378 * This function is called to initialize the MAC address of the Axi Ethernet
 379 * core. It writes to the UAW0 and UAW1 registers of the core.
 380 */
 381static void axienet_set_mac_address(struct net_device *ndev,
 382				    const void *address)
 383{
 384	struct axienet_local *lp = netdev_priv(ndev);
 385
 386	if (address)
 387		eth_hw_addr_set(ndev, address);
 388	if (!is_valid_ether_addr(ndev->dev_addr))
 389		eth_hw_addr_random(ndev);
 390
 391	/* Set up the unicast MAC address filter with the MAC address */
 392	axienet_iow(lp, XAE_UAW0_OFFSET,
 393		    (ndev->dev_addr[0]) |
 394		    (ndev->dev_addr[1] << 8) |
 395		    (ndev->dev_addr[2] << 16) |
 396		    (ndev->dev_addr[3] << 24));
 397	axienet_iow(lp, XAE_UAW1_OFFSET,
 398		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
 399		      ~XAE_UAW1_UNICASTADDR_MASK) |
 400		     (ndev->dev_addr[4] |
 401		     (ndev->dev_addr[5] << 8))));
 402}
 403
 404/**
 405 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 406 * @ndev:	Pointer to the net_device structure
 407 * @p:		6 byte Address to be written as MAC address
 408 *
 409 * Return: 0 for all conditions. Presently, there is no failure case.
 410 *
 411 * This function is called to initialize the MAC address of the Axi Ethernet
 412 * core. It calls the core specific axienet_set_mac_address. This is the
 413 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 414 */
 415static int netdev_set_mac_address(struct net_device *ndev, void *p)
 416{
 417	struct sockaddr *addr = p;
 418	axienet_set_mac_address(ndev, addr->sa_data);
 419	return 0;
 420}
 421
 422/**
 423 * axienet_set_multicast_list - Prepare the multicast table
 424 * @ndev:	Pointer to the net_device structure
 425 *
 426 * This function is called to initialize the multicast table during
 427 * initialization. The Axi Ethernet basic multicast support has a four-entry
 428 * multicast table which is initialized here. Additionally this function
 429 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
 430 * means whenever the multicast table entries need to be updated this
 431 * function gets called.
 432 */
 433static void axienet_set_multicast_list(struct net_device *ndev)
 434{
 435	int i;
 436	u32 reg, af0reg, af1reg;
 437	struct axienet_local *lp = netdev_priv(ndev);
 438
 439	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
 440	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
 441		/* We must make the kernel realize we had to move into
 442		 * promiscuous mode. If it was a promiscuous mode request
 443		 * the flag is already set. If not we set it.
 444		 */
 445		ndev->flags |= IFF_PROMISC;
 446		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 447		reg |= XAE_FMI_PM_MASK;
 448		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 449		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
 450	} else if (!netdev_mc_empty(ndev)) {
 451		struct netdev_hw_addr *ha;
 452
 453		i = 0;
 454		netdev_for_each_mc_addr(ha, ndev) {
 455			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
 456				break;
 457
 458			af0reg = (ha->addr[0]);
 459			af0reg |= (ha->addr[1] << 8);
 460			af0reg |= (ha->addr[2] << 16);
 461			af0reg |= (ha->addr[3] << 24);
 462
 463			af1reg = (ha->addr[4]);
 464			af1reg |= (ha->addr[5] << 8);
 465
 466			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 467			reg |= i;
 468
 469			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 470			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
 471			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
 472			i++;
 473		}
 474	} else {
 475		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 476		reg &= ~XAE_FMI_PM_MASK;
 477
 478		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 479
 480		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
 481			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 482			reg |= i;
 483
 484			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 485			axienet_iow(lp, XAE_AF0_OFFSET, 0);
 486			axienet_iow(lp, XAE_AF1_OFFSET, 0);
 487		}
 488
 489		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
 490	}
 491}
 492
 493/**
 494 * axienet_setoptions - Set an Axi Ethernet option
 495 * @ndev:	Pointer to the net_device structure
 496 * @options:	Option to be enabled/disabled
 497 *
 498 * The Axi Ethernet core has multiple features which can be selectively turned
 499 * on or off. The typical options could be jumbo frame option, basic VLAN
 500 * option, promiscuous mode option etc. This function is used to set or clear
 501 * these options in the Axi Ethernet hardware. This is done through
 502 * the axienet_option structure.
 503 */
 504static void axienet_setoptions(struct net_device *ndev, u32 options)
 505{
 506	int reg;
 507	struct axienet_local *lp = netdev_priv(ndev);
 508	struct axienet_option *tp = &axienet_options[0];
 509
 510	while (tp->opt) {
 511		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
 512		if (options & tp->opt)
 513			reg |= tp->m_or;
 514		axienet_iow(lp, tp->reg, reg);
 515		tp++;
 516	}
 517
 518	lp->options |= options;
 519}
 520
 521static int __axienet_device_reset(struct axienet_local *lp)
 522{
 523	u32 value;
 524	int ret;
 525
 526	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 527	 * process of Axi DMA takes a while to complete as all pending
 528	 * commands/transfers will be flushed or completed during this
 529	 * reset process.
 530	 * Note that even though both TX and RX have their own reset register,
 531	 * they both reset the entire DMA core, so only one needs to be used.
 532	 */
 533	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
 534	ret = read_poll_timeout(axienet_dma_in32, value,
 535				!(value & XAXIDMA_CR_RESET_MASK),
 536				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 537				XAXIDMA_TX_CR_OFFSET);
 538	if (ret) {
 539		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
 540		return ret;
 541	}
 542
 543	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
 544	ret = read_poll_timeout(axienet_ior, value,
 545				value & XAE_INT_PHYRSTCMPLT_MASK,
 546				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 547				XAE_IS_OFFSET);
 548	if (ret) {
 549		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
 550		return ret;
 551	}
 552
 553	return 0;
 554}
 555
 556/**
 557 * axienet_dma_stop - Stop DMA operation
 558 * @lp:		Pointer to the axienet_local structure
 559 */
 560static void axienet_dma_stop(struct axienet_local *lp)
 561{
 562	int count;
 563	u32 cr, sr;
 564
 565	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 566	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 567	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 568	synchronize_irq(lp->rx_irq);
 569
 570	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 571	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 572	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 573	synchronize_irq(lp->tx_irq);
 574
 575	/* Give DMAs a chance to halt gracefully */
 576	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 577	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 578		msleep(20);
 579		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 580	}
 581
 582	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 583	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 584		msleep(20);
 585		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 586	}
 587
 588	/* Do a reset to ensure DMA is really stopped */
 589	axienet_lock_mii(lp);
 590	__axienet_device_reset(lp);
 591	axienet_unlock_mii(lp);
 592}
 593
 594/**
 595 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 596 * @ndev:	Pointer to the net_device structure
 597 *
 598 * This function is called to reset and initialize the Axi Ethernet core. This
 599 * is typically called during initialization. It does a reset of the Axi DMA
 600 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 601 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 602 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 603 * core.
 604 * Returns 0 on success or a negative error number otherwise.
 605 */
 606static int axienet_device_reset(struct net_device *ndev)
 607{
 608	u32 axienet_status;
 609	struct axienet_local *lp = netdev_priv(ndev);
 610	int ret;
 611
 612	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 613	lp->options |= XAE_OPTION_VLAN;
 614	lp->options &= (~XAE_OPTION_JUMBO);
 615
 616	if ((ndev->mtu > XAE_MTU) &&
 617	    (ndev->mtu <= XAE_JUMBO_MTU)) {
 618		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
 619					XAE_TRL_SIZE;
 620
 621		if (lp->max_frm_size <= lp->rxmem)
 622			lp->options |= XAE_OPTION_JUMBO;
 623	}
 624
 625	if (!lp->use_dmaengine) {
 626		ret = __axienet_device_reset(lp);
 627		if (ret)
 628			return ret;
 629
 630		ret = axienet_dma_bd_init(ndev);
 631		if (ret) {
 632			netdev_err(ndev, "%s: descriptor allocation failed\n",
 633				   __func__);
 634			return ret;
 635		}
 636	}
 637
 638	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
 639	axienet_status &= ~XAE_RCW1_RX_MASK;
 640	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
 641
 642	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
 643	if (axienet_status & XAE_INT_RXRJECT_MASK)
 644		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
 645	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
 646		    XAE_INT_RECV_ERROR_MASK : 0);
 647
 648	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 649
 650	/* Sync default options with HW but leave receiver and
 651	 * transmitter disabled.
 652	 */
 653	axienet_setoptions(ndev, lp->options &
 654			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 655	axienet_set_mac_address(ndev, NULL);
 656	axienet_set_multicast_list(ndev);
 657	axienet_setoptions(ndev, lp->options);
 658
 659	netif_trans_update(ndev);
 660
 661	return 0;
 662}
 663
 664/**
 665 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 666 * @lp:		Pointer to the axienet_local structure
 667 * @first_bd:	Index of first descriptor to clean up
 668 * @nr_bds:	Max number of descriptors to clean up
 669 * @force:	Whether to clean descriptors even if not complete
 670 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 671 *		in all cleaned-up descriptors. Ignored if NULL.
 672 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 673 *
 674 * Would either be called after a successful transmit operation, or after
 675 * there was an error when setting up the chain.
 676 * Returns the number of descriptors handled.
 677 */
 678static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
 679				 int nr_bds, bool force, u32 *sizep, int budget)
 680{
 681	struct axidma_bd *cur_p;
 682	unsigned int status;
 683	dma_addr_t phys;
 684	int i;
 685
 686	for (i = 0; i < nr_bds; i++) {
 687		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
 688		status = cur_p->status;
 689
 690		/* If force is not specified, clean up only descriptors
 691		 * that have been completed by the MAC.
 692		 */
 693		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 694			break;
 695
 696		/* Ensure we see complete descriptor update */
 697		dma_rmb();
 698		phys = desc_get_phys_addr(lp, cur_p);
 699		dma_unmap_single(lp->dev, phys,
 700				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 701				 DMA_TO_DEVICE);
 702
 703		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
 704			napi_consume_skb(cur_p->skb, budget);
 705
 706		cur_p->app0 = 0;
 707		cur_p->app1 = 0;
 708		cur_p->app2 = 0;
 709		cur_p->app4 = 0;
 710		cur_p->skb = NULL;
 711		/* ensure our transmit path and device don't prematurely see status cleared */
 712		wmb();
 713		cur_p->cntrl = 0;
 714		cur_p->status = 0;
 715
 716		if (sizep)
 717			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 718	}
 719
 720	return i;
 721}
 722
 723/**
 724 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 725 * @lp:		Pointer to the axienet_local structure
 726 * @num_frag:	The number of BDs to check for
 727 *
 728 * Return: 0, on success
 729 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 730 *
 731 * This function is invoked before BDs are allocated and transmission starts.
 732 * This function returns 0 if a BD or group of BDs can be allocated for
 733 * transmission. If the BD or any of the BDs are not free the function
 734 * returns a busy status.
 735 */
 736static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 737					    int num_frag)
 738{
 739	struct axidma_bd *cur_p;
 740
 741	/* Ensure we see all descriptor updates from device or TX polling */
 742	rmb();
 743	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
 744			     lp->tx_bd_num];
 745	if (cur_p->cntrl)
 746		return NETDEV_TX_BUSY;
 747	return 0;
 748}
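
/* Checking the single descriptor num_frag slots past the tail is sufficient
 * because descriptors are filled and freed strictly in ring order:
 * axienet_free_tx_chain() clears cntrl only once a slot has been completed
 * and unmapped, so a non-zero cntrl there means the slot is still owned by
 * an earlier frame and the ring cannot safely accept num_frag more
 * descriptors.
 */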
 749
 750/**
 751 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 752 * @data:       Pointer to the axienet_local structure.
 753 * @result:     error reporting through dmaengine_result.
 754 * This function is called by dmaengine driver for TX channel to notify
 755 * that the transmit is done.
 756 */
 757static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
 758{
 759	struct skbuf_dma_descriptor *skbuf_dma;
 760	struct axienet_local *lp = data;
 761	struct netdev_queue *txq;
 762	int len;
 763
 764	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
 765	len = skbuf_dma->skb->len;
 766	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
 767	u64_stats_update_begin(&lp->tx_stat_sync);
 768	u64_stats_add(&lp->tx_bytes, len);
 769	u64_stats_add(&lp->tx_packets, 1);
 770	u64_stats_update_end(&lp->tx_stat_sync);
 771	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
 772	dev_consume_skb_any(skbuf_dma->skb);
 773	netif_txq_completed_wake(txq, 1, len,
 774				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 775				 2 * MAX_SKB_FRAGS);
 776}
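
/* The wake threshold of 2 * MAX_SKB_FRAGS used here matches the one passed
 * to netif_txq_maybe_stop() in axienet_start_xmit_dmaengine(), so the queue
 * is restarted only once a worst-case frame (linear part plus MAX_SKB_FRAGS
 * fragments) fits in the ring again with some headroom.
 */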
 777
 778/**
 779 * axienet_start_xmit_dmaengine - Starts the transmission.
 780 * @skb:        sk_buff pointer that contains data to be Txed.
 781 * @ndev:       Pointer to net_device structure.
 782 *
 783 * Return: NETDEV_TX_OK on success, and also for errors other than lack of
 784 *         space (the skb is dropped in that case). NETDEV_TX_BUSY when no
 785 *         free element is available in the TX skb ring buffer.
 786 *
 787 * This function is invoked to initiate transmission. It sets up the skb,
 788 * registers the DMA completion callback and submits the transaction to the
 789 * dmaengine framework.
 790 * Additionally if checksum offloading is supported,
 791 * it populates AXI Stream Control fields with appropriate values.
 792 */
 793static netdev_tx_t
 794axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
 795{
 796	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
 797	struct axienet_local *lp = netdev_priv(ndev);
 798	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
 799	struct skbuf_dma_descriptor *skbuf_dma;
 800	struct dma_device *dma_dev;
 801	struct netdev_queue *txq;
 802	u32 csum_start_off;
 803	u32 csum_index_off;
 804	int sg_len;
 805	int ret;
 806
 807	dma_dev = lp->tx_chan->device;
 808	sg_len = skb_shinfo(skb)->nr_frags + 1;
 809	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
 810		netif_stop_queue(ndev);
 811		if (net_ratelimit())
 812			netdev_warn(ndev, "TX ring unexpectedly full\n");
 813		return NETDEV_TX_BUSY;
 814	}
 815
 816	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
 817	if (!skbuf_dma)
 818		goto xmit_error_drop_skb;
 819
 820	lp->tx_ring_head++;
 821	sg_init_table(skbuf_dma->sgl, sg_len);
 822	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
 823	if (ret < 0)
 824		goto xmit_error_drop_skb;
 825
 826	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
 827	if (!ret)
 828		goto xmit_error_drop_skb;
 829
 830	/* Fill up app fields for checksum */
 831	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 832		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 833			/* Tx Full Checksum Offload Enabled */
 834			app_metadata[0] |= 2;
 835		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
 836			csum_start_off = skb_transport_offset(skb);
 837			csum_index_off = csum_start_off + skb->csum_offset;
 838			/* Tx Partial Checksum Offload Enabled */
 839			app_metadata[0] |= 1;
 840			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
 841		}
 842	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 843		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
 844	}
 845
 846	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
 847			sg_len, DMA_MEM_TO_DEV,
 848			DMA_PREP_INTERRUPT, (void *)app_metadata);
 849	if (!dma_tx_desc)
 850		goto xmit_error_unmap_sg;
 851
 852	skbuf_dma->skb = skb;
 853	skbuf_dma->sg_len = sg_len;
 854	dma_tx_desc->callback_param = lp;
 855	dma_tx_desc->callback_result = axienet_dma_tx_cb;
 856	dmaengine_submit(dma_tx_desc);
 857	dma_async_issue_pending(lp->tx_chan);
 858	txq = skb_get_tx_queue(lp->ndev, skb);
 859	netdev_tx_sent_queue(txq, skb->len);
 860	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 861			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
 862
 863	return NETDEV_TX_OK;
 864
 865xmit_error_unmap_sg:
 866	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
 867xmit_error_drop_skb:
 868	dev_kfree_skb_any(skb);
 869	return NETDEV_TX_OK;
 870}
 871
 872/**
 873 * axienet_tx_poll - Invoked once a transmit is completed by the
 874 * Axi DMA Tx channel.
 875 * @napi:	Pointer to NAPI structure.
 876 * @budget:	Max number of TX packets to process.
 877 *
 878 * Return: Number of TX packets processed.
 879 *
 880 * This function is invoked from the NAPI processing to notify the completion
 881 * of transmit operation. It clears fields in the corresponding Tx BDs and
 882 * unmaps the corresponding buffer so that CPU can regain ownership of the
 883 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 884 * required.
 885 */
 886static int axienet_tx_poll(struct napi_struct *napi, int budget)
 887{
 888	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
 889	struct net_device *ndev = lp->ndev;
 890	u32 size = 0;
 891	int packets;
 892
 893	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
 894
 895	if (packets) {
 896		lp->tx_bd_ci += packets;
 897		if (lp->tx_bd_ci >= lp->tx_bd_num)
 898			lp->tx_bd_ci %= lp->tx_bd_num;
 899
 900		u64_stats_update_begin(&lp->tx_stat_sync);
 901		u64_stats_add(&lp->tx_packets, packets);
 902		u64_stats_add(&lp->tx_bytes, size);
 903		u64_stats_update_end(&lp->tx_stat_sync);
 904
 905		/* Matches barrier in axienet_start_xmit */
 906		smp_mb();
 907
 908		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
 909			netif_wake_queue(ndev);
 910	}
 911
 912	if (packets < budget && napi_complete_done(napi, packets)) {
 913		/* Re-enable TX completion interrupts. This should
 914		 * cause an immediate interrupt if any TX packets are
 915		 * already pending.
 916		 */
 917		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 918	}
 919	return packets;
 920}
 921
 922/**
 923 * axienet_start_xmit - Starts the transmission.
 924 * @skb:	sk_buff pointer that contains data to be Txed.
 925 * @ndev:	Pointer to net_device structure.
 926 *
 927 * Return: NETDEV_TX_OK, on success
 928 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 929 *
 930 * This function is invoked from upper layers to initiate transmission. The
 931 * function uses the next available free BDs and populates their fields to
 932 * start the transmission. Additionally if checksum offloading is supported,
 933 * it populates AXI Stream Control fields with appropriate values.
 934 */
 935static netdev_tx_t
 936axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 937{
 938	u32 ii;
 939	u32 num_frag;
 940	u32 csum_start_off;
 941	u32 csum_index_off;
 942	skb_frag_t *frag;
 943	dma_addr_t tail_p, phys;
 944	u32 orig_tail_ptr, new_tail_ptr;
 945	struct axienet_local *lp = netdev_priv(ndev);
 946	struct axidma_bd *cur_p;
 947
 948	orig_tail_ptr = lp->tx_bd_tail;
 949	new_tail_ptr = orig_tail_ptr;
 950
 951	num_frag = skb_shinfo(skb)->nr_frags;
 952	cur_p = &lp->tx_bd_v[orig_tail_ptr];
 953
 954	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
 955		/* Should not happen as last start_xmit call should have
 956		 * checked for sufficient space and queue should only be
 957		 * woken when sufficient space is available.
 958		 */
 959		netif_stop_queue(ndev);
 960		if (net_ratelimit())
 961			netdev_warn(ndev, "TX ring unexpectedly full\n");
 962		return NETDEV_TX_BUSY;
 963	}
 964
 965	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 966		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 967			/* Tx Full Checksum Offload Enabled */
 968			cur_p->app0 |= 2;
 969		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
 970			csum_start_off = skb_transport_offset(skb);
 971			csum_index_off = csum_start_off + skb->csum_offset;
 972			/* Tx Partial Checksum Offload Enabled */
 973			cur_p->app0 |= 1;
 974			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
 975		}
 976	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 977		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
 978	}
 979
 980	phys = dma_map_single(lp->dev, skb->data,
 981			      skb_headlen(skb), DMA_TO_DEVICE);
 982	if (unlikely(dma_mapping_error(lp->dev, phys))) {
 983		if (net_ratelimit())
 984			netdev_err(ndev, "TX DMA mapping error\n");
 985		ndev->stats.tx_dropped++;
 986		return NETDEV_TX_OK;
 987	}
 988	desc_set_phys_addr(lp, phys, cur_p);
 989	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 990
 991	for (ii = 0; ii < num_frag; ii++) {
 992		if (++new_tail_ptr >= lp->tx_bd_num)
 993			new_tail_ptr = 0;
 994		cur_p = &lp->tx_bd_v[new_tail_ptr];
 995		frag = &skb_shinfo(skb)->frags[ii];
 996		phys = dma_map_single(lp->dev,
 997				      skb_frag_address(frag),
 998				      skb_frag_size(frag),
 999				      DMA_TO_DEVICE);
1000		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1001			if (net_ratelimit())
1002				netdev_err(ndev, "TX DMA mapping error\n");
1003			ndev->stats.tx_dropped++;
1004			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1005					      true, NULL, 0);
1006			return NETDEV_TX_OK;
1007		}
1008		desc_set_phys_addr(lp, phys, cur_p);
1009		cur_p->cntrl = skb_frag_size(frag);
1010	}
1011
1012	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1013	cur_p->skb = skb;
1014
1015	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1016	if (++new_tail_ptr >= lp->tx_bd_num)
1017		new_tail_ptr = 0;
1018	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1019
1020	/* Start the transfer */
1021	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1022
1023	/* Stop queue if next transmit may not have space */
1024	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1025		netif_stop_queue(ndev);
1026
1027		/* Matches barrier in axienet_tx_poll */
1028		smp_mb();
1029
1030		/* Space might have just been freed - check again */
1031		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1032			netif_wake_queue(ndev);
1033	}
1034
1035	return NETDEV_TX_OK;
1036}
1037
1038/**
1039 * axienet_dma_rx_cb - DMA engine callback for RX channel.
1040 * @data:       Pointer to the skbuf_dma_descriptor structure.
1041 * @result:     error reporting through dmaengine_result.
1042 * This function is called by dmaengine driver for RX channel to notify
1043 * that the packet is received.
1044 */
1045static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1046{
1047	struct skbuf_dma_descriptor *skbuf_dma;
1048	size_t meta_len, meta_max_len, rx_len;
1049	struct axienet_local *lp = data;
1050	struct sk_buff *skb;
1051	u32 *app_metadata;
1052
1053	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1054	skb = skbuf_dma->skb;
1055	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1056						       &meta_max_len);
1057	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1058			 DMA_FROM_DEVICE);
1059	/* TODO: Derive app word index programmatically */
1060	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1061	skb_put(skb, rx_len);
1062	skb->protocol = eth_type_trans(skb, lp->ndev);
1063	skb->ip_summed = CHECKSUM_NONE;
1064
1065	__netif_rx(skb);
1066	u64_stats_update_begin(&lp->rx_stat_sync);
1067	u64_stats_add(&lp->rx_packets, 1);
1068	u64_stats_add(&lp->rx_bytes, rx_len);
1069	u64_stats_update_end(&lp->rx_stat_sync);
1070	axienet_rx_submit_desc(lp->ndev);
1071	dma_async_issue_pending(lp->rx_chan);
1072}
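
/* As in the descriptor-ring path (see axienet_rx_poll()), the low 16 bits of
 * status app word 4 carry the number of bytes actually received; LEN_APP (4)
 * indexes that same word in the metadata array here.
 */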
1073
1074/**
1075 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1076 * @napi:	Pointer to NAPI structure.
1077 * @budget:	Max number of RX packets to process.
1078 *
1079 * Return: Number of RX packets processed.
1080 */
1081static int axienet_rx_poll(struct napi_struct *napi, int budget)
1082{
1083	u32 length;
1084	u32 csumstatus;
1085	u32 size = 0;
1086	int packets = 0;
1087	dma_addr_t tail_p = 0;
1088	struct axidma_bd *cur_p;
1089	struct sk_buff *skb, *new_skb;
1090	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1091
1092	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1093
1094	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1095		dma_addr_t phys;
1096
1097		/* Ensure we see complete descriptor update */
1098		dma_rmb();
1099
1100		skb = cur_p->skb;
1101		cur_p->skb = NULL;
1102
1103		/* skb could be NULL if a previous pass already received the
1104		 * packet for this slot in the ring, but failed to refill it
1105		 * with a newly allocated buffer. In this case, don't try to
1106		 * receive it again.
1107		 */
1108		if (likely(skb)) {
1109			length = cur_p->app4 & 0x0000FFFF;
1110
1111			phys = desc_get_phys_addr(lp, cur_p);
1112			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1113					 DMA_FROM_DEVICE);
1114
1115			skb_put(skb, length);
1116			skb->protocol = eth_type_trans(skb, lp->ndev);
1117			/*skb_checksum_none_assert(skb);*/
1118			skb->ip_summed = CHECKSUM_NONE;
1119
1120			/* if we're doing Rx csum offload, set it up */
1121			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1122				csumstatus = (cur_p->app2 &
1123					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
1124				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1125				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1126					skb->ip_summed = CHECKSUM_UNNECESSARY;
1127				}
1128			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
1129				   skb->protocol == htons(ETH_P_IP) &&
1130				   skb->len > 64) {
1131				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1132				skb->ip_summed = CHECKSUM_COMPLETE;
1133			}
1134
1135			napi_gro_receive(napi, skb);
1136
1137			size += length;
1138			packets++;
1139		}
1140
1141		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1142		if (!new_skb)
1143			break;
1144
1145		phys = dma_map_single(lp->dev, new_skb->data,
1146				      lp->max_frm_size,
1147				      DMA_FROM_DEVICE);
1148		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1149			if (net_ratelimit())
1150				netdev_err(lp->ndev, "RX DMA mapping error\n");
1151			dev_kfree_skb(new_skb);
1152			break;
1153		}
1154		desc_set_phys_addr(lp, phys, cur_p);
1155
1156		cur_p->cntrl = lp->max_frm_size;
1157		cur_p->status = 0;
1158		cur_p->skb = new_skb;
1159
1160		/* Only update tail_p to mark this slot as usable after it has
1161		 * been successfully refilled.
1162		 */
1163		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1164
1165		if (++lp->rx_bd_ci >= lp->rx_bd_num)
1166			lp->rx_bd_ci = 0;
1167		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1168	}
1169
1170	u64_stats_update_begin(&lp->rx_stat_sync);
1171	u64_stats_add(&lp->rx_packets, packets);
1172	u64_stats_add(&lp->rx_bytes, size);
1173	u64_stats_update_end(&lp->rx_stat_sync);
1174
1175	if (tail_p)
1176		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1177
1178	if (packets < budget && napi_complete_done(napi, packets)) {
1179		/* Re-enable RX completion interrupts. This should
1180		 * cause an immediate interrupt if any RX packets are
1181		 * already pending.
1182		 */
1183		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1184	}
1185	return packets;
1186}
1187
1188/**
1189 * axienet_tx_irq - Tx Done Isr.
1190 * @irq:	irq number
1191 * @_ndev:	net_device pointer
1192 *
1193 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1194 *
1195 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1196 * TX BD processing.
1197 */
1198static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1199{
1200	unsigned int status;
1201	struct net_device *ndev = _ndev;
1202	struct axienet_local *lp = netdev_priv(ndev);
1203
1204	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1205
1206	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1207		return IRQ_NONE;
1208
1209	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1210
1211	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1212		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1213		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1214			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1215			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1216		schedule_work(&lp->dma_err_task);
1217	} else {
1218		/* Disable further TX completion interrupts and schedule
1219		 * NAPI to handle the completions.
1220		 */
1221		u32 cr = lp->tx_dma_cr;
1222
1223		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1224		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1225
1226		napi_schedule(&lp->napi_tx);
1227	}
1228
1229	return IRQ_HANDLED;
1230}
1231
1232/**
1233 * axienet_rx_irq - Rx Isr.
1234 * @irq:	irq number
1235 * @_ndev:	net_device pointer
1236 *
1237 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1238 *
1239 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1240 * processing.
1241 */
1242static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1243{
1244	unsigned int status;
1245	struct net_device *ndev = _ndev;
1246	struct axienet_local *lp = netdev_priv(ndev);
1247
1248	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1249
1250	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1251		return IRQ_NONE;
1252
1253	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1254
1255	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1256		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1257		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1258			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1259			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1260		schedule_work(&lp->dma_err_task);
1261	} else {
1262		/* Disable further RX completion interrupts and schedule
1263		 * NAPI receive.
1264		 */
1265		u32 cr = lp->rx_dma_cr;
1266
1267		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1268		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1269
1270		napi_schedule(&lp->napi_rx);
1271	}
1272
1273	return IRQ_HANDLED;
1274}
1275
1276/**
1277 * axienet_eth_irq - Ethernet core Isr.
1278 * @irq:	irq number
1279 * @_ndev:	net_device pointer
1280 *
1281 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1282 *
1283 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1284 */
1285static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1286{
1287	struct net_device *ndev = _ndev;
1288	struct axienet_local *lp = netdev_priv(ndev);
1289	unsigned int pending;
1290
1291	pending = axienet_ior(lp, XAE_IP_OFFSET);
1292	if (!pending)
1293		return IRQ_NONE;
1294
1295	if (pending & XAE_INT_RXFIFOOVR_MASK)
1296		ndev->stats.rx_missed_errors++;
1297
1298	if (pending & XAE_INT_RXRJECT_MASK)
1299		ndev->stats.rx_frame_errors++;
1300
1301	axienet_iow(lp, XAE_IS_OFFSET, pending);
1302	return IRQ_HANDLED;
1303}
1304
1305static void axienet_dma_err_handler(struct work_struct *work);
1306
1307/**
1308 * axienet_rx_submit_desc - Submit an RX descriptor to the dmaengine.
1309 * Allocate an skbuff, map it into the scatterlist, obtain a descriptor,
1310 * then add the callback information and submit the descriptor.
1311 *
1312 * @ndev:	net_device pointer
1313 *
1314 */
1315static void axienet_rx_submit_desc(struct net_device *ndev)
1316{
1317	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1318	struct axienet_local *lp = netdev_priv(ndev);
1319	struct skbuf_dma_descriptor *skbuf_dma;
1320	struct sk_buff *skb;
1321	dma_addr_t addr;
1322
1323	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1324	if (!skbuf_dma)
1325		return;
1326
1327	lp->rx_ring_head++;
1328	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1329	if (!skb)
1330		return;
1331
1332	sg_init_table(skbuf_dma->sgl, 1);
1333	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1334	if (unlikely(dma_mapping_error(lp->dev, addr))) {
1335		if (net_ratelimit())
1336			netdev_err(ndev, "DMA mapping error\n");
1337		goto rx_submit_err_free_skb;
1338	}
1339	sg_dma_address(skbuf_dma->sgl) = addr;
1340	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1341	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1342					      1, DMA_DEV_TO_MEM,
1343					      DMA_PREP_INTERRUPT);
1344	if (!dma_rx_desc)
1345		goto rx_submit_err_unmap_skb;
1346
1347	skbuf_dma->skb = skb;
1348	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1349	skbuf_dma->desc = dma_rx_desc;
1350	dma_rx_desc->callback_param = lp;
1351	dma_rx_desc->callback_result = axienet_dma_rx_cb;
1352	dmaengine_submit(dma_rx_desc);
1353
1354	return;
1355
1356rx_submit_err_unmap_skb:
1357	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1358rx_submit_err_free_skb:
1359	dev_kfree_skb(skb);
1360}
1361
1362/**
1363 * axienet_init_dmaengine - init the dmaengine code.
1364 * @ndev:       Pointer to net_device structure
1365 *
1366 * Return: 0, on success.
1367 *          non-zero error value on failure
1368 *
1369 * This is the dmaengine initialization code.
1370 */
1371static int axienet_init_dmaengine(struct net_device *ndev)
1372{
1373	struct axienet_local *lp = netdev_priv(ndev);
1374	struct skbuf_dma_descriptor *skbuf_dma;
1375	int i, ret;
1376
1377	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1378	if (IS_ERR(lp->tx_chan)) {
1379		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1380		return PTR_ERR(lp->tx_chan);
1381	}
1382
1383	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1384	if (IS_ERR(lp->rx_chan)) {
1385		ret = PTR_ERR(lp->rx_chan);
1386		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1387		goto err_dma_release_tx;
1388	}
1389
1390	lp->tx_ring_tail = 0;
1391	lp->tx_ring_head = 0;
1392	lp->rx_ring_tail = 0;
1393	lp->rx_ring_head = 0;
1394	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1395				  GFP_KERNEL);
1396	if (!lp->tx_skb_ring) {
1397		ret = -ENOMEM;
1398		goto err_dma_release_rx;
1399	}
1400	for (i = 0; i < TX_BD_NUM_MAX; i++) {
1401		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1402		if (!skbuf_dma) {
1403			ret = -ENOMEM;
1404			goto err_free_tx_skb_ring;
1405		}
1406		lp->tx_skb_ring[i] = skbuf_dma;
1407	}
1408
1409	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1410				  GFP_KERNEL);
1411	if (!lp->rx_skb_ring) {
1412		ret = -ENOMEM;
1413		goto err_free_tx_skb_ring;
1414	}
1415	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1416		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1417		if (!skbuf_dma) {
1418			ret = -ENOMEM;
1419			goto err_free_rx_skb_ring;
1420		}
1421		lp->rx_skb_ring[i] = skbuf_dma;
1422	}
1423	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1424	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1425		axienet_rx_submit_desc(ndev);
1426	dma_async_issue_pending(lp->rx_chan);
1427
1428	return 0;
1429
1430err_free_rx_skb_ring:
1431	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1432		kfree(lp->rx_skb_ring[i]);
1433	kfree(lp->rx_skb_ring);
1434err_free_tx_skb_ring:
1435	for (i = 0; i < TX_BD_NUM_MAX; i++)
1436		kfree(lp->tx_skb_ring[i]);
1437	kfree(lp->tx_skb_ring);
1438err_dma_release_rx:
1439	dma_release_channel(lp->rx_chan);
1440err_dma_release_tx:
1441	dma_release_channel(lp->tx_chan);
1442	return ret;
1443}
1444
1445/**
1446 * axienet_init_legacy_dma - init the dma legacy code.
1447 * @ndev:       Pointer to net_device structure
1448 *
1449 * Return: 0, on success.
1450 *          non-zero error value on failure
1451 *
1452 * This is the legacy DMA initialization code. It also registers the
1453 * interrupt service routines and enables the interrupt lines.
1454 *
1455 */
1456static int axienet_init_legacy_dma(struct net_device *ndev)
1457{
1458	int ret;
1459	struct axienet_local *lp = netdev_priv(ndev);
1460
1461	/* Enable worker thread for Axi DMA error handling */
1462	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1463
1464	napi_enable(&lp->napi_rx);
1465	napi_enable(&lp->napi_tx);
1466
1467	/* Enable interrupts for Axi DMA Tx */
1468	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1469			  ndev->name, ndev);
1470	if (ret)
1471		goto err_tx_irq;
1472	/* Enable interrupts for Axi DMA Rx */
1473	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1474			  ndev->name, ndev);
1475	if (ret)
1476		goto err_rx_irq;
1477	/* Enable interrupts for Axi Ethernet core (if defined) */
1478	if (lp->eth_irq > 0) {
1479		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1480				  ndev->name, ndev);
1481		if (ret)
1482			goto err_eth_irq;
1483	}
1484
1485	return 0;
1486
1487err_eth_irq:
1488	free_irq(lp->rx_irq, ndev);
1489err_rx_irq:
1490	free_irq(lp->tx_irq, ndev);
1491err_tx_irq:
1492	napi_disable(&lp->napi_tx);
1493	napi_disable(&lp->napi_rx);
1494	cancel_work_sync(&lp->dma_err_task);
1495	dev_err(lp->dev, "request_irq() failed\n");
1496	return ret;
1497}
1498
1499/**
1500 * axienet_open - Driver open routine.
1501 * @ndev:	Pointer to net_device structure
1502 *
1503 * Return: 0, on success.
1504 *	    non-zero error value on failure
1505 *
1506 * This is the driver open routine. It calls phylink_start to start the
1507 * PHY device.
1508 * It also allocates interrupt service routines, enables the interrupt lines
1509 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1510 * descriptors are initialized.
1511 */
1512static int axienet_open(struct net_device *ndev)
1513{
1514	int ret;
1515	struct axienet_local *lp = netdev_priv(ndev);
1516
1517	dev_dbg(&ndev->dev, "%s\n", __func__);
1518
1519	/* When we do an Axi Ethernet reset, it resets the complete core
1520	 * including the MDIO. MDIO must be disabled before resetting.
1521	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1522	 */
1523	axienet_lock_mii(lp);
1524	ret = axienet_device_reset(ndev);
1525	axienet_unlock_mii(lp);
1526
1527	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1528	if (ret) {
1529		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1530		return ret;
1531	}
1532
1533	phylink_start(lp->phylink);
1534
1535	if (lp->use_dmaengine) {
1536		/* Enable interrupts for Axi Ethernet core (if defined) */
1537		if (lp->eth_irq > 0) {
1538			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1539					  ndev->name, ndev);
1540			if (ret)
1541				goto err_phy;
1542		}
1543
1544		ret = axienet_init_dmaengine(ndev);
1545		if (ret < 0)
1546			goto err_free_eth_irq;
1547	} else {
1548		ret = axienet_init_legacy_dma(ndev);
1549		if (ret)
1550			goto err_phy;
1551	}
1552
1553	return 0;
1554
1555err_free_eth_irq:
1556	if (lp->eth_irq > 0)
1557		free_irq(lp->eth_irq, ndev);
1558err_phy:
1559	phylink_stop(lp->phylink);
1560	phylink_disconnect_phy(lp->phylink);
1561	return ret;
1562}
1563
1564/**
1565 * axienet_stop - Driver stop routine.
1566 * @ndev:	Pointer to net_device structure
1567 *
1568 * Return: 0, on success.
1569 *
1570 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1571 * device. It also removes the interrupt handlers and disables the interrupts.
1572 * The Axi DMA Tx/Rx BDs are released.
1573 */
1574static int axienet_stop(struct net_device *ndev)
1575{
1576	struct axienet_local *lp = netdev_priv(ndev);
1577	int i;
1578
1579	dev_dbg(&ndev->dev, "axienet_close()\n");
1580
1581	if (!lp->use_dmaengine) {
1582		napi_disable(&lp->napi_tx);
1583		napi_disable(&lp->napi_rx);
1584	}
1585
1586	phylink_stop(lp->phylink);
1587	phylink_disconnect_phy(lp->phylink);
1588
1589	axienet_setoptions(ndev, lp->options &
1590			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1591
1592	if (!lp->use_dmaengine) {
1593		axienet_dma_stop(lp);
1594		cancel_work_sync(&lp->dma_err_task);
1595		free_irq(lp->tx_irq, ndev);
1596		free_irq(lp->rx_irq, ndev);
1597		axienet_dma_bd_release(ndev);
1598	} else {
1599		dmaengine_terminate_sync(lp->tx_chan);
1600		dmaengine_synchronize(lp->tx_chan);
1601		dmaengine_terminate_sync(lp->rx_chan);
1602		dmaengine_synchronize(lp->rx_chan);
1603
1604		for (i = 0; i < TX_BD_NUM_MAX; i++)
1605			kfree(lp->tx_skb_ring[i]);
1606		kfree(lp->tx_skb_ring);
1607		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1608			kfree(lp->rx_skb_ring[i]);
1609		kfree(lp->rx_skb_ring);
1610
1611		dma_release_channel(lp->rx_chan);
1612		dma_release_channel(lp->tx_chan);
1613	}
1614
1615	axienet_iow(lp, XAE_IE_OFFSET, 0);
1616
1617	if (lp->eth_irq > 0)
1618		free_irq(lp->eth_irq, ndev);
1619	return 0;
1620}
1621
1622/**
1623 * axienet_change_mtu - Driver change mtu routine.
1624 * @ndev:	Pointer to net_device structure
1625 * @new_mtu:	New mtu value to be applied
1626 *
1627 * Return: 0 on success, or a negative error (-EBUSY, -EINVAL) on failure.
1628 *
1629 * This is the change mtu driver routine. It checks if the Axi Ethernet
1630 * hardware supports jumbo frames before changing the mtu. This can be
1631 * called only when the device is not up.
1632 */
1633static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1634{
1635	struct axienet_local *lp = netdev_priv(ndev);
1636
1637	if (netif_running(ndev))
1638		return -EBUSY;
1639
1640	if ((new_mtu + VLAN_ETH_HLEN +
1641		XAE_TRL_SIZE) > lp->rxmem)
1642		return -EINVAL;
1643
1644	ndev->mtu = new_mtu;
1645
1646	return 0;
1647}
1648
1649#ifdef CONFIG_NET_POLL_CONTROLLER
1650/**
1651 * axienet_poll_controller - Axi Ethernet poll mechanism.
1652 * @ndev:	Pointer to net_device structure
1653 *
1654 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1655 * to polling the ISRs and are re-enabled after the polling is done.
1656 */
1657static void axienet_poll_controller(struct net_device *ndev)
1658{
1659	struct axienet_local *lp = netdev_priv(ndev);
1660	disable_irq(lp->tx_irq);
1661	disable_irq(lp->rx_irq);
1662	axienet_rx_irq(lp->rx_irq, ndev);
1663	axienet_tx_irq(lp->tx_irq, ndev);
1664	enable_irq(lp->tx_irq);
1665	enable_irq(lp->rx_irq);
1666}
1667#endif
1668
1669static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1670{
1671	struct axienet_local *lp = netdev_priv(dev);
1672
1673	if (!netif_running(dev))
1674		return -EINVAL;
1675
1676	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1677}
1678
1679static void
1680axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1681{
1682	struct axienet_local *lp = netdev_priv(dev);
1683	unsigned int start;
1684
1685	netdev_stats_to_stats64(stats, &dev->stats);
1686
1687	do {
1688		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1689		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1690		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1691	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1692
1693	do {
1694		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1695		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1696		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1697	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1698}
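
/* The fetch_begin/fetch_retry reader above pairs with writers in the Rx/Tx
 * datapath. A minimal sketch of the update side, assuming the same
 * u64_stats_sync instances are used and with "packets"/"bytes" as
 * placeholder totals for one completion batch:
 *
 *	u64_stats_update_begin(&lp->rx_stat_sync);
 *	u64_stats_add(&lp->rx_packets, packets);
 *	u64_stats_add(&lp->rx_bytes, bytes);
 *	u64_stats_update_end(&lp->rx_stat_sync);
 *
 * The begin/end pair compiles away on 64-bit kernels and becomes a seqcount
 * write section on 32-bit ones, which is what the retry loop above
 * synchronizes against.
 */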
1699
1700static const struct net_device_ops axienet_netdev_ops = {
1701	.ndo_open = axienet_open,
1702	.ndo_stop = axienet_stop,
1703	.ndo_start_xmit = axienet_start_xmit,
1704	.ndo_get_stats64 = axienet_get_stats64,
1705	.ndo_change_mtu	= axienet_change_mtu,
1706	.ndo_set_mac_address = netdev_set_mac_address,
1707	.ndo_validate_addr = eth_validate_addr,
1708	.ndo_eth_ioctl = axienet_ioctl,
1709	.ndo_set_rx_mode = axienet_set_multicast_list,
1710#ifdef CONFIG_NET_POLL_CONTROLLER
1711	.ndo_poll_controller = axienet_poll_controller,
1712#endif
1713};
1714
1715static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1716	.ndo_open = axienet_open,
1717	.ndo_stop = axienet_stop,
1718	.ndo_start_xmit = axienet_start_xmit_dmaengine,
1719	.ndo_get_stats64 = axienet_get_stats64,
1720	.ndo_change_mtu	= axienet_change_mtu,
1721	.ndo_set_mac_address = netdev_set_mac_address,
1722	.ndo_validate_addr = eth_validate_addr,
1723	.ndo_eth_ioctl = axienet_ioctl,
1724	.ndo_set_rx_mode = axienet_set_multicast_list,
1725};
1726
1727/**
1728 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1729 * @ndev:	Pointer to net_device structure
1730 * @ed:		Pointer to ethtool_drvinfo structure
1731 *
1732 * This implements ethtool command for getting the driver information.
1733 * Issue "ethtool -i ethX" at the Linux prompt to execute this function.
1734 */
1735static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1736					 struct ethtool_drvinfo *ed)
1737{
1738	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1739	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1740}
1741
1742/**
1743 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1744 *				   AxiEthernet core.
1745 * @ndev:	Pointer to net_device structure
1746 *
1747 * This implements ethtool command for getting the total register length
1748 * information.
1749 *
1750 * Return: the total regs length
1751 */
1752static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1753{
1754	return sizeof(u32) * AXIENET_REGS_N;
1755}
1756
1757/**
1758 * axienet_ethtools_get_regs - Dump the contents of all registers present
1759 *			       in AxiEthernet core.
1760 * @ndev:	Pointer to net_device structure
1761 * @regs:	Pointer to ethtool_regs structure
1762 * @ret:	Void pointer used to return the contents of the registers.
1763 *
1764 * This implements ethtool command for getting the Axi Ethernet register dump.
1765 * Issue "ethtool -d ethX" to execute this function.
1766 */
1767static void axienet_ethtools_get_regs(struct net_device *ndev,
1768				      struct ethtool_regs *regs, void *ret)
1769{
1770	u32 *data = (u32 *)ret;
1771	size_t len = sizeof(u32) * AXIENET_REGS_N;
1772	struct axienet_local *lp = netdev_priv(ndev);
1773
1774	regs->version = 0;
1775	regs->len = len;
1776
1777	memset(data, 0, len);
1778	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1779	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1780	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1781	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1782	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1783	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1784	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1785	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1786	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1787	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1788	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1789	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1790	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1791	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1792	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1793	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1794	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1795	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1796	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1797	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1798	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1799	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1800	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1801	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1802	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1803	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1804	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1805	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1806	if (!lp->use_dmaengine) {
1807		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1808		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1809		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1810		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1811		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1812		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1813		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1814		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1815	}
1816}
1817
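/**
 * axienet_ethtools_get_ringparam - Report DMA ring sizes and limits.
 * @ndev:	Pointer to net_device structure
 * @ering:	Pointer to ethtool_ringparam structure to be filled in
 * @kernel_ering: Kernel-side ring parameters (unused here)
 * @extack:	Netlink extended ack for error reporting (unused here)
 *
 * Reports the current Tx/Rx buffer descriptor ring sizes together with the
 * driver maximums. Issue "ethtool -g ethX" to execute this function.
 */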
1818static void
1819axienet_ethtools_get_ringparam(struct net_device *ndev,
1820			       struct ethtool_ringparam *ering,
1821			       struct kernel_ethtool_ringparam *kernel_ering,
1822			       struct netlink_ext_ack *extack)
1823{
1824	struct axienet_local *lp = netdev_priv(ndev);
1825
1826	ering->rx_max_pending = RX_BD_NUM_MAX;
1827	ering->rx_mini_max_pending = 0;
1828	ering->rx_jumbo_max_pending = 0;
1829	ering->tx_max_pending = TX_BD_NUM_MAX;
1830	ering->rx_pending = lp->rx_bd_num;
1831	ering->rx_mini_pending = 0;
1832	ering->rx_jumbo_pending = 0;
1833	ering->tx_pending = lp->tx_bd_num;
1834}
1835
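/**
 * axienet_ethtools_set_ringparam - Change the DMA ring sizes.
 * @ndev:	Pointer to net_device structure
 * @ering:	Pointer to ethtool_ringparam structure with the requested sizes
 * @kernel_ering: Kernel-side ring parameters (unused here)
 * @extack:	Netlink extended ack for error reporting (unused here)
 *
 * The requested sizes are validated against the driver limits and take
 * effect the next time the interface is brought up. Issue
 * "ethtool -G ethX rx N tx N" to execute this function.
 *
 * Return: 0 on success, -EINVAL for unsupported ring sizes, -EBUSY if the
 * interface is running.
 */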
1836static int
1837axienet_ethtools_set_ringparam(struct net_device *ndev,
1838			       struct ethtool_ringparam *ering,
1839			       struct kernel_ethtool_ringparam *kernel_ering,
1840			       struct netlink_ext_ack *extack)
1841{
1842	struct axienet_local *lp = netdev_priv(ndev);
1843
1844	if (ering->rx_pending > RX_BD_NUM_MAX ||
1845	    ering->rx_mini_pending ||
1846	    ering->rx_jumbo_pending ||
1847	    ering->tx_pending < TX_BD_NUM_MIN ||
1848	    ering->tx_pending > TX_BD_NUM_MAX)
1849		return -EINVAL;
1850
1851	if (netif_running(ndev))
1852		return -EBUSY;
1853
1854	lp->rx_bd_num = ering->rx_pending;
1855	lp->tx_bd_num = ering->tx_pending;
1856	return 0;
1857}
1858
1859/**
1860 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1861 *				     Tx and Rx paths.
1862 * @ndev:	Pointer to net_device structure
1863 * @epauseparm:	Pointer to ethtool_pauseparam structure.
1864 *
1865 * This implements ethtool command for getting axi ethernet pause frame
1866 * setting. Issue "ethtool -a ethX" to execute this function.
1867 */
1868static void
1869axienet_ethtools_get_pauseparam(struct net_device *ndev,
1870				struct ethtool_pauseparam *epauseparm)
1871{
1872	struct axienet_local *lp = netdev_priv(ndev);
1873
1874	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1875}
1876
1877/**
1878 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1879 *				     settings.
1880 * @ndev:	Pointer to net_device structure
1881 * @epauseparm:	Pointer to ethtool_pauseparam structure
1882 *
1883 * This implements ethtool command for enabling flow control on Rx and Tx
1884 * paths. Issue "ethtool -A ethX tx on|off" at the Linux prompt to execute
1885 * this function.
1886 *
1887 * Return: 0 on success, or a negative error code from phylink on failure.
1888 */
1889static int
1890axienet_ethtools_set_pauseparam(struct net_device *ndev,
1891				struct ethtool_pauseparam *epauseparm)
1892{
1893	struct axienet_local *lp = netdev_priv(ndev);
1894
1895	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
1896}
1897
1898/**
1899 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
1900 * @ndev:	Pointer to net_device structure
1901 * @ecoalesce:	Pointer to ethtool_coalesce structure
1902 * @kernel_coal: ethtool CQE mode setting structure
1903 * @extack:	extack for reporting error messages
1904 *
1905 * This implements ethtool command for getting the DMA interrupt coalescing
1906 * count on Tx and Rx paths. Issue "ethtool -c ethX" at the Linux prompt to
1907 * execute this function.
1908 *
1909 * Return: 0 always
1910 */
1911static int
1912axienet_ethtools_get_coalesce(struct net_device *ndev,
1913			      struct ethtool_coalesce *ecoalesce,
1914			      struct kernel_ethtool_coalesce *kernel_coal,
1915			      struct netlink_ext_ack *extack)
1916{
1917	struct axienet_local *lp = netdev_priv(ndev);
1918
1919	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
1920	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
1921	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
1922	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
1923	return 0;
1924}
1925
1926/**
1927 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
1928 * @ndev:	Pointer to net_device structure
1929 * @ecoalesce:	Pointer to ethtool_coalesce structure
1930 * @kernel_coal: ethtool CQE mode setting structure
1931 * @extack:	extack for reporting error messages
1932 *
1933 * This implements ethtool command for setting the DMA interrupt coalescing
1934 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" at the Linux
1935 * prompt to execute this function.
1936 *
1937 * Return: 0 on success, non-zero error value on failure.
1938 */
1939static int
1940axienet_ethtools_set_coalesce(struct net_device *ndev,
1941			      struct ethtool_coalesce *ecoalesce,
1942			      struct kernel_ethtool_coalesce *kernel_coal,
1943			      struct netlink_ext_ack *extack)
1944{
1945	struct axienet_local *lp = netdev_priv(ndev);
1946
1947	if (netif_running(ndev)) {
1948		netdev_err(ndev,
1949			   "Please stop netif before applying configuration\n");
1950		return -EFAULT;
1951	}
1952
1953	if (ecoalesce->rx_max_coalesced_frames)
1954		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
1955	if (ecoalesce->rx_coalesce_usecs)
1956		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
1957	if (ecoalesce->tx_max_coalesced_frames)
1958		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
1959	if (ecoalesce->tx_coalesce_usecs)
1960		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
1961
1962	return 0;
1963}
1964
1965static int
1966axienet_ethtools_get_link_ksettings(struct net_device *ndev,
1967				    struct ethtool_link_ksettings *cmd)
1968{
1969	struct axienet_local *lp = netdev_priv(ndev);
1970
1971	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
1972}
1973
1974static int
1975axienet_ethtools_set_link_ksettings(struct net_device *ndev,
1976				    const struct ethtool_link_ksettings *cmd)
1977{
1978	struct axienet_local *lp = netdev_priv(ndev);
1979
1980	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
1981}
1982
1983static int axienet_ethtools_nway_reset(struct net_device *dev)
1984{
1985	struct axienet_local *lp = netdev_priv(dev);
1986
1987	return phylink_ethtool_nway_reset(lp->phylink);
1988}
1989
1990static const struct ethtool_ops axienet_ethtool_ops = {
1991	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
1992				     ETHTOOL_COALESCE_USECS,
1993	.get_drvinfo    = axienet_ethtools_get_drvinfo,
1994	.get_regs_len   = axienet_ethtools_get_regs_len,
1995	.get_regs       = axienet_ethtools_get_regs,
1996	.get_link       = ethtool_op_get_link,
1997	.get_ringparam	= axienet_ethtools_get_ringparam,
1998	.set_ringparam	= axienet_ethtools_set_ringparam,
1999	.get_pauseparam = axienet_ethtools_get_pauseparam,
2000	.set_pauseparam = axienet_ethtools_set_pauseparam,
2001	.get_coalesce   = axienet_ethtools_get_coalesce,
2002	.set_coalesce   = axienet_ethtools_set_coalesce,
2003	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
2004	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
2005	.nway_reset	= axienet_ethtools_nway_reset,
2006};
2007
2008static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2009{
2010	return container_of(pcs, struct axienet_local, pcs);
2011}
2012
2013static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2014				  struct phylink_link_state *state)
2015{
2016	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2017
2018	phylink_mii_c22_pcs_get_state(pcs_phy, state);
2019}
2020
2021static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2022{
2023	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2024
2025	phylink_mii_c22_pcs_an_restart(pcs_phy);
2026}
2027
2028static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2029			      phy_interface_t interface,
2030			      const unsigned long *advertising,
2031			      bool permit_pause_to_mac)
2032{
2033	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2034	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2035	struct axienet_local *lp = netdev_priv(ndev);
2036	int ret;
2037
2038	if (lp->switch_x_sgmii) {
2039		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2040				    interface == PHY_INTERFACE_MODE_SGMII ?
2041					XLNX_MII_STD_SELECT_SGMII : 0);
2042		if (ret < 0) {
2043			netdev_warn(ndev,
2044				    "Failed to switch PHY interface: %d\n",
2045				    ret);
2046			return ret;
2047		}
2048	}
2049
2050	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2051					 neg_mode);
2052	if (ret < 0)
2053		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2054
2055	return ret;
2056}
2057
2058static const struct phylink_pcs_ops axienet_pcs_ops = {
2059	.pcs_get_state = axienet_pcs_get_state,
2060	.pcs_config = axienet_pcs_config,
2061	.pcs_an_restart = axienet_pcs_an_restart,
2062};
2063
2064static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2065						  phy_interface_t interface)
2066{
2067	struct net_device *ndev = to_net_dev(config->dev);
2068	struct axienet_local *lp = netdev_priv(ndev);
2069
2070	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2071	    interface ==  PHY_INTERFACE_MODE_SGMII)
2072		return &lp->pcs;
2073
2074	return NULL;
2075}
2076
2077static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2078			       const struct phylink_link_state *state)
2079{
2080	/* nothing meaningful to do */
2081}
2082
2083static void axienet_mac_link_down(struct phylink_config *config,
2084				  unsigned int mode,
2085				  phy_interface_t interface)
2086{
2087	/* nothing meaningful to do */
2088}
2089
2090static void axienet_mac_link_up(struct phylink_config *config,
2091				struct phy_device *phy,
2092				unsigned int mode, phy_interface_t interface,
2093				int speed, int duplex,
2094				bool tx_pause, bool rx_pause)
2095{
2096	struct net_device *ndev = to_net_dev(config->dev);
2097	struct axienet_local *lp = netdev_priv(ndev);
2098	u32 emmc_reg, fcc_reg;
2099
2100	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2101	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2102
2103	switch (speed) {
2104	case SPEED_1000:
2105		emmc_reg |= XAE_EMMC_LINKSPD_1000;
2106		break;
2107	case SPEED_100:
2108		emmc_reg |= XAE_EMMC_LINKSPD_100;
2109		break;
2110	case SPEED_10:
2111		emmc_reg |= XAE_EMMC_LINKSPD_10;
2112		break;
2113	default:
2114		dev_err(&ndev->dev,
2115			"Speed other than 10, 100 or 1000 Mbps is not supported\n");
2116		break;
2117	}
2118
2119	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2120
2121	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2122	if (tx_pause)
2123		fcc_reg |= XAE_FCC_FCTX_MASK;
2124	else
2125		fcc_reg &= ~XAE_FCC_FCTX_MASK;
2126	if (rx_pause)
2127		fcc_reg |= XAE_FCC_FCRX_MASK;
2128	else
2129		fcc_reg &= ~XAE_FCC_FCRX_MASK;
2130	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2131}
2132
2133static const struct phylink_mac_ops axienet_phylink_ops = {
2134	.mac_select_pcs = axienet_mac_select_pcs,
2135	.mac_config = axienet_mac_config,
2136	.mac_link_down = axienet_mac_link_down,
2137	.mac_link_up = axienet_mac_link_up,
2138};
2139
2140/**
2141 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2142 * @work:	pointer to work_struct
2143 *
2144 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2145 * Tx/Rx BDs.
2146 */
2147static void axienet_dma_err_handler(struct work_struct *work)
2148{
2149	u32 i;
2150	u32 axienet_status;
2151	struct axidma_bd *cur_p;
2152	struct axienet_local *lp = container_of(work, struct axienet_local,
2153						dma_err_task);
2154	struct net_device *ndev = lp->ndev;
2155
2156	napi_disable(&lp->napi_tx);
2157	napi_disable(&lp->napi_rx);
2158
2159	axienet_setoptions(ndev, lp->options &
2160			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2161
2162	axienet_dma_stop(lp);
2163
2164	for (i = 0; i < lp->tx_bd_num; i++) {
2165		cur_p = &lp->tx_bd_v[i];
2166		if (cur_p->cntrl) {
2167			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2168
2169			dma_unmap_single(lp->dev, addr,
2170					 (cur_p->cntrl &
2171					  XAXIDMA_BD_CTRL_LENGTH_MASK),
2172					 DMA_TO_DEVICE);
2173		}
2174		if (cur_p->skb)
2175			dev_kfree_skb_irq(cur_p->skb);
2176		cur_p->phys = 0;
2177		cur_p->phys_msb = 0;
2178		cur_p->cntrl = 0;
2179		cur_p->status = 0;
2180		cur_p->app0 = 0;
2181		cur_p->app1 = 0;
2182		cur_p->app2 = 0;
2183		cur_p->app3 = 0;
2184		cur_p->app4 = 0;
2185		cur_p->skb = NULL;
2186	}
2187
2188	for (i = 0; i < lp->rx_bd_num; i++) {
2189		cur_p = &lp->rx_bd_v[i];
2190		cur_p->status = 0;
2191		cur_p->app0 = 0;
2192		cur_p->app1 = 0;
2193		cur_p->app2 = 0;
2194		cur_p->app3 = 0;
2195		cur_p->app4 = 0;
2196	}
2197
2198	lp->tx_bd_ci = 0;
2199	lp->tx_bd_tail = 0;
2200	lp->rx_bd_ci = 0;
2201
2202	axienet_dma_start(lp);
2203
2204	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2205	axienet_status &= ~XAE_RCW1_RX_MASK;
2206	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2207
2208	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2209	if (axienet_status & XAE_INT_RXRJECT_MASK)
2210		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2211	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2212		    XAE_INT_RECV_ERROR_MASK : 0);
2213	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2214
2215	/* Sync default options with HW but leave receiver and
2216	 * transmitter disabled.
2217	 */
2218	axienet_setoptions(ndev, lp->options &
2219			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2220	axienet_set_mac_address(ndev, NULL);
2221	axienet_set_multicast_list(ndev);
2222	axienet_setoptions(ndev, lp->options);
2223	napi_enable(&lp->napi_rx);
2224	napi_enable(&lp->napi_tx);
2225}
2226
2227/**
2228 * axienet_probe - Axi Ethernet probe function.
2229 * @pdev:	Pointer to platform device structure.
2230 *
2231 * Return: 0 on success,
2232 *	    or a negative error value on failure.
2233 *
2234 * This is the probe routine for Axi Ethernet driver. This is called before
2235 * any other driver routines are invoked. It allocates and sets up the Ethernet
2236 * device, parses the device tree, and populates the fields of
2237 * axienet_local. It registers the Ethernet device.
2238 */
2239static int axienet_probe(struct platform_device *pdev)
2240{
2241	int ret;
2242	struct device_node *np;
2243	struct axienet_local *lp;
2244	struct net_device *ndev;
2245	struct resource *ethres;
2246	u8 mac_addr[ETH_ALEN];
2247	int addr_width = 32;
2248	u32 value;
2249
2250	ndev = alloc_etherdev(sizeof(*lp));
2251	if (!ndev)
2252		return -ENOMEM;
2253
2254	platform_set_drvdata(pdev, ndev);
2255
2256	SET_NETDEV_DEV(ndev, &pdev->dev);
2257	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
2258	ndev->features = NETIF_F_SG;
2259	ndev->ethtool_ops = &axienet_ethtool_ops;
2260
2261	/* MTU range: 64 - 9000 */
2262	ndev->min_mtu = 64;
2263	ndev->max_mtu = XAE_JUMBO_MTU;
2264
2265	lp = netdev_priv(ndev);
2266	lp->ndev = ndev;
2267	lp->dev = &pdev->dev;
2268	lp->options = XAE_OPTION_DEFAULTS;
2269	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2270	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2271
2272	u64_stats_init(&lp->rx_stat_sync);
2273	u64_stats_init(&lp->tx_stat_sync);
2274
2275	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2276	if (!lp->axi_clk) {
2277		/* For backward compatibility, if named AXI clock is not present,
2278		 * treat the first clock specified as the AXI clock.
2279		 */
2280		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2281	}
2282	if (IS_ERR(lp->axi_clk)) {
2283		ret = PTR_ERR(lp->axi_clk);
2284		goto free_netdev;
2285	}
2286	ret = clk_prepare_enable(lp->axi_clk);
2287	if (ret) {
2288		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2289		goto free_netdev;
2290	}
2291
2292	lp->misc_clks[0].id = "axis_clk";
2293	lp->misc_clks[1].id = "ref_clk";
2294	lp->misc_clks[2].id = "mgt_clk";
2295
2296	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2297	if (ret)
2298		goto cleanup_clk;
2299
2300	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2301	if (ret)
2302		goto cleanup_clk;
2303
2304	/* Map device registers */
2305	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2306	if (IS_ERR(lp->regs)) {
2307		ret = PTR_ERR(lp->regs);
2308		goto cleanup_clk;
2309	}
2310	lp->regs_start = ethres->start;
2311
2312	/* Setup checksum offload, but default to off if not specified */
2313	lp->features = 0;
2314
2315	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2316	if (!ret) {
2317		switch (value) {
2318		case 1:
2319			lp->csum_offload_on_tx_path =
2320				XAE_FEATURE_PARTIAL_TX_CSUM;
2321			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2322			/* Can checksum TCP/UDP over IPv4. */
2323			ndev->features |= NETIF_F_IP_CSUM;
2324			break;
2325		case 2:
2326			lp->csum_offload_on_tx_path =
2327				XAE_FEATURE_FULL_TX_CSUM;
2328			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2329			/* Can checksum TCP/UDP over IPv4. */
2330			ndev->features |= NETIF_F_IP_CSUM;
2331			break;
2332		default:
2333			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
2334		}
2335	}
2336	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2337	if (!ret) {
2338		switch (value) {
2339		case 1:
2340			lp->csum_offload_on_rx_path =
2341				XAE_FEATURE_PARTIAL_RX_CSUM;
2342			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2343			break;
2344		case 2:
2345			lp->csum_offload_on_rx_path =
2346				XAE_FEATURE_FULL_RX_CSUM;
2347			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2348			break;
2349		default:
2350			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
2351		}
2352	}
2353	/* For supporting jumbo frames, the Axi Ethernet hardware must have
2354	 * been configured with sufficiently large Rx/Tx memory; only then can
2355	 * the jumbo option be enabled and jumbo frames be handled.
2356	 * Read the Rx memory size provisioned in hardware from the device
2357	 * tree so the jumbo capability can be decided accordingly.
2358	 */
2359	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2360
2361	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2362						   "xlnx,switch-x-sgmii");
2363
2364	/* Start with the proprietary, and broken phy_type */
2365	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2366	if (!ret) {
2367		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2368		switch (value) {
2369		case XAE_PHY_TYPE_MII:
2370			lp->phy_mode = PHY_INTERFACE_MODE_MII;
2371			break;
2372		case XAE_PHY_TYPE_GMII:
2373			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2374			break;
2375		case XAE_PHY_TYPE_RGMII_2_0:
2376			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2377			break;
2378		case XAE_PHY_TYPE_SGMII:
2379			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2380			break;
2381		case XAE_PHY_TYPE_1000BASE_X:
2382			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2383			break;
2384		default:
2385			ret = -EINVAL;
2386			goto cleanup_clk;
2387		}
2388	} else {
2389		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2390		if (ret)
2391			goto cleanup_clk;
2392	}
2393	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2394	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2395		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2396		ret = -EINVAL;
2397		goto cleanup_clk;
2398	}
2399
2400	if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
2401		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2402		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2403
2404		if (np) {
2405			struct resource dmares;
2406
2407			ret = of_address_to_resource(np, 0, &dmares);
2408			if (ret) {
2409				dev_err(&pdev->dev,
2410					"unable to get DMA resource\n");
2411				of_node_put(np);
2412				goto cleanup_clk;
2413			}
2414			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2415							     &dmares);
2416			lp->rx_irq = irq_of_parse_and_map(np, 1);
2417			lp->tx_irq = irq_of_parse_and_map(np, 0);
2418			of_node_put(np);
2419			lp->eth_irq = platform_get_irq_optional(pdev, 0);
2420		} else {
2421			/* Check for these resources directly on the Ethernet node. */
2422			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2423			lp->rx_irq = platform_get_irq(pdev, 1);
2424			lp->tx_irq = platform_get_irq(pdev, 0);
2425			lp->eth_irq = platform_get_irq_optional(pdev, 2);
2426		}
2427		if (IS_ERR(lp->dma_regs)) {
2428			dev_err(&pdev->dev, "could not map DMA regs\n");
2429			ret = PTR_ERR(lp->dma_regs);
2430			goto cleanup_clk;
2431		}
2432		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2433			dev_err(&pdev->dev, "could not determine irqs\n");
2434			ret = -ENOMEM;
2435			goto cleanup_clk;
2436		}
2437
2438		/* Reset core now that clocks are enabled, prior to accessing MDIO */
2439		ret = __axienet_device_reset(lp);
2440		if (ret)
2441			goto cleanup_clk;
2442
2443		/* Autodetect the need for 64-bit DMA pointers.
2444		 * When the IP is configured for a bus width bigger than 32 bits,
2445		 * writing the MSB registers is mandatory, even if they are all 0.
2446		 * We can detect this case by writing all 1's to one such register
2447		 * and see if that sticks: when the IP is configured for 32 bits
2448		 * only, those registers are RES0.
2449		 * Those MSB registers were introduced in IP v7.1, which we check first.
2450		 */
2451		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2452			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2453
2454			iowrite32(0x0, desc);
2455			if (ioread32(desc) == 0) {	/* sanity check */
2456				iowrite32(0xffffffff, desc);
2457				if (ioread32(desc) > 0) {
2458					lp->features |= XAE_FEATURE_DMA_64BIT;
2459					addr_width = 64;
2460					dev_info(&pdev->dev,
2461						 "autodetected 64-bit DMA range\n");
2462				}
2463				iowrite32(0x0, desc);
2464			}
2465		}
2466		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2467			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2468			ret = -EINVAL;
2469			goto cleanup_clk;
2470		}
2471
2472		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2473		if (ret) {
2474			dev_err(&pdev->dev, "No suitable DMA available\n");
2475			goto cleanup_clk;
2476		}
2477		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2478		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2479	} else {
2480		struct xilinx_vdma_config cfg;
2481		struct dma_chan *tx_chan;
2482
2483		lp->eth_irq = platform_get_irq_optional(pdev, 0);
2484		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2485			ret = lp->eth_irq;
2486			goto cleanup_clk;
2487		}
2488		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2489		if (IS_ERR(tx_chan)) {
2490			ret = PTR_ERR(tx_chan);
2491			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2492			goto cleanup_clk;
2493		}
2494
2495		cfg.reset = 1;
2496		/* Despite the VDMA name, this config also supports DMA channel reset */
2497		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2498		if (ret < 0) {
2499			dev_err(&pdev->dev, "Reset channel failed\n");
2500			dma_release_channel(tx_chan);
2501			goto cleanup_clk;
2502		}
2503
2504		dma_release_channel(tx_chan);
2505		lp->use_dmaengine = 1;
2506	}
2507
2508	if (lp->use_dmaengine)
2509		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2510	else
2511		ndev->netdev_ops = &axienet_netdev_ops;
2512	/* Check for Ethernet core IRQ (optional) */
2513	if (lp->eth_irq <= 0)
2514		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2515
2516	/* Retrieve the MAC address */
2517	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2518	if (!ret) {
2519		axienet_set_mac_address(ndev, mac_addr);
2520	} else {
2521		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2522			 ret);
2523		axienet_set_mac_address(ndev, NULL);
2524	}
2525
2526	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2527	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2528	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2529	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2530
2531	ret = axienet_mdio_setup(lp);
2532	if (ret)
2533		dev_warn(&pdev->dev,
2534			 "error registering MDIO bus: %d\n", ret);
2535
2536	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2537	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2538		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2539		if (!np) {
2540			/* Deprecated: Always use "pcs-handle" for pcs_phy.
2541			 * Falling back to "phy-handle" here is only for
2542			 * backward compatibility with old device trees.
2543			 */
2544			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2545		}
2546		if (!np) {
2547			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2548			ret = -EINVAL;
2549			goto cleanup_mdio;
2550		}
2551		lp->pcs_phy = of_mdio_find_device(np);
2552		if (!lp->pcs_phy) {
2553			ret = -EPROBE_DEFER;
2554			of_node_put(np);
2555			goto cleanup_mdio;
2556		}
2557		of_node_put(np);
2558		lp->pcs.ops = &axienet_pcs_ops;
2559		lp->pcs.neg_mode = true;
2560		lp->pcs.poll = true;
2561	}
2562
2563	lp->phylink_config.dev = &ndev->dev;
2564	lp->phylink_config.type = PHYLINK_NETDEV;
2565	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2566		MAC_10FD | MAC_100FD | MAC_1000FD;
2567
2568	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2569	if (lp->switch_x_sgmii) {
2570		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2571			  lp->phylink_config.supported_interfaces);
2572		__set_bit(PHY_INTERFACE_MODE_SGMII,
2573			  lp->phylink_config.supported_interfaces);
2574	}
2575
2576	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2577				     lp->phy_mode,
2578				     &axienet_phylink_ops);
2579	if (IS_ERR(lp->phylink)) {
2580		ret = PTR_ERR(lp->phylink);
2581		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2582		goto cleanup_mdio;
2583	}
2584
2585	ret = register_netdev(lp->ndev);
2586	if (ret) {
2587		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2588		goto cleanup_phylink;
2589	}
2590
2591	return 0;
2592
2593cleanup_phylink:
2594	phylink_destroy(lp->phylink);
2595
2596cleanup_mdio:
2597	if (lp->pcs_phy)
2598		put_device(&lp->pcs_phy->dev);
2599	if (lp->mii_bus)
2600		axienet_mdio_teardown(lp);
2601cleanup_clk:
2602	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2603	clk_disable_unprepare(lp->axi_clk);
2604
2605free_netdev:
2606	free_netdev(ndev);
2607
2608	return ret;
2609}
2610
2611static void axienet_remove(struct platform_device *pdev)
2612{
2613	struct net_device *ndev = platform_get_drvdata(pdev);
2614	struct axienet_local *lp = netdev_priv(ndev);
2615
2616	unregister_netdev(ndev);
2617
2618	if (lp->phylink)
2619		phylink_destroy(lp->phylink);
2620
2621	if (lp->pcs_phy)
2622		put_device(&lp->pcs_phy->dev);
2623
2624	axienet_mdio_teardown(lp);
2625
2626	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2627	clk_disable_unprepare(lp->axi_clk);
2628
2629	free_netdev(ndev);
2630}
2631
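/**
 * axienet_shutdown - Platform shutdown hook.
 * @pdev:	Pointer to platform device structure
 *
 * Detaches the network interface and, if it is running, closes it so that
 * DMA is quiesced before the system goes down.
 */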
2632static void axienet_shutdown(struct platform_device *pdev)
2633{
2634	struct net_device *ndev = platform_get_drvdata(pdev);
2635
2636	rtnl_lock();
2637	netif_device_detach(ndev);
2638
2639	if (netif_running(ndev))
2640		dev_close(ndev);
2641
2642	rtnl_unlock();
2643}
2644
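/**
 * axienet_suspend - Suspend power management hook.
 * @dev:	Pointer to the device structure
 *
 * If the interface is running, detach it and bring it down so that the MAC
 * and DMA are stopped across suspend.
 *
 * Return: Always 0.
 */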
2645static int axienet_suspend(struct device *dev)
2646{
2647	struct net_device *ndev = dev_get_drvdata(dev);
2648
2649	if (!netif_running(ndev))
2650		return 0;
2651
2652	netif_device_detach(ndev);
2653
2654	rtnl_lock();
2655	axienet_stop(ndev);
2656	rtnl_unlock();
2657
2658	return 0;
2659}
2660
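/**
 * axienet_resume - Resume power management hook.
 * @dev:	Pointer to the device structure
 *
 * If the interface was running before suspend, bring it back up and
 * re-attach it to the stack.
 *
 * Return: Always 0.
 */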
2661static int axienet_resume(struct device *dev)
2662{
2663	struct net_device *ndev = dev_get_drvdata(dev);
2664
2665	if (!netif_running(ndev))
2666		return 0;
2667
2668	rtnl_lock();
2669	axienet_open(ndev);
2670	rtnl_unlock();
2671
2672	netif_device_attach(ndev);
2673
2674	return 0;
2675}
2676
2677static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
2678				axienet_suspend, axienet_resume);
2679
2680static struct platform_driver axienet_driver = {
2681	.probe = axienet_probe,
2682	.remove_new = axienet_remove,
2683	.shutdown = axienet_shutdown,
2684	.driver = {
2685		 .name = "xilinx_axienet",
2686		 .pm = &axienet_pm_ops,
2687		 .of_match_table = axienet_of_match,
2688	},
2689};
2690
2691module_platform_driver(axienet_driver);
2692
2693MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
2694MODULE_AUTHOR("Xilinx");
2695MODULE_LICENSE("GPL");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Xilinx Axi Ethernet device driver
   4 *
   5 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
   6 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
   7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
   8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
   9 * Copyright (c) 2010 - 2011 PetaLogix
  10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
  11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  12 *
  13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
  14 * and Spartan6.
  15 *
  16 * TODO:
  17 *  - Add Axi Fifo support.
  18 *  - Factor out Axi DMA code into separate driver.
  19 *  - Test and fix basic multicast filtering.
  20 *  - Add support for extended multicast filtering.
  21 *  - Test basic VLAN support.
  22 *  - Add support for extended VLAN support.
  23 */
  24
  25#include <linux/clk.h>
  26#include <linux/delay.h>
  27#include <linux/etherdevice.h>
  28#include <linux/module.h>
  29#include <linux/netdevice.h>
  30#include <linux/of.h>
  31#include <linux/of_mdio.h>
  32#include <linux/of_net.h>
  33#include <linux/of_irq.h>
  34#include <linux/of_address.h>
  35#include <linux/platform_device.h>
  36#include <linux/skbuff.h>
  37#include <linux/math64.h>
  38#include <linux/phy.h>
  39#include <linux/mii.h>
  40#include <linux/ethtool.h>
  41#include <linux/dmaengine.h>
  42#include <linux/dma-mapping.h>
  43#include <linux/dma/xilinx_dma.h>
  44#include <linux/circ_buf.h>
  45#include <net/netdev_queues.h>
  46
  47#include "xilinx_axienet.h"
  48
  49/* Descriptors defines for Tx and Rx DMA */
  50#define TX_BD_NUM_DEFAULT		128
  51#define RX_BD_NUM_DEFAULT		1024
  52#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
  53#define TX_BD_NUM_MAX			4096
  54#define RX_BD_NUM_MAX			4096
  55#define DMA_NUM_APP_WORDS		5
  56#define LEN_APP				4
  57#define RX_BUF_NUM_DEFAULT		128
  58
  59/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
  60#define DRIVER_NAME		"xaxienet"
  61#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
  62#define DRIVER_VERSION		"1.00a"
  63
  64#define AXIENET_REGS_N		40
  65
  66static void axienet_rx_submit_desc(struct net_device *ndev);
  67
  68/* Match table for of_platform binding */
  69static const struct of_device_id axienet_of_match[] = {
  70	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
  71	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
  72	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
  73	{},
  74};
  75
  76MODULE_DEVICE_TABLE(of, axienet_of_match);
  77
  78/* Option table for setting up Axi Ethernet hardware options */
  79static struct axienet_option axienet_options[] = {
  80	/* Turn on jumbo packet support for both Rx and Tx */
  81	{
  82		.opt = XAE_OPTION_JUMBO,
  83		.reg = XAE_TC_OFFSET,
  84		.m_or = XAE_TC_JUM_MASK,
  85	}, {
  86		.opt = XAE_OPTION_JUMBO,
  87		.reg = XAE_RCW1_OFFSET,
  88		.m_or = XAE_RCW1_JUM_MASK,
  89	}, { /* Turn on VLAN packet support for both Rx and Tx */
  90		.opt = XAE_OPTION_VLAN,
  91		.reg = XAE_TC_OFFSET,
  92		.m_or = XAE_TC_VLAN_MASK,
  93	}, {
  94		.opt = XAE_OPTION_VLAN,
  95		.reg = XAE_RCW1_OFFSET,
  96		.m_or = XAE_RCW1_VLAN_MASK,
  97	}, { /* Turn on FCS stripping on receive packets */
  98		.opt = XAE_OPTION_FCS_STRIP,
  99		.reg = XAE_RCW1_OFFSET,
 100		.m_or = XAE_RCW1_FCS_MASK,
 101	}, { /* Turn on FCS insertion on transmit packets */
 102		.opt = XAE_OPTION_FCS_INSERT,
 103		.reg = XAE_TC_OFFSET,
 104		.m_or = XAE_TC_FCS_MASK,
 105	}, { /* Turn off length/type field checking on receive packets */
 106		.opt = XAE_OPTION_LENTYPE_ERR,
 107		.reg = XAE_RCW1_OFFSET,
 108		.m_or = XAE_RCW1_LT_DIS_MASK,
 109	}, { /* Turn on Rx flow control */
 110		.opt = XAE_OPTION_FLOW_CONTROL,
 111		.reg = XAE_FCC_OFFSET,
 112		.m_or = XAE_FCC_FCRX_MASK,
 113	}, { /* Turn on Tx flow control */
 114		.opt = XAE_OPTION_FLOW_CONTROL,
 115		.reg = XAE_FCC_OFFSET,
 116		.m_or = XAE_FCC_FCTX_MASK,
 117	}, { /* Turn on promiscuous frame filtering */
 118		.opt = XAE_OPTION_PROMISC,
 119		.reg = XAE_FMI_OFFSET,
 120		.m_or = XAE_FMI_PM_MASK,
 121	}, { /* Enable transmitter */
 122		.opt = XAE_OPTION_TXEN,
 123		.reg = XAE_TC_OFFSET,
 124		.m_or = XAE_TC_TX_MASK,
 125	}, { /* Enable receiver */
 126		.opt = XAE_OPTION_RXEN,
 127		.reg = XAE_RCW1_OFFSET,
 128		.m_or = XAE_RCW1_RX_MASK,
 129	},
 130	{}
 131};
 132
 133static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
 134{
 135	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
 136}
 137
 138static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
 139{
 140	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
 141}
 142
 143/**
 144 * axienet_dma_in32 - Memory mapped Axi DMA register read
 145 * @lp:		Pointer to axienet local structure
 146 * @reg:	Address offset from the base address of the Axi DMA core
 147 *
 148 * Return: The contents of the Axi DMA register
 149 *
 150 * This function returns the contents of the corresponding Axi DMA register.
 151 */
 152static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 153{
 154	return ioread32(lp->dma_regs + reg);
 155}
 156
 157static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
 158			       struct axidma_bd *desc)
 159{
 160	desc->phys = lower_32_bits(addr);
 161	if (lp->features & XAE_FEATURE_DMA_64BIT)
 162		desc->phys_msb = upper_32_bits(addr);
 163}
 164
 165static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
 166				     struct axidma_bd *desc)
 167{
 168	dma_addr_t ret = desc->phys;
 169
 170	if (lp->features & XAE_FEATURE_DMA_64BIT)
 171		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
 172
 173	return ret;
 174}
 175
 176/**
 177 * axienet_dma_bd_release - Release buffer descriptor rings
 178 * @ndev:	Pointer to the net_device structure
 179 *
 180 * This function is used to release the descriptors allocated in
 181 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 182 * driver stop api is called.
 183 */
 184static void axienet_dma_bd_release(struct net_device *ndev)
 185{
 186	int i;
 187	struct axienet_local *lp = netdev_priv(ndev);
 188
 189	/* If we end up here, tx_bd_v must have been DMA allocated. */
 190	dma_free_coherent(lp->dev,
 191			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 192			  lp->tx_bd_v,
 193			  lp->tx_bd_p);
 194
 195	if (!lp->rx_bd_v)
 196		return;
 197
 198	for (i = 0; i < lp->rx_bd_num; i++) {
 199		dma_addr_t phys;
 200
 201		/* A NULL skb means this descriptor has not been initialised
 202		 * at all.
 203		 */
 204		if (!lp->rx_bd_v[i].skb)
 205			break;
 206
 207		dev_kfree_skb(lp->rx_bd_v[i].skb);
 208
 209		/* For each descriptor, we programmed cntrl with the (non-zero)
 210		 * descriptor size, after it had been successfully allocated.
 211		 * So a non-zero value in there means we need to unmap it.
 212		 */
 213		if (lp->rx_bd_v[i].cntrl) {
 214			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
 215			dma_unmap_single(lp->dev, phys,
 216					 lp->max_frm_size, DMA_FROM_DEVICE);
 217		}
 218	}
 219
 220	dma_free_coherent(lp->dev,
 221			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 222			  lp->rx_bd_v,
 223			  lp->rx_bd_p);
 224}
 225
 226/**
 227 * axienet_usec_to_timer - Calculate IRQ delay timer value
 228 * @lp:		Pointer to the axienet_local structure
 229 * @coalesce_usec: Microseconds to convert into timer value
 230 */
 231static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
 232{
 233	u32 result;
 234	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
 235
 236	if (lp->axi_clk)
 237		clk_rate = clk_get_rate(lp->axi_clk);
 238
 239	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
 240	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
 241					 (u64)125000000);
 242	if (result > 255)
 243		result = 255;
 244
 245	return result;
 246}
 247
 248/**
 249 * axienet_dma_start - Set up DMA registers and start DMA operation
 250 * @lp:		Pointer to the axienet_local structure
 251 */
 252static void axienet_dma_start(struct axienet_local *lp)
 253{
 254	/* Start updating the Rx channel control register */
 255	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
 256			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 257	/* Only set interrupt delay timer if not generating an interrupt on
 258	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
 259	 */
 260	if (lp->coalesce_count_rx > 1)
 261		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
 262					<< XAXIDMA_DELAY_SHIFT) |
 263				 XAXIDMA_IRQ_DELAY_MASK;
 264	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 265
 266	/* Start updating the Tx channel control register */
 267	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
 268			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 269	/* Only set interrupt delay timer if not generating an interrupt on
 270	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
 271	 */
 272	if (lp->coalesce_count_tx > 1)
 273		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
 274					<< XAXIDMA_DELAY_SHIFT) |
 275				 XAXIDMA_IRQ_DELAY_MASK;
 276	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 277
 278	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 279	 * halted state. This will make the Rx side ready for reception.
 280	 */
 281	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 282	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 283	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 284	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
 285			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
 286
 287	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 288	 * Tx channel is now ready to run. But only after we write to the
 289	 * tail pointer register that the Tx channel will start transmitting.
 290	 */
 291	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 292	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 293	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 294}
 295
 296/**
 297 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 298 * @ndev:	Pointer to the net_device structure
 299 *
 300 * Return: 0, on success -ENOMEM, on failure
 301 *
 302 * This function is called to initialize the Rx and Tx DMA descriptor
 303 * rings. This initializes the descriptors with required default values
 304 * and is called when Axi Ethernet driver reset is called.
 305 */
 306static int axienet_dma_bd_init(struct net_device *ndev)
 307{
 308	int i;
 309	struct sk_buff *skb;
 310	struct axienet_local *lp = netdev_priv(ndev);
 311
 312	/* Reset the indexes which are used for accessing the BDs */
 313	lp->tx_bd_ci = 0;
 314	lp->tx_bd_tail = 0;
 315	lp->rx_bd_ci = 0;
 316
 317	/* Allocate the Tx and Rx buffer descriptors. */
 318	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
 319					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 320					 &lp->tx_bd_p, GFP_KERNEL);
 321	if (!lp->tx_bd_v)
 322		return -ENOMEM;
 323
 324	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
 325					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 326					 &lp->rx_bd_p, GFP_KERNEL);
 327	if (!lp->rx_bd_v)
 328		goto out;
 329
 330	for (i = 0; i < lp->tx_bd_num; i++) {
 331		dma_addr_t addr = lp->tx_bd_p +
 332				  sizeof(*lp->tx_bd_v) *
 333				  ((i + 1) % lp->tx_bd_num);
 334
 335		lp->tx_bd_v[i].next = lower_32_bits(addr);
 336		if (lp->features & XAE_FEATURE_DMA_64BIT)
 337			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
 338	}
 339
 340	for (i = 0; i < lp->rx_bd_num; i++) {
 341		dma_addr_t addr;
 342
 343		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
 344			((i + 1) % lp->rx_bd_num);
 345		lp->rx_bd_v[i].next = lower_32_bits(addr);
 346		if (lp->features & XAE_FEATURE_DMA_64BIT)
 347			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
 348
 349		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 350		if (!skb)
 351			goto out;
 352
 353		lp->rx_bd_v[i].skb = skb;
 354		addr = dma_map_single(lp->dev, skb->data,
 355				      lp->max_frm_size, DMA_FROM_DEVICE);
 356		if (dma_mapping_error(lp->dev, addr)) {
 357			netdev_err(ndev, "DMA mapping error\n");
 358			goto out;
 359		}
 360		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
 361
 362		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 363	}
 364
 365	axienet_dma_start(lp);
 366
 367	return 0;
 368out:
 369	axienet_dma_bd_release(ndev);
 370	return -ENOMEM;
 371}
 372
 373/**
 374 * axienet_set_mac_address - Write the MAC address
 375 * @ndev:	Pointer to the net_device structure
 376 * @address:	6 byte Address to be written as MAC address
 377 *
 378 * This function is called to initialize the MAC address of the Axi Ethernet
 379 * core. It writes to the UAW0 and UAW1 registers of the core.
 380 */
 381static void axienet_set_mac_address(struct net_device *ndev,
 382				    const void *address)
 383{
 384	struct axienet_local *lp = netdev_priv(ndev);
 385
 386	if (address)
 387		eth_hw_addr_set(ndev, address);
 388	if (!is_valid_ether_addr(ndev->dev_addr))
 389		eth_hw_addr_random(ndev);
 390
 391	/* Set up unicast MAC address filter set its mac address */
 392	axienet_iow(lp, XAE_UAW0_OFFSET,
 393		    (ndev->dev_addr[0]) |
 394		    (ndev->dev_addr[1] << 8) |
 395		    (ndev->dev_addr[2] << 16) |
 396		    (ndev->dev_addr[3] << 24));
 397	axienet_iow(lp, XAE_UAW1_OFFSET,
 398		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
 399		      ~XAE_UAW1_UNICASTADDR_MASK) |
 400		     (ndev->dev_addr[4] |
 401		     (ndev->dev_addr[5] << 8))));
 402}
 403
 404/**
 405 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 406 * @ndev:	Pointer to the net_device structure
 407 * @p:		6 byte Address to be written as MAC address
 408 *
 409 * Return: 0 for all conditions. Presently, there is no failure case.
 410 *
 411 * This function is called to initialize the MAC address of the Axi Ethernet
 412 * core. It calls the core specific axienet_set_mac_address. This is the
 413 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 414 */
 415static int netdev_set_mac_address(struct net_device *ndev, void *p)
 416{
 417	struct sockaddr *addr = p;
 418
 419	axienet_set_mac_address(ndev, addr->sa_data);
 420	return 0;
 421}
 422
 423/**
 424 * axienet_set_multicast_list - Prepare the multicast table
 425 * @ndev:	Pointer to the net_device structure
 426 *
 427 * This function is called to initialize the multicast table during
 428 * initialization. The Axi Ethernet basic multicast support has a four-entry
 429 * multicast table which is initialized here. Additionally this function
 430 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 431 * means whenever the multicast table entries need to be updated this
 432 * function gets called.
 433 */
 434static void axienet_set_multicast_list(struct net_device *ndev)
 435{
 436	int i = 0;
 437	u32 reg, af0reg, af1reg;
 438	struct axienet_local *lp = netdev_priv(ndev);
 439
 440	reg = axienet_ior(lp, XAE_FMI_OFFSET);
 441	reg &= ~XAE_FMI_PM_MASK;
 442	if (ndev->flags & IFF_PROMISC)
 
 
 
 
 
 443		reg |= XAE_FMI_PM_MASK;
 444	else
 445		reg &= ~XAE_FMI_PM_MASK;
 446	axienet_iow(lp, XAE_FMI_OFFSET, reg);
 447
 448	if (ndev->flags & IFF_ALLMULTI ||
 449	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
 450		reg &= 0xFFFFFF00;
 451		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 452		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
 453		axienet_iow(lp, XAE_AF1_OFFSET, 0);
 454		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
 455		axienet_iow(lp, XAE_AM1_OFFSET, 0);
 456		axienet_iow(lp, XAE_FFE_OFFSET, 1);
 457		i = 1;
 458	} else if (!netdev_mc_empty(ndev)) {
 459		struct netdev_hw_addr *ha;
 460
 
 461		netdev_for_each_mc_addr(ha, ndev) {
 462			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
 463				break;
 464
 465			af0reg = (ha->addr[0]);
 466			af0reg |= (ha->addr[1] << 8);
 467			af0reg |= (ha->addr[2] << 16);
 468			af0reg |= (ha->addr[3] << 24);
 469
 470			af1reg = (ha->addr[4]);
 471			af1reg |= (ha->addr[5] << 8);
 472
 473			reg &= 0xFFFFFF00;
 474			reg |= i;
 475
 476			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 477			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
 478			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
 479			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
 480			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
 481			axienet_iow(lp, XAE_FFE_OFFSET, 1);
 482			i++;
 483		}
 484	}
 
 
 485
 486	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
 487		reg &= 0xFFFFFF00;
 488		reg |= i;
 489		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 490		axienet_iow(lp, XAE_FFE_OFFSET, 0);
 
 
 
 
 
 
 
 
 
 
 491	}
 492}
 493
 494/**
 495 * axienet_setoptions - Set an Axi Ethernet option
 496 * @ndev:	Pointer to the net_device structure
 497 * @options:	Option to be enabled/disabled
 498 *
 499 * The Axi Ethernet core has multiple features which can be selectively turned
 500 * on or off. The typical options could be jumbo frame option, basic VLAN
 501 * option, promiscuous mode option etc. This function is used to set or clear
 502 * these options in the Axi Ethernet hardware. This is done through
 503 * axienet_option structure .
 504 */
 505static void axienet_setoptions(struct net_device *ndev, u32 options)
 506{
 507	int reg;
 508	struct axienet_local *lp = netdev_priv(ndev);
 509	struct axienet_option *tp = &axienet_options[0];
 510
 511	while (tp->opt) {
 512		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
 513		if (options & tp->opt)
 514			reg |= tp->m_or;
 515		axienet_iow(lp, tp->reg, reg);
 516		tp++;
 517	}
 518
 519	lp->options |= options;
 520}
 521
 522static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
 523{
 524	u32 counter;
 525
 526	if (lp->reset_in_progress)
 527		return lp->hw_stat_base[stat];
 528
 529	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
 530	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
 531}
 532
 533static void axienet_stats_update(struct axienet_local *lp, bool reset)
 534{
 535	enum temac_stat stat;
 536
 537	write_seqcount_begin(&lp->hw_stats_seqcount);
 538	lp->reset_in_progress = reset;
 539	for (stat = 0; stat < STAT_COUNT; stat++) {
 540		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
 541
 542		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
 543		lp->hw_last_counter[stat] = counter;
 544	}
 545	write_seqcount_end(&lp->hw_stats_seqcount);
 546}
 547
 548static void axienet_refresh_stats(struct work_struct *work)
 549{
 550	struct axienet_local *lp = container_of(work, struct axienet_local,
 551						stats_work.work);
 552
 553	mutex_lock(&lp->stats_lock);
 554	axienet_stats_update(lp, false);
 555	mutex_unlock(&lp->stats_lock);
 556
 557	/* Just less than 2^32 bytes at 2.5 GBit/s */
 558	schedule_delayed_work(&lp->stats_work, 13 * HZ);
 559}
 560
 561static int __axienet_device_reset(struct axienet_local *lp)
 562{
 563	u32 value;
 564	int ret;
 565
 566	/* Save statistics counters in case they will be reset */
 567	mutex_lock(&lp->stats_lock);
 568	if (lp->features & XAE_FEATURE_STATS)
 569		axienet_stats_update(lp, true);
 570
 571	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 572	 * process of Axi DMA takes a while to complete as all pending
 573	 * commands/transfers will be flushed or completed during this
 574	 * reset process.
 575	 * Note that even though both TX and RX have their own reset register,
 576	 * they both reset the entire DMA core, so only one needs to be used.
 577	 */
 578	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
 579	ret = read_poll_timeout(axienet_dma_in32, value,
 580				!(value & XAXIDMA_CR_RESET_MASK),
 581				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 582				XAXIDMA_TX_CR_OFFSET);
 583	if (ret) {
 584		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
 585		goto out;
 586	}
 587
 588	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
 589	ret = read_poll_timeout(axienet_ior, value,
 590				value & XAE_INT_PHYRSTCMPLT_MASK,
 591				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 592				XAE_IS_OFFSET);
 593	if (ret) {
 594		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
 595		goto out;
 596	}
 597
 598	/* Update statistics counters with new values */
 599	if (lp->features & XAE_FEATURE_STATS) {
 600		enum temac_stat stat;
 601
 602		write_seqcount_begin(&lp->hw_stats_seqcount);
 603		lp->reset_in_progress = false;
 604		for (stat = 0; stat < STAT_COUNT; stat++) {
 605			u32 counter =
 606				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
 607
 608			lp->hw_stat_base[stat] +=
 609				lp->hw_last_counter[stat] - counter;
 610			lp->hw_last_counter[stat] = counter;
 611		}
 612		write_seqcount_end(&lp->hw_stats_seqcount);
 613	}
 614
 615out:
 616	mutex_unlock(&lp->stats_lock);
 617	return ret;
 618}
 619
 620/**
 621 * axienet_dma_stop - Stop DMA operation
 622 * @lp:		Pointer to the axienet_local structure
 623 */
 624static void axienet_dma_stop(struct axienet_local *lp)
 625{
 626	int count;
 627	u32 cr, sr;
 628
 629	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 630	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 631	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 632	synchronize_irq(lp->rx_irq);
 633
 634	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 635	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 636	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 637	synchronize_irq(lp->tx_irq);
 638
 639	/* Give DMAs a chance to halt gracefully */
 640	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 641	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 642		msleep(20);
 643		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 644	}
 645
 646	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 647	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 648		msleep(20);
 649		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 650	}
 651
 652	/* Do a reset to ensure DMA is really stopped */
 653	axienet_lock_mii(lp);
 654	__axienet_device_reset(lp);
 655	axienet_unlock_mii(lp);
 656}
 657
 658/**
 659 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 660 * @ndev:	Pointer to the net_device structure
 661 *
 662 * This function is called to reset and initialize the Axi Ethernet core. This
 663 * is typically called during initialization. It does a reset of the Axi DMA
 664 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 665 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 666 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 667 * core.
 668 * Returns 0 on success or a negative error number otherwise.
 669 */
 670static int axienet_device_reset(struct net_device *ndev)
 671{
 672	u32 axienet_status;
 673	struct axienet_local *lp = netdev_priv(ndev);
 674	int ret;
 675
 676	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 677	lp->options |= XAE_OPTION_VLAN;
 678	lp->options &= (~XAE_OPTION_JUMBO);
 679
 680	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
 
 681		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
 682					XAE_TRL_SIZE;
 683
 684		if (lp->max_frm_size <= lp->rxmem)
 685			lp->options |= XAE_OPTION_JUMBO;
 686	}
 687
 688	if (!lp->use_dmaengine) {
 689		ret = __axienet_device_reset(lp);
 690		if (ret)
 691			return ret;
 692
 693		ret = axienet_dma_bd_init(ndev);
 694		if (ret) {
 695			netdev_err(ndev, "%s: descriptor allocation failed\n",
 696				   __func__);
 697			return ret;
 698		}
 699	}
 700
 701	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
 702	axienet_status &= ~XAE_RCW1_RX_MASK;
 703	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
 704
 705	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
 706	if (axienet_status & XAE_INT_RXRJECT_MASK)
 707		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
 708	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
 709		    XAE_INT_RECV_ERROR_MASK : 0);
 710
 711	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 712
 713	/* Sync default options with HW but leave receiver and
 714	 * transmitter disabled.
 715	 */
 716	axienet_setoptions(ndev, lp->options &
 717			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 718	axienet_set_mac_address(ndev, NULL);
 719	axienet_set_multicast_list(ndev);
 720	axienet_setoptions(ndev, lp->options);
 721
 722	netif_trans_update(ndev);
 723
 724	return 0;
 725}
 726
 727/**
 728 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 729 * @lp:		Pointer to the axienet_local structure
 730 * @first_bd:	Index of first descriptor to clean up
 731 * @nr_bds:	Max number of descriptors to clean up
 732 * @force:	Whether to clean descriptors even if not complete
 733 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 734 *		in all cleaned-up descriptors. Ignored if NULL.
 735 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 736 *
 737 * Called either after a successful transmit operation or after there was
 738 * an error when setting up the chain.
 739 * Return: the number of packets handled.
 740 */
 741static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
 742				 int nr_bds, bool force, u32 *sizep, int budget)
 743{
 744	struct axidma_bd *cur_p;
 745	unsigned int status;
 746	int i, packets = 0;
 747	dma_addr_t phys;
 748
 749	for (i = 0; i < nr_bds; i++) {
 750		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
 751		status = cur_p->status;
 752
 753		/* If force is not specified, clean up only descriptors
 754		 * that have been completed by the MAC.
 755		 */
 756		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 757			break;
 758
 759		/* Ensure we see complete descriptor update */
 760		dma_rmb();
 761		phys = desc_get_phys_addr(lp, cur_p);
 762		dma_unmap_single(lp->dev, phys,
 763				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 764				 DMA_TO_DEVICE);
 765
 766		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
 767			napi_consume_skb(cur_p->skb, budget);
 768			packets++;
 769		}
 770
 771		cur_p->app0 = 0;
 772		cur_p->app1 = 0;
 773		cur_p->app2 = 0;
 774		cur_p->app4 = 0;
 775		cur_p->skb = NULL;
 776		/* ensure our transmit path and device don't prematurely see status cleared */
 777		wmb();
 778		cur_p->cntrl = 0;
 779		cur_p->status = 0;
 780
 781		if (sizep)
 782			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 783	}
 784
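	/* Only advance the consumer index when cleaning completed
	 * descriptors; the error path (force) cleans descriptors that were
	 * never handed to the hardware.
	 */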
 785	if (!force) {
 786		lp->tx_bd_ci += i;
 787		if (lp->tx_bd_ci >= lp->tx_bd_num)
 788			lp->tx_bd_ci %= lp->tx_bd_num;
 789	}
 790
 791	return packets;
 792}
 793
 794/**
 795 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 796 * @lp:		Pointer to the axienet_local structure
 797 * @num_frag:	The number of BDs to check for
 798 *
 799 * Return: 0, on success
 800 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 801 *
 802 * This function is invoked before BDs are allocated and transmission starts.
 803 * It returns 0 if a BD or group of BDs can be allocated for transmission.
 804 * If the BD or any of the BDs are not free, the function returns a busy
 805 * status.
 806 */
 807static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 808					    int num_frag)
 809{
 810	struct axidma_bd *cur_p;
 811
 812	/* Ensure we see all descriptor updates from device or TX polling */
 813	rmb();
 814	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
 815			     lp->tx_bd_num];
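	/* The slot num_frag entries ahead of the tail must already have been
	 * cleaned (cntrl == 0) for a new descriptor chain of that length to fit.
	 */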
 816	if (cur_p->cntrl)
 817		return NETDEV_TX_BUSY;
 818	return 0;
 819}
 820
 821/**
 822 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 823 * @data:       Pointer to the axienet_local structure.
 824 * @result:     error reporting through dmaengine_result.
 825 * This function is called by the dmaengine driver for the TX channel to
 826 * notify that the transmit is done.
 827 */
 828static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
 829{
 830	struct skbuf_dma_descriptor *skbuf_dma;
 831	struct axienet_local *lp = data;
 832	struct netdev_queue *txq;
 833	int len;
 834
 835	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
 836	len = skbuf_dma->skb->len;
 837	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
 838	u64_stats_update_begin(&lp->tx_stat_sync);
 839	u64_stats_add(&lp->tx_bytes, len);
 840	u64_stats_add(&lp->tx_packets, 1);
 841	u64_stats_update_end(&lp->tx_stat_sync);
 842	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
 843	dev_consume_skb_any(skbuf_dma->skb);
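	/* Report the completed packet/bytes to the stack and wake the queue
	 * once at least 2 * MAX_SKB_FRAGS ring slots are free again.
	 */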
 844	netif_txq_completed_wake(txq, 1, len,
 845				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 846				 2 * MAX_SKB_FRAGS);
 847}
 848
 849/**
 850 * axienet_start_xmit_dmaengine - Starts the transmission.
 851 * @skb:        sk_buff pointer that contains data to be Txed.
 852 * @ndev:       Pointer to net_device structure.
 853 *
 854 * Return: NETDEV_TX_OK on success, and also for any error other than
 855 *         lack of ring space (in which case the skb is dropped).
 856 *         NETDEV_TX_BUSY when no free element is available in the TX
 857 *         skb ring buffer.
 858 *
 859 * This function is invoked to initiate transmission. It maps the skb,
 860 * registers the DMA callback and submits the DMA transaction.
 861 * Additionally, if checksum offloading is supported, it populates the
 862 * AXI Stream Control fields with appropriate values.
 863 */
 864static netdev_tx_t
 865axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
 866{
 867	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
 868	struct axienet_local *lp = netdev_priv(ndev);
 869	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
 870	struct skbuf_dma_descriptor *skbuf_dma;
 871	struct dma_device *dma_dev;
 872	struct netdev_queue *txq;
 873	u32 csum_start_off;
 874	u32 csum_index_off;
 875	int sg_len;
 876	int ret;
 877
 878	dma_dev = lp->tx_chan->device;
 879	sg_len = skb_shinfo(skb)->nr_frags + 1;
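	/* One scatterlist entry is needed for the linear part of the skb plus
	 * one for each fragment.
	 */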
 880	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
 881		netif_stop_queue(ndev);
 882		if (net_ratelimit())
 883			netdev_warn(ndev, "TX ring unexpectedly full\n");
 884		return NETDEV_TX_BUSY;
 885	}
 886
 887	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
 888	if (!skbuf_dma)
 889		goto xmit_error_drop_skb;
 890
 891	lp->tx_ring_head++;
 892	sg_init_table(skbuf_dma->sgl, sg_len);
 893	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
 894	if (ret < 0)
 895		goto xmit_error_drop_skb;
 896
 897	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
 898	if (!ret)
 899		goto xmit_error_drop_skb;
 900
 901	/* Fill up app fields for checksum */
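	/* app_metadata is carried to the MAC in the DMA application (APP)
	 * words: word 0 selects full (2) or partial (1) TX checksum offload
	 * and word 1 holds the checksum start and insert offsets for the
	 * partial case.
	 */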
 902	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 903		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 904			/* Tx Full Checksum Offload Enabled */
 905			app_metadata[0] |= 2;
 906		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
 907			csum_start_off = skb_transport_offset(skb);
 908			csum_index_off = csum_start_off + skb->csum_offset;
 909			/* Tx Partial Checksum Offload Enabled */
 910			app_metadata[0] |= 1;
 911			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
 912		}
 913	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 914		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
 915	}
 916
 917	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
 918			sg_len, DMA_MEM_TO_DEV,
 919			DMA_PREP_INTERRUPT, (void *)app_metadata);
 920	if (!dma_tx_desc)
 921		goto xmit_error_unmap_sg;
 922
 923	skbuf_dma->skb = skb;
 924	skbuf_dma->sg_len = sg_len;
 925	dma_tx_desc->callback_param = lp;
 926	dma_tx_desc->callback_result = axienet_dma_tx_cb;
 927	txq = skb_get_tx_queue(lp->ndev, skb);
 928	netdev_tx_sent_queue(txq, skb->len);
 929	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 930			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
 931
 932	dmaengine_submit(dma_tx_desc);
 933	dma_async_issue_pending(lp->tx_chan);
 934	return NETDEV_TX_OK;
 935
 936xmit_error_unmap_sg:
 937	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
 938xmit_error_drop_skb:
 939	dev_kfree_skb_any(skb);
 940	return NETDEV_TX_OK;
 941}
 942
 943/**
 944 * axienet_tx_poll - Invoked once a transmit is completed by the
 945 * Axi DMA Tx channel.
 946 * @napi:	Pointer to NAPI structure.
 947 * @budget:	Max number of TX packets to process.
 948 *
 949 * Return: Number of TX packets processed.
 950 *
  951 * This function is invoked from NAPI processing to handle the completion
  952 * of a transmit operation. It clears fields in the corresponding Tx BDs and
  953 * unmaps the corresponding buffer so that the CPU can regain ownership of
  954 * the buffer. It finally invokes "netif_wake_queue" to restart transmission
  955 * if required.
 956 */
 957static int axienet_tx_poll(struct napi_struct *napi, int budget)
 958{
 959	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
 960	struct net_device *ndev = lp->ndev;
 961	u32 size = 0;
 962	int packets;
 963
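	/* Clean at most one ring's worth of descriptors; cleanup stops at the
	 * first descriptor not yet completed by the hardware.
	 */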
 964	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
 965					&size, budget);
 966
 967	if (packets) {
 968		u64_stats_update_begin(&lp->tx_stat_sync);
 969		u64_stats_add(&lp->tx_packets, packets);
 970		u64_stats_add(&lp->tx_bytes, size);
 971		u64_stats_update_end(&lp->tx_stat_sync);
 972
 973		/* Matches barrier in axienet_start_xmit */
 974		smp_mb();
 975
 976		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
 977			netif_wake_queue(ndev);
 978	}
 979
 980	if (packets < budget && napi_complete_done(napi, packets)) {
 981		/* Re-enable TX completion interrupts. This should
 982		 * cause an immediate interrupt if any TX packets are
 983		 * already pending.
 984		 */
 985		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 986	}
 987	return packets;
 988}
 989
 990/**
 991 * axienet_start_xmit - Starts the transmission.
 992 * @skb:	sk_buff pointer that contains data to be Txed.
 993 * @ndev:	Pointer to net_device structure.
 994 *
 995 * Return: NETDEV_TX_OK, on success
 996 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 997 *
 998 * This function is invoked from upper layers to initiate transmission. The
 999 * function uses the next available free BDs and populates their fields to
 1000 * start the transmission. Additionally, if checksum offloading is supported,
 1001 * it populates the AXI Stream Control fields with appropriate values.
1002 */
1003static netdev_tx_t
1004axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1005{
1006	u32 ii;
1007	u32 num_frag;
1008	u32 csum_start_off;
1009	u32 csum_index_off;
1010	skb_frag_t *frag;
1011	dma_addr_t tail_p, phys;
1012	u32 orig_tail_ptr, new_tail_ptr;
1013	struct axienet_local *lp = netdev_priv(ndev);
1014	struct axidma_bd *cur_p;
1015
1016	orig_tail_ptr = lp->tx_bd_tail;
1017	new_tail_ptr = orig_tail_ptr;
1018
1019	num_frag = skb_shinfo(skb)->nr_frags;
1020	cur_p = &lp->tx_bd_v[orig_tail_ptr];
1021
1022	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
1023		/* Should not happen as last start_xmit call should have
1024		 * checked for sufficient space and queue should only be
1025		 * woken when sufficient space is available.
1026		 */
1027		netif_stop_queue(ndev);
1028		if (net_ratelimit())
1029			netdev_warn(ndev, "TX ring unexpectedly full\n");
1030		return NETDEV_TX_BUSY;
1031	}
1032
1033	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1034		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1035			/* Tx Full Checksum Offload Enabled */
1036			cur_p->app0 |= 2;
1037		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
1038			csum_start_off = skb_transport_offset(skb);
1039			csum_index_off = csum_start_off + skb->csum_offset;
1040			/* Tx Partial Checksum Offload Enabled */
1041			cur_p->app0 |= 1;
1042			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1043		}
1044	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1045		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1046	}
1047
1048	phys = dma_map_single(lp->dev, skb->data,
1049			      skb_headlen(skb), DMA_TO_DEVICE);
1050	if (unlikely(dma_mapping_error(lp->dev, phys))) {
1051		if (net_ratelimit())
1052			netdev_err(ndev, "TX DMA mapping error\n");
1053		ndev->stats.tx_dropped++;
1054		dev_kfree_skb_any(skb);
1055		return NETDEV_TX_OK;
1056	}
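	/* The first descriptor carries the linear part of the skb and the
	 * start-of-frame flag; each fragment gets its own descriptor below.
	 */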
1057	desc_set_phys_addr(lp, phys, cur_p);
1058	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
1059
1060	for (ii = 0; ii < num_frag; ii++) {
1061		if (++new_tail_ptr >= lp->tx_bd_num)
1062			new_tail_ptr = 0;
1063		cur_p = &lp->tx_bd_v[new_tail_ptr];
1064		frag = &skb_shinfo(skb)->frags[ii];
1065		phys = dma_map_single(lp->dev,
1066				      skb_frag_address(frag),
1067				      skb_frag_size(frag),
1068				      DMA_TO_DEVICE);
1069		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1070			if (net_ratelimit())
1071				netdev_err(ndev, "TX DMA mapping error\n");
1072			ndev->stats.tx_dropped++;
1073			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1074					      true, NULL, 0);
1075			dev_kfree_skb_any(skb);
1076			return NETDEV_TX_OK;
1077		}
1078		desc_set_phys_addr(lp, phys, cur_p);
1079		cur_p->cntrl = skb_frag_size(frag);
1080	}
1081
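	/* Mark the last descriptor with the end-of-frame flag and record the
	 * skb only there, so it is freed exactly once when the whole frame
	 * completes.
	 */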
1082	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1083	cur_p->skb = skb;
1084
1085	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1086	if (++new_tail_ptr >= lp->tx_bd_num)
1087		new_tail_ptr = 0;
1088	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1089
1090	/* Start the transfer */
1091	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1092
1093	/* Stop queue if next transmit may not have space */
1094	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1095		netif_stop_queue(ndev);
1096
1097		/* Matches barrier in axienet_tx_poll */
1098		smp_mb();
1099
1100		/* Space might have just been freed - check again */
1101		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1102			netif_wake_queue(ndev);
1103	}
1104
1105	return NETDEV_TX_OK;
1106}
1107
1108/**
1109 * axienet_dma_rx_cb - DMA engine callback for RX channel.
 1110 * @data:       Pointer to the axienet_local structure.
 1111 * @result:     error reporting through dmaengine_result.
 1112 * This function is called by the dmaengine driver for the RX channel to
 1113 * notify that the packet is received.
1114 */
1115static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1116{
1117	struct skbuf_dma_descriptor *skbuf_dma;
1118	size_t meta_len, meta_max_len, rx_len;
1119	struct axienet_local *lp = data;
1120	struct sk_buff *skb;
1121	u32 *app_metadata;
1122
1123	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1124	skb = skbuf_dma->skb;
1125	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1126						       &meta_max_len);
1127	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1128			 DMA_FROM_DEVICE);
1129	/* TODO: Derive app word index programmatically */
1130	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1131	skb_put(skb, rx_len);
1132	skb->protocol = eth_type_trans(skb, lp->ndev);
1133	skb->ip_summed = CHECKSUM_NONE;
1134
1135	__netif_rx(skb);
1136	u64_stats_update_begin(&lp->rx_stat_sync);
1137	u64_stats_add(&lp->rx_packets, 1);
1138	u64_stats_add(&lp->rx_bytes, rx_len);
1139	u64_stats_update_end(&lp->rx_stat_sync);
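	/* Refill the slot that was just consumed and restart the RX channel. */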
1140	axienet_rx_submit_desc(lp->ndev);
1141	dma_async_issue_pending(lp->rx_chan);
1142}
1143
1144/**
1145 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1146 * @napi:	Pointer to NAPI structure.
1147 * @budget:	Max number of RX packets to process.
1148 *
1149 * Return: Number of RX packets processed.
1150 */
1151static int axienet_rx_poll(struct napi_struct *napi, int budget)
1152{
1153	u32 length;
1154	u32 csumstatus;
1155	u32 size = 0;
1156	int packets = 0;
1157	dma_addr_t tail_p = 0;
1158	struct axidma_bd *cur_p;
1159	struct sk_buff *skb, *new_skb;
1160	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1161
1162	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1163
1164	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1165		dma_addr_t phys;
1166
1167		/* Ensure we see complete descriptor update */
1168		dma_rmb();
1169
1170		skb = cur_p->skb;
1171		cur_p->skb = NULL;
1172
1173		/* skb could be NULL if a previous pass already received the
1174		 * packet for this slot in the ring, but failed to refill it
1175		 * with a newly allocated buffer. In this case, don't try to
1176		 * receive it again.
1177		 */
1178		if (likely(skb)) {
1179			length = cur_p->app4 & 0x0000FFFF;
1180
1181			phys = desc_get_phys_addr(lp, cur_p);
1182			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1183					 DMA_FROM_DEVICE);
1184
1185			skb_put(skb, length);
1186			skb->protocol = eth_type_trans(skb, lp->ndev);
1187			/*skb_checksum_none_assert(skb);*/
1188			skb->ip_summed = CHECKSUM_NONE;
1189
1190			/* if we're doing Rx csum offload, set it up */
1191			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1192				csumstatus = (cur_p->app2 &
1193					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
1194				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1195				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1196					skb->ip_summed = CHECKSUM_UNNECESSARY;
1197				}
1198			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1199				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1200				skb->ip_summed = CHECKSUM_COMPLETE;
1201			}
1202
1203			napi_gro_receive(napi, skb);
1204
1205			size += length;
1206			packets++;
1207		}
1208
1209		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1210		if (!new_skb)
1211			break;
1212
1213		phys = dma_map_single(lp->dev, new_skb->data,
1214				      lp->max_frm_size,
1215				      DMA_FROM_DEVICE);
1216		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1217			if (net_ratelimit())
1218				netdev_err(lp->ndev, "RX DMA mapping error\n");
1219			dev_kfree_skb(new_skb);
1220			break;
1221		}
1222		desc_set_phys_addr(lp, phys, cur_p);
1223
1224		cur_p->cntrl = lp->max_frm_size;
1225		cur_p->status = 0;
1226		cur_p->skb = new_skb;
1227
1228		/* Only update tail_p to mark this slot as usable after it has
1229		 * been successfully refilled.
1230		 */
1231		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1232
1233		if (++lp->rx_bd_ci >= lp->rx_bd_num)
1234			lp->rx_bd_ci = 0;
1235		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1236	}
1237
1238	u64_stats_update_begin(&lp->rx_stat_sync);
1239	u64_stats_add(&lp->rx_packets, packets);
1240	u64_stats_add(&lp->rx_bytes, size);
1241	u64_stats_update_end(&lp->rx_stat_sync);
1242
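	/* Writing the tail descriptor pointer tells the DMA engine it may use
	 * the refilled buffer descriptors up to that point.
	 */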
1243	if (tail_p)
1244		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1245
1246	if (packets < budget && napi_complete_done(napi, packets)) {
1247		/* Re-enable RX completion interrupts. This should
1248		 * cause an immediate interrupt if any RX packets are
1249		 * already pending.
1250		 */
1251		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1252	}
1253	return packets;
1254}
1255
1256/**
1257 * axienet_tx_irq - Tx Done Isr.
1258 * @irq:	irq number
1259 * @_ndev:	net_device pointer
1260 *
1261 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1262 *
1263 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1264 * TX BD processing.
1265 */
1266static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1267{
1268	unsigned int status;
1269	struct net_device *ndev = _ndev;
1270	struct axienet_local *lp = netdev_priv(ndev);
1271
1272	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1273
1274	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1275		return IRQ_NONE;
1276
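	/* Acknowledge the interrupt; the status bits are write-one-to-clear. */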
1277	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1278
1279	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1280		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1281		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1282			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1283			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1284		schedule_work(&lp->dma_err_task);
1285	} else {
1286		/* Disable further TX completion interrupts and schedule
1287		 * NAPI to handle the completions.
1288		 */
1289		u32 cr = lp->tx_dma_cr;
1290
1291		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1292		if (napi_schedule_prep(&lp->napi_tx)) {
1293			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1294			__napi_schedule(&lp->napi_tx);
1295		}
1296	}
1297
1298	return IRQ_HANDLED;
1299}
1300
1301/**
1302 * axienet_rx_irq - Rx Isr.
1303 * @irq:	irq number
1304 * @_ndev:	net_device pointer
1305 *
1306 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1307 *
1308 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1309 * processing.
1310 */
1311static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1312{
1313	unsigned int status;
1314	struct net_device *ndev = _ndev;
1315	struct axienet_local *lp = netdev_priv(ndev);
1316
1317	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1318
1319	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1320		return IRQ_NONE;
1321
1322	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1323
1324	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1325		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1326		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1327			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1328			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1329		schedule_work(&lp->dma_err_task);
1330	} else {
1331		/* Disable further RX completion interrupts and schedule
1332		 * NAPI receive.
1333		 */
1334		u32 cr = lp->rx_dma_cr;
1335
1336		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1337		if (napi_schedule_prep(&lp->napi_rx)) {
1338			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1339			__napi_schedule(&lp->napi_rx);
1340		}
1341	}
1342
1343	return IRQ_HANDLED;
1344}
1345
1346/**
1347 * axienet_eth_irq - Ethernet core Isr.
1348 * @irq:	irq number
1349 * @_ndev:	net_device pointer
1350 *
1351 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1352 *
1353 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1354 */
1355static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1356{
1357	struct net_device *ndev = _ndev;
1358	struct axienet_local *lp = netdev_priv(ndev);
1359	unsigned int pending;
1360
1361	pending = axienet_ior(lp, XAE_IP_OFFSET);
1362	if (!pending)
1363		return IRQ_NONE;
1364
1365	if (pending & XAE_INT_RXFIFOOVR_MASK)
1366		ndev->stats.rx_missed_errors++;
1367
1368	if (pending & XAE_INT_RXRJECT_MASK)
1369		ndev->stats.rx_dropped++;
1370
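	/* Acknowledge all pending Ethernet core interrupts in one write. */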
1371	axienet_iow(lp, XAE_IS_OFFSET, pending);
1372	return IRQ_HANDLED;
1373}
1374
1375static void axienet_dma_err_handler(struct work_struct *work);
1376
1377/**
 1378 * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
 1379 * Allocate an skbuff, map it into the scatterlist, obtain a descriptor,
 1380 * then add the callback information and submit the descriptor.
1381 *
1382 * @ndev:	net_device pointer
1383 *
1384 */
1385static void axienet_rx_submit_desc(struct net_device *ndev)
1386{
1387	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1388	struct axienet_local *lp = netdev_priv(ndev);
1389	struct skbuf_dma_descriptor *skbuf_dma;
1390	struct sk_buff *skb;
1391	dma_addr_t addr;
1392
1393	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1394	if (!skbuf_dma)
1395		return;
1396
1397	lp->rx_ring_head++;
1398	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1399	if (!skb)
1400		return;
1401
1402	sg_init_table(skbuf_dma->sgl, 1);
1403	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1404	if (unlikely(dma_mapping_error(lp->dev, addr))) {
1405		if (net_ratelimit())
1406			netdev_err(ndev, "DMA mapping error\n");
1407		goto rx_submit_err_free_skb;
1408	}
1409	sg_dma_address(skbuf_dma->sgl) = addr;
1410	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1411	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1412					      1, DMA_DEV_TO_MEM,
1413					      DMA_PREP_INTERRUPT);
1414	if (!dma_rx_desc)
1415		goto rx_submit_err_unmap_skb;
1416
1417	skbuf_dma->skb = skb;
1418	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1419	skbuf_dma->desc = dma_rx_desc;
1420	dma_rx_desc->callback_param = lp;
1421	dma_rx_desc->callback_result = axienet_dma_rx_cb;
1422	dmaengine_submit(dma_rx_desc);
1423
1424	return;
1425
1426rx_submit_err_unmap_skb:
1427	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1428rx_submit_err_free_skb:
1429	dev_kfree_skb(skb);
1430}
1431
1432/**
1433 * axienet_init_dmaengine - init the dmaengine code.
1434 * @ndev:       Pointer to net_device structure
1435 *
1436 * Return: 0, on success.
1437 *          non-zero error value on failure
1438 *
1439 * This is the dmaengine initialization code.
1440 */
1441static int axienet_init_dmaengine(struct net_device *ndev)
1442{
1443	struct axienet_local *lp = netdev_priv(ndev);
1444	struct skbuf_dma_descriptor *skbuf_dma;
1445	int i, ret;
1446
1447	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1448	if (IS_ERR(lp->tx_chan)) {
1449		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1450		return PTR_ERR(lp->tx_chan);
1451	}
1452
1453	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1454	if (IS_ERR(lp->rx_chan)) {
1455		ret = PTR_ERR(lp->rx_chan);
1456		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1457		goto err_dma_release_tx;
1458	}
1459
1460	lp->tx_ring_tail = 0;
1461	lp->tx_ring_head = 0;
1462	lp->rx_ring_tail = 0;
1463	lp->rx_ring_head = 0;
1464	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1465				  GFP_KERNEL);
1466	if (!lp->tx_skb_ring) {
1467		ret = -ENOMEM;
1468		goto err_dma_release_rx;
1469	}
1470	for (i = 0; i < TX_BD_NUM_MAX; i++) {
1471		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1472		if (!skbuf_dma) {
1473			ret = -ENOMEM;
1474			goto err_free_tx_skb_ring;
1475		}
1476		lp->tx_skb_ring[i] = skbuf_dma;
1477	}
1478
1479	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1480				  GFP_KERNEL);
1481	if (!lp->rx_skb_ring) {
1482		ret = -ENOMEM;
1483		goto err_free_tx_skb_ring;
1484	}
1485	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1486		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1487		if (!skbuf_dma) {
1488			ret = -ENOMEM;
1489			goto err_free_rx_skb_ring;
1490		}
1491		lp->rx_skb_ring[i] = skbuf_dma;
1492	}
1493	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
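	/* Prime the RX channel with RX_BUF_NUM_DEFAULT buffers so frames can
	 * be received as soon as the channel is started.
	 */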
1494	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1495		axienet_rx_submit_desc(ndev);
1496	dma_async_issue_pending(lp->rx_chan);
1497
1498	return 0;
1499
1500err_free_rx_skb_ring:
1501	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1502		kfree(lp->rx_skb_ring[i]);
1503	kfree(lp->rx_skb_ring);
1504err_free_tx_skb_ring:
1505	for (i = 0; i < TX_BD_NUM_MAX; i++)
1506		kfree(lp->tx_skb_ring[i]);
1507	kfree(lp->tx_skb_ring);
1508err_dma_release_rx:
1509	dma_release_channel(lp->rx_chan);
1510err_dma_release_tx:
1511	dma_release_channel(lp->tx_chan);
1512	return ret;
1513}
1514
1515/**
 1516 * axienet_init_legacy_dma - init the legacy DMA code.
1517 * @ndev:       Pointer to net_device structure
1518 *
1519 * Return: 0, on success.
1520 *          non-zero error value on failure
1521 *
 1522 * This is the legacy DMA initialization code. It also requests the Tx/Rx
 1523 * interrupt lines and registers the interrupt service routines.
1524 *
1525 */
1526static int axienet_init_legacy_dma(struct net_device *ndev)
1527{
1528	int ret;
1529	struct axienet_local *lp = netdev_priv(ndev);
1530
1531	/* Enable worker thread for Axi DMA error handling */
1532	lp->stopping = false;
1533	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1534
1535	napi_enable(&lp->napi_rx);
1536	napi_enable(&lp->napi_tx);
1537
1538	/* Enable interrupts for Axi DMA Tx */
1539	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1540			  ndev->name, ndev);
1541	if (ret)
1542		goto err_tx_irq;
1543	/* Enable interrupts for Axi DMA Rx */
1544	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1545			  ndev->name, ndev);
1546	if (ret)
1547		goto err_rx_irq;
1548	/* Enable interrupts for Axi Ethernet core (if defined) */
1549	if (lp->eth_irq > 0) {
1550		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1551				  ndev->name, ndev);
1552		if (ret)
1553			goto err_eth_irq;
1554	}
1555
1556	return 0;
1557
1558err_eth_irq:
1559	free_irq(lp->rx_irq, ndev);
1560err_rx_irq:
1561	free_irq(lp->tx_irq, ndev);
1562err_tx_irq:
1563	napi_disable(&lp->napi_tx);
1564	napi_disable(&lp->napi_rx);
1565	cancel_work_sync(&lp->dma_err_task);
1566	dev_err(lp->dev, "request_irq() failed\n");
1567	return ret;
1568}
1569
1570/**
1571 * axienet_open - Driver open routine.
1572 * @ndev:	Pointer to net_device structure
1573 *
1574 * Return: 0, on success.
1575 *	    non-zero error value on failure
1576 *
1577 * This is the driver open routine. It calls phylink_start to start the
1578 * PHY device.
 1579 * It also registers the interrupt service routines and enables the interrupt
 1580 * lines. The Axi Ethernet core is reset through the Axi DMA core, and the
 1581 * buffer descriptors are initialized.
1582 */
1583static int axienet_open(struct net_device *ndev)
1584{
1585	int ret;
1586	struct axienet_local *lp = netdev_priv(ndev);
1587
1588	/* When we do an Axi Ethernet reset, it resets the complete core
1589	 * including the MDIO. MDIO must be disabled before resetting.
1590	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1591	 */
1592	axienet_lock_mii(lp);
1593	ret = axienet_device_reset(ndev);
 1594	axienet_unlock_mii(lp);
	if (ret)
		return ret;
 1595
1596	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1597	if (ret) {
1598		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1599		return ret;
1600	}
1601
1602	phylink_start(lp->phylink);
1603
1604	/* Start the statistics refresh work */
1605	schedule_delayed_work(&lp->stats_work, 0);
1606
1607	if (lp->use_dmaengine) {
1608		/* Enable interrupts for Axi Ethernet core (if defined) */
1609		if (lp->eth_irq > 0) {
1610			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1611					  ndev->name, ndev);
1612			if (ret)
1613				goto err_phy;
1614		}
1615
1616		ret = axienet_init_dmaengine(ndev);
1617		if (ret < 0)
1618			goto err_free_eth_irq;
1619	} else {
1620		ret = axienet_init_legacy_dma(ndev);
1621		if (ret)
1622			goto err_phy;
1623	}
1624
1625	return 0;
1626
1627err_free_eth_irq:
1628	if (lp->eth_irq > 0)
1629		free_irq(lp->eth_irq, ndev);
1630err_phy:
1631	cancel_delayed_work_sync(&lp->stats_work);
1632	phylink_stop(lp->phylink);
1633	phylink_disconnect_phy(lp->phylink);
1634	return ret;
1635}
1636
1637/**
1638 * axienet_stop - Driver stop routine.
1639 * @ndev:	Pointer to net_device structure
1640 *
1641 * Return: 0, on success.
1642 *
 1643 * This is the driver stop routine. It calls phylink_stop/phylink_disconnect_phy
 1644 * to stop the PHY device. It also removes the interrupt handlers and disables
 1645 * the interrupts. The Axi DMA Tx/Rx BDs are released.
1646 */
1647static int axienet_stop(struct net_device *ndev)
1648{
1649	struct axienet_local *lp = netdev_priv(ndev);
1650	int i;
1651
1652	if (!lp->use_dmaengine) {
1653		WRITE_ONCE(lp->stopping, true);
1654		flush_work(&lp->dma_err_task);
1655
1656		napi_disable(&lp->napi_tx);
1657		napi_disable(&lp->napi_rx);
1658	}
1659
1660	cancel_delayed_work_sync(&lp->stats_work);
1661
1662	phylink_stop(lp->phylink);
1663	phylink_disconnect_phy(lp->phylink);
1664
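	/* Disable the MAC receiver and transmitter before tearing down the
	 * DMA paths.
	 */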
1665	axienet_setoptions(ndev, lp->options &
1666			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1667
1668	if (!lp->use_dmaengine) {
1669		axienet_dma_stop(lp);
1670		cancel_work_sync(&lp->dma_err_task);
1671		free_irq(lp->tx_irq, ndev);
1672		free_irq(lp->rx_irq, ndev);
1673		axienet_dma_bd_release(ndev);
1674	} else {
1675		dmaengine_terminate_sync(lp->tx_chan);
1676		dmaengine_synchronize(lp->tx_chan);
1677		dmaengine_terminate_sync(lp->rx_chan);
1678		dmaengine_synchronize(lp->rx_chan);
1679
1680		for (i = 0; i < TX_BD_NUM_MAX; i++)
1681			kfree(lp->tx_skb_ring[i]);
1682		kfree(lp->tx_skb_ring);
1683		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1684			kfree(lp->rx_skb_ring[i]);
1685		kfree(lp->rx_skb_ring);
1686
1687		dma_release_channel(lp->rx_chan);
1688		dma_release_channel(lp->tx_chan);
1689	}
1690
1691	axienet_iow(lp, XAE_IE_OFFSET, 0);
1692
1693	if (lp->eth_irq > 0)
1694		free_irq(lp->eth_irq, ndev);
1695	return 0;
1696}
1697
1698/**
1699 * axienet_change_mtu - Driver change mtu routine.
1700 * @ndev:	Pointer to net_device structure
1701 * @new_mtu:	New mtu value to be applied
1702 *
 1703 * Return: 0 on success, -EBUSY if the interface is running, or -EINVAL if
 *	    the resulting frame would not fit in the receive memory.
1704 *
1705 * This is the change mtu driver routine. It checks if the Axi Ethernet
1706 * hardware supports jumbo frames before changing the mtu. This can be
1707 * called only when the device is not up.
1708 */
1709static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1710{
1711	struct axienet_local *lp = netdev_priv(ndev);
1712
1713	if (netif_running(ndev))
1714		return -EBUSY;
1715
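	/* The resulting frame (MTU + VLAN Ethernet header + trailer) must fit
	 * in the receive memory configured in hardware.
	 */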
1716	if ((new_mtu + VLAN_ETH_HLEN +
1717		XAE_TRL_SIZE) > lp->rxmem)
1718		return -EINVAL;
1719
1720	WRITE_ONCE(ndev->mtu, new_mtu);
1721
1722	return 0;
1723}
1724
1725#ifdef CONFIG_NET_POLL_CONTROLLER
1726/**
1727 * axienet_poll_controller - Axi Ethernet poll mechanism.
1728 * @ndev:	Pointer to net_device structure
1729 *
1730 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1731 * to polling the ISRs and are enabled back after the polling is done.
1732 */
1733static void axienet_poll_controller(struct net_device *ndev)
1734{
1735	struct axienet_local *lp = netdev_priv(ndev);
1736
1737	disable_irq(lp->tx_irq);
1738	disable_irq(lp->rx_irq);
 1739	axienet_rx_irq(lp->rx_irq, ndev);
 1740	axienet_tx_irq(lp->tx_irq, ndev);
1741	enable_irq(lp->tx_irq);
1742	enable_irq(lp->rx_irq);
1743}
1744#endif
1745
1746static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1747{
1748	struct axienet_local *lp = netdev_priv(dev);
1749
1750	if (!netif_running(dev))
1751		return -EINVAL;
1752
1753	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1754}
1755
1756static void
1757axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1758{
1759	struct axienet_local *lp = netdev_priv(dev);
1760	unsigned int start;
1761
1762	netdev_stats_to_stats64(stats, &dev->stats);
1763
1764	do {
1765		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1766		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1767		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1768	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1769
1770	do {
1771		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1772		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1773		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1774	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1775
1776	if (!(lp->features & XAE_FEATURE_STATS))
1777		return;
1778
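	/* Fold in the MAC hardware counters; the seqcount retry loop ensures
	 * a consistent snapshot across a concurrent refresh or reset.
	 */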
1779	do {
1780		start = read_seqcount_begin(&lp->hw_stats_seqcount);
1781		stats->rx_length_errors =
1782			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1783		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1784		stats->rx_frame_errors =
1785			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1786		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1787				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1788				   stats->rx_length_errors +
1789				   stats->rx_crc_errors +
1790				   stats->rx_frame_errors;
1791		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1792
1793		stats->tx_aborted_errors =
1794			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1795		stats->tx_fifo_errors =
1796			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1797		stats->tx_window_errors =
1798			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1799		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1800				   stats->tx_aborted_errors +
1801				   stats->tx_fifo_errors +
1802				   stats->tx_window_errors;
1803	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1804}
1805
1806static const struct net_device_ops axienet_netdev_ops = {
1807	.ndo_open = axienet_open,
1808	.ndo_stop = axienet_stop,
1809	.ndo_start_xmit = axienet_start_xmit,
1810	.ndo_get_stats64 = axienet_get_stats64,
1811	.ndo_change_mtu	= axienet_change_mtu,
1812	.ndo_set_mac_address = netdev_set_mac_address,
1813	.ndo_validate_addr = eth_validate_addr,
1814	.ndo_eth_ioctl = axienet_ioctl,
1815	.ndo_set_rx_mode = axienet_set_multicast_list,
1816#ifdef CONFIG_NET_POLL_CONTROLLER
1817	.ndo_poll_controller = axienet_poll_controller,
1818#endif
1819};
1820
1821static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1822	.ndo_open = axienet_open,
1823	.ndo_stop = axienet_stop,
1824	.ndo_start_xmit = axienet_start_xmit_dmaengine,
1825	.ndo_get_stats64 = axienet_get_stats64,
1826	.ndo_change_mtu	= axienet_change_mtu,
1827	.ndo_set_mac_address = netdev_set_mac_address,
1828	.ndo_validate_addr = eth_validate_addr,
1829	.ndo_eth_ioctl = axienet_ioctl,
1830	.ndo_set_rx_mode = axienet_set_multicast_list,
1831};
1832
1833/**
1834 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1835 * @ndev:	Pointer to net_device structure
1836 * @ed:		Pointer to ethtool_drvinfo structure
1837 *
1838 * This implements ethtool command for getting the driver information.
1839 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1840 */
1841static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1842					 struct ethtool_drvinfo *ed)
1843{
1844	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1845	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1846}
1847
1848/**
1849 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1850 *				   AxiEthernet core.
1851 * @ndev:	Pointer to net_device structure
1852 *
1853 * This implements ethtool command for getting the total register length
1854 * information.
1855 *
1856 * Return: the total regs length
1857 */
1858static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1859{
1860	return sizeof(u32) * AXIENET_REGS_N;
1861}
1862
1863/**
1864 * axienet_ethtools_get_regs - Dump the contents of all registers present
1865 *			       in AxiEthernet core.
1866 * @ndev:	Pointer to net_device structure
1867 * @regs:	Pointer to ethtool_regs structure
1868 * @ret:	Void pointer used to return the contents of the registers.
1869 *
1870 * This implements ethtool command for getting the Axi Ethernet register dump.
1871 * Issue "ethtool -d ethX" to execute this function.
1872 */
1873static void axienet_ethtools_get_regs(struct net_device *ndev,
1874				      struct ethtool_regs *regs, void *ret)
1875{
1876	u32 *data = (u32 *)ret;
1877	size_t len = sizeof(u32) * AXIENET_REGS_N;
1878	struct axienet_local *lp = netdev_priv(ndev);
1879
1880	regs->version = 0;
1881	regs->len = len;
1882
1883	memset(data, 0, len);
1884	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1885	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1886	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1887	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1888	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1889	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1890	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1891	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1892	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1893	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1894	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1895	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1896	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1897	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1898	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1899	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1900	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1901	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1902	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1903	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1904	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1905	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1906	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1907	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1908	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1909	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1910	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1911	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1912	if (!lp->use_dmaengine) {
1913		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1914		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1915		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1916		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1917		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1918		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1919		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1920		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1921	}
1922}
1923
1924static void
1925axienet_ethtools_get_ringparam(struct net_device *ndev,
1926			       struct ethtool_ringparam *ering,
1927			       struct kernel_ethtool_ringparam *kernel_ering,
1928			       struct netlink_ext_ack *extack)
1929{
1930	struct axienet_local *lp = netdev_priv(ndev);
1931
1932	ering->rx_max_pending = RX_BD_NUM_MAX;
1933	ering->rx_mini_max_pending = 0;
1934	ering->rx_jumbo_max_pending = 0;
1935	ering->tx_max_pending = TX_BD_NUM_MAX;
1936	ering->rx_pending = lp->rx_bd_num;
1937	ering->rx_mini_pending = 0;
1938	ering->rx_jumbo_pending = 0;
1939	ering->tx_pending = lp->tx_bd_num;
1940}
1941
1942static int
1943axienet_ethtools_set_ringparam(struct net_device *ndev,
1944			       struct ethtool_ringparam *ering,
1945			       struct kernel_ethtool_ringparam *kernel_ering,
1946			       struct netlink_ext_ack *extack)
1947{
1948	struct axienet_local *lp = netdev_priv(ndev);
1949
1950	if (ering->rx_pending > RX_BD_NUM_MAX ||
1951	    ering->rx_mini_pending ||
1952	    ering->rx_jumbo_pending ||
1953	    ering->tx_pending < TX_BD_NUM_MIN ||
1954	    ering->tx_pending > TX_BD_NUM_MAX)
1955		return -EINVAL;
1956
1957	if (netif_running(ndev))
1958		return -EBUSY;
1959
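	/* The new ring sizes take effect the next time the interface is
	 * brought up and the descriptor rings are reallocated.
	 */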
1960	lp->rx_bd_num = ering->rx_pending;
1961	lp->tx_bd_num = ering->tx_pending;
1962	return 0;
1963}
1964
1965/**
1966 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1967 *				     Tx and Rx paths.
1968 * @ndev:	Pointer to net_device structure
1969 * @epauseparm:	Pointer to ethtool_pauseparam structure.
1970 *
1971 * This implements ethtool command for getting axi ethernet pause frame
1972 * setting. Issue "ethtool -a ethX" to execute this function.
1973 */
1974static void
1975axienet_ethtools_get_pauseparam(struct net_device *ndev,
1976				struct ethtool_pauseparam *epauseparm)
1977{
1978	struct axienet_local *lp = netdev_priv(ndev);
1979
1980	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1981}
1982
1983/**
1984 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1985 *				     settings.
1986 * @ndev:	Pointer to net_device structure
 1987 * @epauseparm:	Pointer to ethtool_pauseparam structure
1988 *
1989 * This implements ethtool command for enabling flow control on Rx and Tx
1990 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1991 * function.
1992 *
 1993 * Return: 0 on success, or a negative error value returned by phylink on failure.
1994 */
1995static int
1996axienet_ethtools_set_pauseparam(struct net_device *ndev,
1997				struct ethtool_pauseparam *epauseparm)
1998{
1999	struct axienet_local *lp = netdev_priv(ndev);
2000
2001	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
2002}
2003
2004/**
2005 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2006 * @ndev:	Pointer to net_device structure
2007 * @ecoalesce:	Pointer to ethtool_coalesce structure
2008 * @kernel_coal: ethtool CQE mode setting structure
2009 * @extack:	extack for reporting error messages
2010 *
2011 * This implements ethtool command for getting the DMA interrupt coalescing
2012 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
2013 * execute this function.
2014 *
2015 * Return: 0 always
2016 */
2017static int
2018axienet_ethtools_get_coalesce(struct net_device *ndev,
2019			      struct ethtool_coalesce *ecoalesce,
2020			      struct kernel_ethtool_coalesce *kernel_coal,
2021			      struct netlink_ext_ack *extack)
2022{
2023	struct axienet_local *lp = netdev_priv(ndev);
2024
2025	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
2026	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
2027	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
2028	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
2029	return 0;
2030}
2031
2032/**
2033 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2034 * @ndev:	Pointer to net_device structure
2035 * @ecoalesce:	Pointer to ethtool_coalesce structure
2036 * @kernel_coal: ethtool CQE mode setting structure
2037 * @extack:	extack for reporting error messages
2038 *
2039 * This implements ethtool command for setting the DMA interrupt coalescing
2040 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
2041 * prompt to execute this function.
2042 *
2043 * Return: 0, on success, Non-zero error value on failure.
2044 */
2045static int
2046axienet_ethtools_set_coalesce(struct net_device *ndev,
2047			      struct ethtool_coalesce *ecoalesce,
2048			      struct kernel_ethtool_coalesce *kernel_coal,
2049			      struct netlink_ext_ack *extack)
2050{
2051	struct axienet_local *lp = netdev_priv(ndev);
2052
2053	if (netif_running(ndev)) {
2054		NL_SET_ERR_MSG(extack,
2055			       "Please stop netif before applying configuration");
2056		return -EBUSY;
2057	}
2058
2059	if (ecoalesce->rx_max_coalesced_frames > 255 ||
2060	    ecoalesce->tx_max_coalesced_frames > 255) {
2061		NL_SET_ERR_MSG(extack, "frames must be less than 256");
2062		return -EINVAL;
2063	}
2064
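	/* A zero value leaves the corresponding setting unchanged. The new
	 * values are programmed into the DMA engine the next time the
	 * interface is brought up.
	 */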
2065	if (ecoalesce->rx_max_coalesced_frames)
2066		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
2067	if (ecoalesce->rx_coalesce_usecs)
2068		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
2069	if (ecoalesce->tx_max_coalesced_frames)
2070		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
2071	if (ecoalesce->tx_coalesce_usecs)
2072		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
2073
2074	return 0;
2075}
2076
2077static int
2078axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2079				    struct ethtool_link_ksettings *cmd)
2080{
2081	struct axienet_local *lp = netdev_priv(ndev);
2082
2083	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2084}
2085
2086static int
2087axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2088				    const struct ethtool_link_ksettings *cmd)
2089{
2090	struct axienet_local *lp = netdev_priv(ndev);
2091
2092	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2093}
2094
2095static int axienet_ethtools_nway_reset(struct net_device *dev)
2096{
2097	struct axienet_local *lp = netdev_priv(dev);
2098
2099	return phylink_ethtool_nway_reset(lp->phylink);
2100}
2101
2102static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2103					       struct ethtool_stats *stats,
2104					       u64 *data)
2105{
2106	struct axienet_local *lp = netdev_priv(dev);
2107	unsigned int start;
2108
2109	do {
2110		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2111		data[0] = axienet_stat(lp, STAT_RX_BYTES);
2112		data[1] = axienet_stat(lp, STAT_TX_BYTES);
2113		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2114		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
 2115		data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
 2116		data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
 2117		data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
 2118		data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
 2119		data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
2120	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2121}
2122
2123static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2124	"Received bytes",
2125	"Transmitted bytes",
2126	"RX Good VLAN Tagged Frames",
2127	"TX Good VLAN Tagged Frames",
2128	"TX Good PFC Frames",
2129	"RX Good PFC Frames",
2130	"User Defined Counter 0",
2131	"User Defined Counter 1",
2132	"User Defined Counter 2",
2133};
2134
2135static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2136{
2137	switch (stringset) {
2138	case ETH_SS_STATS:
2139		memcpy(data, axienet_ethtool_stats_strings,
2140		       sizeof(axienet_ethtool_stats_strings));
2141		break;
2142	}
2143}
2144
2145static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2146{
2147	struct axienet_local *lp = netdev_priv(dev);
2148
2149	switch (sset) {
2150	case ETH_SS_STATS:
2151		if (lp->features & XAE_FEATURE_STATS)
2152			return ARRAY_SIZE(axienet_ethtool_stats_strings);
2153		fallthrough;
2154	default:
2155		return -EOPNOTSUPP;
2156	}
2157}
2158
2159static void
2160axienet_ethtools_get_pause_stats(struct net_device *dev,
2161				 struct ethtool_pause_stats *pause_stats)
2162{
2163	struct axienet_local *lp = netdev_priv(dev);
2164	unsigned int start;
2165
2166	if (!(lp->features & XAE_FEATURE_STATS))
2167		return;
2168
2169	do {
2170		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2171		pause_stats->tx_pause_frames =
2172			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2173		pause_stats->rx_pause_frames =
2174			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2175	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2176}
2177
2178static void
2179axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2180				  struct ethtool_eth_mac_stats *mac_stats)
2181{
2182	struct axienet_local *lp = netdev_priv(dev);
2183	unsigned int start;
2184
2185	if (!(lp->features & XAE_FEATURE_STATS))
2186		return;
2187
2188	do {
2189		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2190		mac_stats->FramesTransmittedOK =
2191			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2192		mac_stats->SingleCollisionFrames =
2193			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2194		mac_stats->MultipleCollisionFrames =
2195			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2196		mac_stats->FramesReceivedOK =
2197			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2198		mac_stats->FrameCheckSequenceErrors =
2199			axienet_stat(lp, STAT_RX_FCS_ERRORS);
2200		mac_stats->AlignmentErrors =
2201			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2202		mac_stats->FramesWithDeferredXmissions =
2203			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2204		mac_stats->LateCollisions =
2205			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2206		mac_stats->FramesAbortedDueToXSColls =
2207			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2208		mac_stats->MulticastFramesXmittedOK =
2209			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2210		mac_stats->BroadcastFramesXmittedOK =
2211			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2212		mac_stats->FramesWithExcessiveDeferral =
2213			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2214		mac_stats->MulticastFramesReceivedOK =
2215			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2216		mac_stats->BroadcastFramesReceivedOK =
2217			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2218		mac_stats->InRangeLengthErrors =
2219			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2220	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2221}
2222
2223static void
2224axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2225				   struct ethtool_eth_ctrl_stats *ctrl_stats)
2226{
2227	struct axienet_local *lp = netdev_priv(dev);
2228	unsigned int start;
2229
2230	if (!(lp->features & XAE_FEATURE_STATS))
2231		return;
2232
2233	do {
2234		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2235		ctrl_stats->MACControlFramesTransmitted =
2236			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2237		ctrl_stats->MACControlFramesReceived =
2238			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2239		ctrl_stats->UnsupportedOpcodesReceived =
2240			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2241	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2242}
2243
2244static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2245	{   64,    64 },
2246	{   65,   127 },
2247	{  128,   255 },
2248	{  256,   511 },
2249	{  512,  1023 },
2250	{ 1024,  1518 },
2251	{ 1519, 16384 },
2252	{ },
2253};
2254
2255static void
2256axienet_ethtool_get_rmon_stats(struct net_device *dev,
2257			       struct ethtool_rmon_stats *rmon_stats,
2258			       const struct ethtool_rmon_hist_range **ranges)
2259{
2260	struct axienet_local *lp = netdev_priv(dev);
2261	unsigned int start;
2262
2263	if (!(lp->features & XAE_FEATURE_STATS))
2264		return;
2265
2266	do {
2267		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2268		rmon_stats->undersize_pkts =
2269			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2270		rmon_stats->oversize_pkts =
2271			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2272		rmon_stats->fragments =
2273			axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2274
2275		rmon_stats->hist[0] =
2276			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2277		rmon_stats->hist[1] =
2278			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2279		rmon_stats->hist[2] =
2280			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2281		rmon_stats->hist[3] =
2282			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2283		rmon_stats->hist[4] =
2284			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2285		rmon_stats->hist[5] =
2286			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2287		rmon_stats->hist[6] =
2288			rmon_stats->oversize_pkts;
2289
2290		rmon_stats->hist_tx[0] =
2291			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2292		rmon_stats->hist_tx[1] =
2293			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2294		rmon_stats->hist_tx[2] =
2295			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2296		rmon_stats->hist_tx[3] =
2297			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2298		rmon_stats->hist_tx[4] =
2299			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2300		rmon_stats->hist_tx[5] =
2301			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2302		rmon_stats->hist_tx[6] =
2303			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2304	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2305
2306	*ranges = axienet_rmon_ranges;
2307}
2308
2309static const struct ethtool_ops axienet_ethtool_ops = {
2310	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
2311				     ETHTOOL_COALESCE_USECS,
2312	.get_drvinfo    = axienet_ethtools_get_drvinfo,
2313	.get_regs_len   = axienet_ethtools_get_regs_len,
2314	.get_regs       = axienet_ethtools_get_regs,
2315	.get_link       = ethtool_op_get_link,
2316	.get_ringparam	= axienet_ethtools_get_ringparam,
2317	.set_ringparam	= axienet_ethtools_set_ringparam,
2318	.get_pauseparam = axienet_ethtools_get_pauseparam,
2319	.set_pauseparam = axienet_ethtools_set_pauseparam,
2320	.get_coalesce   = axienet_ethtools_get_coalesce,
2321	.set_coalesce   = axienet_ethtools_set_coalesce,
2322	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
2323	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
2324	.nway_reset	= axienet_ethtools_nway_reset,
2325	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2326	.get_strings    = axienet_ethtools_get_strings,
2327	.get_sset_count = axienet_ethtools_get_sset_count,
2328	.get_pause_stats = axienet_ethtools_get_pause_stats,
2329	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2330	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2331	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
2332};
2333
2334static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2335{
2336	return container_of(pcs, struct axienet_local, pcs);
2337}
2338
2339static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2340				  struct phylink_link_state *state)
2341{
2342	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2343
2344	phylink_mii_c22_pcs_get_state(pcs_phy, state);
2345}
2346
2347static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2348{
2349	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2350
2351	phylink_mii_c22_pcs_an_restart(pcs_phy);
2352}
2353
2354static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2355			      phy_interface_t interface,
2356			      const unsigned long *advertising,
2357			      bool permit_pause_to_mac)
2358{
2359	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2360	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2361	struct axienet_local *lp = netdev_priv(ndev);
2362	int ret;
2363
2364	if (lp->switch_x_sgmii) {
2365		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2366				    interface == PHY_INTERFACE_MODE_SGMII ?
2367					XLNX_MII_STD_SELECT_SGMII : 0);
2368		if (ret < 0) {
2369			netdev_warn(ndev,
2370				    "Failed to switch PHY interface: %d\n",
2371				    ret);
2372			return ret;
2373		}
2374	}
2375
2376	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2377					 neg_mode);
2378	if (ret < 0)
2379		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2380
2381	return ret;
2382}
2383
2384static const struct phylink_pcs_ops axienet_pcs_ops = {
2385	.pcs_get_state = axienet_pcs_get_state,
2386	.pcs_config = axienet_pcs_config,
2387	.pcs_an_restart = axienet_pcs_an_restart,
2388};
2389
2390static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2391						  phy_interface_t interface)
2392{
2393	struct net_device *ndev = to_net_dev(config->dev);
2394	struct axienet_local *lp = netdev_priv(ndev);
2395
2396	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2397	    interface ==  PHY_INTERFACE_MODE_SGMII)
2398		return &lp->pcs;
2399
2400	return NULL;
2401}
2402
2403static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2404			       const struct phylink_link_state *state)
2405{
2406	/* Nothing to do here; link settings are applied in axienet_mac_link_up() */
2407}
2408
2409static void axienet_mac_link_down(struct phylink_config *config,
2410				  unsigned int mode,
2411				  phy_interface_t interface)
2412{
2413	/* Nothing to do; the MAC needs no programming when the link goes down */
2414}
2415
2416static void axienet_mac_link_up(struct phylink_config *config,
2417				struct phy_device *phy,
2418				unsigned int mode, phy_interface_t interface,
2419				int speed, int duplex,
2420				bool tx_pause, bool rx_pause)
2421{
2422	struct net_device *ndev = to_net_dev(config->dev);
2423	struct axienet_local *lp = netdev_priv(ndev);
2424	u32 emmc_reg, fcc_reg;
2425
2426	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2427	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2428
2429	switch (speed) {
2430	case SPEED_1000:
2431		emmc_reg |= XAE_EMMC_LINKSPD_1000;
2432		break;
2433	case SPEED_100:
2434		emmc_reg |= XAE_EMMC_LINKSPD_100;
2435		break;
2436	case SPEED_10:
2437		emmc_reg |= XAE_EMMC_LINKSPD_10;
2438		break;
2439	default:
2440		dev_err(&ndev->dev,
2441			"Speed other than 10, 100 or 1000 Mbps is not supported\n");
2442		break;
2443	}
2444
2445	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2446
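	/* Apply the resolved Tx/Rx pause settings to the flow control register. */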
2447	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2448	if (tx_pause)
2449		fcc_reg |= XAE_FCC_FCTX_MASK;
2450	else
2451		fcc_reg &= ~XAE_FCC_FCTX_MASK;
2452	if (rx_pause)
2453		fcc_reg |= XAE_FCC_FCRX_MASK;
2454	else
2455		fcc_reg &= ~XAE_FCC_FCRX_MASK;
2456	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2457}
2458
2459static const struct phylink_mac_ops axienet_phylink_ops = {
2460	.mac_select_pcs = axienet_mac_select_pcs,
2461	.mac_config = axienet_mac_config,
2462	.mac_link_down = axienet_mac_link_down,
2463	.mac_link_up = axienet_mac_link_up,
2464};
2465
2466/**
2467 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2468 * @work:	pointer to work_struct
2469 *
2470 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2471 * Tx/Rx BDs.
2472 */
2473static void axienet_dma_err_handler(struct work_struct *work)
2474{
2475	u32 i;
2476	u32 axienet_status;
2477	struct axidma_bd *cur_p;
2478	struct axienet_local *lp = container_of(work, struct axienet_local,
2479						dma_err_task);
2480	struct net_device *ndev = lp->ndev;
2481
2482	/* Don't bother if we are going to stop anyway */
2483	if (READ_ONCE(lp->stopping))
2484		return;
2485
2486	napi_disable(&lp->napi_tx);
2487	napi_disable(&lp->napi_rx);
2488
2489	axienet_setoptions(ndev, lp->options &
2490			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2491
2492	axienet_dma_stop(lp);
2493
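	/* Unmap and free any Tx buffers still held by the ring, then clear
	 * the descriptors so the ring restarts from a clean state.
	 */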
2494	for (i = 0; i < lp->tx_bd_num; i++) {
2495		cur_p = &lp->tx_bd_v[i];
2496		if (cur_p->cntrl) {
2497			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2498
2499			dma_unmap_single(lp->dev, addr,
2500					 (cur_p->cntrl &
2501					  XAXIDMA_BD_CTRL_LENGTH_MASK),
2502					 DMA_TO_DEVICE);
2503		}
2504		if (cur_p->skb)
2505			dev_kfree_skb_irq(cur_p->skb);
2506		cur_p->phys = 0;
2507		cur_p->phys_msb = 0;
2508		cur_p->cntrl = 0;
2509		cur_p->status = 0;
2510		cur_p->app0 = 0;
2511		cur_p->app1 = 0;
2512		cur_p->app2 = 0;
2513		cur_p->app3 = 0;
2514		cur_p->app4 = 0;
2515		cur_p->skb = NULL;
2516	}
2517
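	/* Rx descriptors keep their attached buffers; only the completion
	 * status and application words need to be cleared.
	 */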
2518	for (i = 0; i < lp->rx_bd_num; i++) {
2519		cur_p = &lp->rx_bd_v[i];
2520		cur_p->status = 0;
2521		cur_p->app0 = 0;
2522		cur_p->app1 = 0;
2523		cur_p->app2 = 0;
2524		cur_p->app3 = 0;
2525		cur_p->app4 = 0;
2526	}
2527
2528	lp->tx_bd_ci = 0;
2529	lp->tx_bd_tail = 0;
2530	lp->rx_bd_ci = 0;
2531
2532	axienet_dma_start(lp);
2533
2534	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2535	axienet_status &= ~XAE_RCW1_RX_MASK;
2536	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2537
2538	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2539	if (axienet_status & XAE_INT_RXRJECT_MASK)
2540		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2541	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2542		    XAE_INT_RECV_ERROR_MASK : 0);
2543	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2544
2545	/* Sync default options with HW but leave receiver and
2546	 * transmitter disabled.
2547	 */
2548	axienet_setoptions(ndev, lp->options &
2549			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2550	axienet_set_mac_address(ndev, NULL);
2551	axienet_set_multicast_list(ndev);
2552	napi_enable(&lp->napi_rx);
2553	napi_enable(&lp->napi_tx);
2554	axienet_setoptions(ndev, lp->options);
2555}
2556
2557/**
2558 * axienet_probe - Axi Ethernet probe function.
2559 * @pdev:	Pointer to platform device structure.
2560 *
2561 * Return: 0 on success.
2562 *	    Non-zero error value on failure.
2563 *
2564 * This is the probe routine for the Axi Ethernet driver. It is called before
2565 * any other driver routine is invoked. It allocates and sets up the Ethernet
2566 * device, parses the device tree to populate the fields of axienet_local,
2567 * and registers the Ethernet device.
2568 */
2569static int axienet_probe(struct platform_device *pdev)
2570{
2571	int ret;
2572	struct device_node *np;
2573	struct axienet_local *lp;
2574	struct net_device *ndev;
2575	struct resource *ethres;
2576	u8 mac_addr[ETH_ALEN];
2577	int addr_width = 32;
2578	u32 value;
2579
2580	ndev = alloc_etherdev(sizeof(*lp));
2581	if (!ndev)
2582		return -ENOMEM;
2583
2584	platform_set_drvdata(pdev, ndev);
2585
2586	SET_NETDEV_DEV(ndev, &pdev->dev);
2587	ndev->features = NETIF_F_SG;
2588	ndev->ethtool_ops = &axienet_ethtool_ops;
2589
2590	/* MTU range: 64 - 9000 */
2591	ndev->min_mtu = 64;
2592	ndev->max_mtu = XAE_JUMBO_MTU;
2593
2594	lp = netdev_priv(ndev);
2595	lp->ndev = ndev;
2596	lp->dev = &pdev->dev;
2597	lp->options = XAE_OPTION_DEFAULTS;
2598	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2599	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2600
2601	u64_stats_init(&lp->rx_stat_sync);
2602	u64_stats_init(&lp->tx_stat_sync);
2603
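	/* hw_stats_seqcount lets ethtool readers detect a concurrent
	 * statistics refresh and retry instead of blocking on it.
	 */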
2604	mutex_init(&lp->stats_lock);
2605	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2606	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2607
2608	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2609	if (!lp->axi_clk) {
2610		/* For backward compatibility, if named AXI clock is not present,
2611		 * treat the first clock specified as the AXI clock.
2612		 */
2613		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2614	}
2615	if (IS_ERR(lp->axi_clk)) {
2616		ret = PTR_ERR(lp->axi_clk);
2617		goto free_netdev;
2618	}
2619	ret = clk_prepare_enable(lp->axi_clk);
2620	if (ret) {
2621		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2622		goto free_netdev;
2623	}
2624
2625	lp->misc_clks[0].id = "axis_clk";
2626	lp->misc_clks[1].id = "ref_clk";
2627	lp->misc_clks[2].id = "mgt_clk";
2628
2629	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2630	if (ret)
2631		goto cleanup_clk;
2632
2633	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2634	if (ret)
2635		goto cleanup_clk;
2636
2637	/* Map device registers */
2638	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2639	if (IS_ERR(lp->regs)) {
2640		ret = PTR_ERR(lp->regs);
2641		goto cleanup_clk;
2642	}
2643	lp->regs_start = ethres->start;
2644
2645	/* Set up checksum offload, but default to off if not specified */
2646	lp->features = 0;
2647
2648	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2649		lp->features |= XAE_FEATURE_STATS;
2650
2651	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2652	if (!ret) {
2653		switch (value) {
2654		case 1:
 
 
2655			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2656			/* Can checksum any contiguous range */
2657			ndev->features |= NETIF_F_HW_CSUM;
2658			break;
2659		case 2:
2660			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2661			/* Can checksum TCP/UDP over IPv4. */
2662			ndev->features |= NETIF_F_IP_CSUM;
2663			break;
2664		}
2665	}
2666	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2667	if (!ret) {
2668		switch (value) {
2669		case 1:
2670			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2671			ndev->features |= NETIF_F_RXCSUM;
2672			break;
2673		case 2:
2674			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2675			ndev->features |= NETIF_F_RXCSUM;
2676			break;
2677		}
2678	}
2679	/* To support jumbo frames, the Axi Ethernet hardware must be
2680	 * configured with enough Rx/Tx memory to buffer a full jumbo
2681	 * frame before the jumbo option can be enabled.
2682	 * Read the Rx memory size configured in the hardware from the
2683	 * device tree and record it for those checks.
2684	 */
2685	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2686
2687	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2688						   "xlnx,switch-x-sgmii");
2689
2690	/* Start with the proprietary (and deprecated) phy_type property */
2691	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2692	if (!ret) {
2693		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2694		switch (value) {
2695		case XAE_PHY_TYPE_MII:
2696			lp->phy_mode = PHY_INTERFACE_MODE_MII;
2697			break;
2698		case XAE_PHY_TYPE_GMII:
2699			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2700			break;
2701		case XAE_PHY_TYPE_RGMII_2_0:
2702			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2703			break;
2704		case XAE_PHY_TYPE_SGMII:
2705			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2706			break;
2707		case XAE_PHY_TYPE_1000BASE_X:
2708			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2709			break;
2710		default:
2711			ret = -EINVAL;
2712			goto cleanup_clk;
2713		}
2714	} else {
2715		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2716		if (ret)
2717			goto cleanup_clk;
2718	}
2719	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2720	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2721		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2722		ret = -EINVAL;
2723		goto cleanup_clk;
2724	}
2725
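	/* Without a "dmas" property, the driver programs the AXI DMA core
	 * directly; otherwise the dmaengine framework is used instead.
	 */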
2726	if (!of_property_present(pdev->dev.of_node, "dmas")) {
2727		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2728		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2729
2730		if (np) {
2731			struct resource dmares;
2732
2733			ret = of_address_to_resource(np, 0, &dmares);
2734			if (ret) {
2735				dev_err(&pdev->dev,
2736					"unable to get DMA resource\n");
2737				of_node_put(np);
2738				goto cleanup_clk;
2739			}
2740			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2741							     &dmares);
2742			lp->rx_irq = irq_of_parse_and_map(np, 1);
2743			lp->tx_irq = irq_of_parse_and_map(np, 0);
2744			of_node_put(np);
2745			lp->eth_irq = platform_get_irq_optional(pdev, 0);
2746		} else {
2747			/* Check for these resources directly on the Ethernet node. */
2748			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2749			lp->rx_irq = platform_get_irq(pdev, 1);
2750			lp->tx_irq = platform_get_irq(pdev, 0);
2751			lp->eth_irq = platform_get_irq_optional(pdev, 2);
2752		}
2753		if (IS_ERR(lp->dma_regs)) {
2754			dev_err(&pdev->dev, "could not map DMA regs\n");
2755			ret = PTR_ERR(lp->dma_regs);
2756			goto cleanup_clk;
2757		}
2758		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2759			dev_err(&pdev->dev, "could not determine irqs\n");
2760			ret = -ENOMEM;
2761			goto cleanup_clk;
2762		}
2763
2764		/* Reset core now that clocks are enabled, prior to accessing MDIO */
2765		ret = __axienet_device_reset(lp);
2766		if (ret)
2767			goto cleanup_clk;
2768
2769		/* Autodetect the need for 64-bit DMA pointers.
2770		 * When the IP is configured for a bus width bigger than 32 bits,
2771		 * writing the MSB registers is mandatory, even if they are all 0.
2772		 * We can detect this case by writing all 1's to one such register
2773		 * and see if that sticks: when the IP is configured for 32 bits
2774		 * only, those registers are RES0.
2775		 * Those MSB registers were introduced in IP v7.1, which we check first.
2776		 */
2777		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2778			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2779
2780			iowrite32(0x0, desc);
2781			if (ioread32(desc) == 0) {	/* sanity check */
2782				iowrite32(0xffffffff, desc);
2783				if (ioread32(desc) > 0) {
2784					lp->features |= XAE_FEATURE_DMA_64BIT;
2785					addr_width = 64;
2786					dev_info(&pdev->dev,
2787						 "autodetected 64-bit DMA range\n");
2788				}
2789				iowrite32(0x0, desc);
2790			}
2791		}
2792		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2793			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2794			ret = -EINVAL;
2795			goto cleanup_clk;
2796		}
2797
2798		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2799		if (ret) {
2800			dev_err(&pdev->dev, "No suitable DMA available\n");
2801			goto cleanup_clk;
2802		}
2803		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2804		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2805	} else {
2806		struct xilinx_vdma_config cfg;
2807		struct dma_chan *tx_chan;
2808
2809		lp->eth_irq = platform_get_irq_optional(pdev, 0);
2810		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2811			ret = lp->eth_irq;
2812			goto cleanup_clk;
2813		}
2814		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2815		if (IS_ERR(tx_chan)) {
2816			ret = PTR_ERR(tx_chan);
2817			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2818			goto cleanup_clk;
2819		}
2820
2821		cfg.reset = 1;
2822		/* Despite the VDMA name, this call also supports resetting the DMA channel */
2823		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2824		if (ret < 0) {
2825			dev_err(&pdev->dev, "Reset channel failed\n");
2826			dma_release_channel(tx_chan);
2827			goto cleanup_clk;
2828		}
2829
2830		dma_release_channel(tx_chan);
2831		lp->use_dmaengine = 1;
2832	}
2833
2834	if (lp->use_dmaengine)
2835		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2836	else
2837		ndev->netdev_ops = &axienet_netdev_ops;
2838	/* Check for Ethernet core IRQ (optional) */
2839	if (lp->eth_irq <= 0)
2840		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2841
2842	/* Retrieve the MAC address */
2843	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2844	if (!ret) {
2845		axienet_set_mac_address(ndev, mac_addr);
2846	} else {
2847		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2848			 ret);
2849		axienet_set_mac_address(ndev, NULL);
2850	}
2851
2852	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2853	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2854	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2855	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2856
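	/* MDIO bus registration is best-effort; a failure is logged but does
	 * not abort the probe.
	 */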
2857	ret = axienet_mdio_setup(lp);
2858	if (ret)
2859		dev_warn(&pdev->dev,
2860			 "error registering MDIO bus: %d\n", ret);
2861
2862	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2863	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2864		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2865		if (!np) {
2866			/* Deprecated: Always use "pcs-handle" for pcs_phy.
2867			 * Falling back to "phy-handle" here is only for
2868			 * backward compatibility with old device trees.
2869			 */
2870			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2871		}
2872		if (!np) {
2873			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2874			ret = -EINVAL;
2875			goto cleanup_mdio;
2876		}
2877		lp->pcs_phy = of_mdio_find_device(np);
2878		if (!lp->pcs_phy) {
2879			ret = -EPROBE_DEFER;
2880			of_node_put(np);
2881			goto cleanup_mdio;
2882		}
2883		of_node_put(np);
2884		lp->pcs.ops = &axienet_pcs_ops;
2885		lp->pcs.neg_mode = true;
2886		lp->pcs.poll = true;
2887	}
2888
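	/* Describe the MAC to phylink: symmetric/asymmetric pause plus
	 * 10/100/1000 Mbps full duplex, limited to the interface mode(s)
	 * this instance supports.
	 */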
2889	lp->phylink_config.dev = &ndev->dev;
2890	lp->phylink_config.type = PHYLINK_NETDEV;
2891	lp->phylink_config.mac_managed_pm = true;
2892	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2893		MAC_10FD | MAC_100FD | MAC_1000FD;
2894
2895	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2896	if (lp->switch_x_sgmii) {
2897		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2898			  lp->phylink_config.supported_interfaces);
2899		__set_bit(PHY_INTERFACE_MODE_SGMII,
2900			  lp->phylink_config.supported_interfaces);
2901	}
2902
2903	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2904				     lp->phy_mode,
2905				     &axienet_phylink_ops);
2906	if (IS_ERR(lp->phylink)) {
2907		ret = PTR_ERR(lp->phylink);
2908		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2909		goto cleanup_mdio;
2910	}
2911
2912	ret = register_netdev(lp->ndev);
2913	if (ret) {
2914		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2915		goto cleanup_phylink;
2916	}
2917
2918	return 0;
2919
2920cleanup_phylink:
2921	phylink_destroy(lp->phylink);
2922
2923cleanup_mdio:
2924	if (lp->pcs_phy)
2925		put_device(&lp->pcs_phy->dev);
2926	if (lp->mii_bus)
2927		axienet_mdio_teardown(lp);
2928cleanup_clk:
2929	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2930	clk_disable_unprepare(lp->axi_clk);
2931
2932free_netdev:
2933	free_netdev(ndev);
2934
2935	return ret;
2936}
2937
2938static void axienet_remove(struct platform_device *pdev)
2939{
2940	struct net_device *ndev = platform_get_drvdata(pdev);
2941	struct axienet_local *lp = netdev_priv(ndev);
2942
2943	unregister_netdev(ndev);
2944
2945	if (lp->phylink)
2946		phylink_destroy(lp->phylink);
2947
2948	if (lp->pcs_phy)
2949		put_device(&lp->pcs_phy->dev);
2950
2951	axienet_mdio_teardown(lp);
2952
2953	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2954	clk_disable_unprepare(lp->axi_clk);
2955
2956	free_netdev(ndev);
2957}
2958
2959static void axienet_shutdown(struct platform_device *pdev)
2960{
2961	struct net_device *ndev = platform_get_drvdata(pdev);
2962
2963	rtnl_lock();
2964	netif_device_detach(ndev);
2965
2966	if (netif_running(ndev))
2967		dev_close(ndev);
2968
2969	rtnl_unlock();
2970}
2971
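/* Power management simply closes the interface on suspend and reopens it on
 * resume; axienet_stop()/axienet_open() handle the hardware teardown/setup.
 */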
2972static int axienet_suspend(struct device *dev)
2973{
2974	struct net_device *ndev = dev_get_drvdata(dev);
2975
2976	if (!netif_running(ndev))
2977		return 0;
2978
2979	netif_device_detach(ndev);
2980
2981	rtnl_lock();
2982	axienet_stop(ndev);
2983	rtnl_unlock();
2984
2985	return 0;
2986}
2987
2988static int axienet_resume(struct device *dev)
2989{
2990	struct net_device *ndev = dev_get_drvdata(dev);
2991
2992	if (!netif_running(ndev))
2993		return 0;
2994
2995	rtnl_lock();
2996	axienet_open(ndev);
2997	rtnl_unlock();
2998
2999	netif_device_attach(ndev);
3000
3001	return 0;
3002}
3003
3004static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
3005				axienet_suspend, axienet_resume);
3006
3007static struct platform_driver axienet_driver = {
3008	.probe = axienet_probe,
3009	.remove = axienet_remove,
3010	.shutdown = axienet_shutdown,
3011	.driver = {
3012		 .name = "xilinx_axienet",
3013		 .pm = &axienet_pm_ops,
3014		 .of_match_table = axienet_of_match,
3015	},
3016};
3017
3018module_platform_driver(axienet_driver);
3019
3020MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3021MODULE_AUTHOR("Xilinx");
3022MODULE_LICENSE("GPL");