   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Xilinx Axi Ethernet device driver
   4 *
   5 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
   6 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
   7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
   8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
   9 * Copyright (c) 2010 - 2011 PetaLogix
  10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
  11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  12 *
  13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
  14 * and Spartan6.
  15 *
  16 * TODO:
  17 *  - Add Axi Fifo support.
  18 *  - Factor out Axi DMA code into separate driver.
  19 *  - Test and fix basic multicast filtering.
  20 *  - Add support for extended multicast filtering.
  21 *  - Test basic VLAN support.
  22 *  - Add support for extended VLAN support.
  23 */
  24
  25#include <linux/clk.h>
  26#include <linux/delay.h>
  27#include <linux/etherdevice.h>
  28#include <linux/module.h>
  29#include <linux/netdevice.h>
  30#include <linux/of.h>
  31#include <linux/of_mdio.h>
  32#include <linux/of_net.h>
  33#include <linux/of_irq.h>
  34#include <linux/of_address.h>
  35#include <linux/platform_device.h>
  36#include <linux/skbuff.h>
  37#include <linux/math64.h>
  38#include <linux/phy.h>
  39#include <linux/mii.h>
  40#include <linux/ethtool.h>
  41#include <linux/dmaengine.h>
  42#include <linux/dma-mapping.h>
  43#include <linux/dma/xilinx_dma.h>
  44#include <linux/circ_buf.h>
  45#include <net/netdev_queues.h>
  46
  47#include "xilinx_axienet.h"
  48
  49/* Descriptors defines for Tx and Rx DMA */
  50#define TX_BD_NUM_DEFAULT		128
  51#define RX_BD_NUM_DEFAULT		1024
  52#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
  53#define TX_BD_NUM_MAX			4096
  54#define RX_BD_NUM_MAX			4096
  55#define DMA_NUM_APP_WORDS		5
  56#define LEN_APP				4
  57#define RX_BUF_NUM_DEFAULT		128
  58
  59/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
  60#define DRIVER_NAME		"xaxienet"
  61#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
  62#define DRIVER_VERSION		"1.00a"
  63
  64#define AXIENET_REGS_N		40
  65
  66static void axienet_rx_submit_desc(struct net_device *ndev);
  67
  68/* Match table for of_platform binding */
  69static const struct of_device_id axienet_of_match[] = {
  70	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
  71	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
  72	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
  73	{},
  74};
  75
  76MODULE_DEVICE_TABLE(of, axienet_of_match);
  77
  78/* Option table for setting up Axi Ethernet hardware options */
  79static struct axienet_option axienet_options[] = {
  80	/* Turn on jumbo packet support for both Rx and Tx */
  81	{
  82		.opt = XAE_OPTION_JUMBO,
  83		.reg = XAE_TC_OFFSET,
  84		.m_or = XAE_TC_JUM_MASK,
  85	}, {
  86		.opt = XAE_OPTION_JUMBO,
  87		.reg = XAE_RCW1_OFFSET,
  88		.m_or = XAE_RCW1_JUM_MASK,
  89	}, { /* Turn on VLAN packet support for both Rx and Tx */
  90		.opt = XAE_OPTION_VLAN,
  91		.reg = XAE_TC_OFFSET,
  92		.m_or = XAE_TC_VLAN_MASK,
  93	}, {
  94		.opt = XAE_OPTION_VLAN,
  95		.reg = XAE_RCW1_OFFSET,
  96		.m_or = XAE_RCW1_VLAN_MASK,
  97	}, { /* Turn on FCS stripping on receive packets */
  98		.opt = XAE_OPTION_FCS_STRIP,
  99		.reg = XAE_RCW1_OFFSET,
 100		.m_or = XAE_RCW1_FCS_MASK,
 101	}, { /* Turn on FCS insertion on transmit packets */
 102		.opt = XAE_OPTION_FCS_INSERT,
 103		.reg = XAE_TC_OFFSET,
 104		.m_or = XAE_TC_FCS_MASK,
 105	}, { /* Turn off length/type field checking on receive packets */
 106		.opt = XAE_OPTION_LENTYPE_ERR,
 107		.reg = XAE_RCW1_OFFSET,
 108		.m_or = XAE_RCW1_LT_DIS_MASK,
 109	}, { /* Turn on Rx flow control */
 110		.opt = XAE_OPTION_FLOW_CONTROL,
 111		.reg = XAE_FCC_OFFSET,
 112		.m_or = XAE_FCC_FCRX_MASK,
 113	}, { /* Turn on Tx flow control */
 114		.opt = XAE_OPTION_FLOW_CONTROL,
 115		.reg = XAE_FCC_OFFSET,
 116		.m_or = XAE_FCC_FCTX_MASK,
 117	}, { /* Turn on promiscuous frame filtering */
 118		.opt = XAE_OPTION_PROMISC,
 119		.reg = XAE_FMI_OFFSET,
 120		.m_or = XAE_FMI_PM_MASK,
 121	}, { /* Enable transmitter */
 122		.opt = XAE_OPTION_TXEN,
 123		.reg = XAE_TC_OFFSET,
 124		.m_or = XAE_TC_TX_MASK,
 125	}, { /* Enable receiver */
 126		.opt = XAE_OPTION_RXEN,
 127		.reg = XAE_RCW1_OFFSET,
 128		.m_or = XAE_RCW1_RX_MASK,
 129	},
 130	{}
 131};
 132
 133static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
 134{
 135	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
 136}
 137
 138static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
 139{
 140	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
 141}
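/* The "& (size - 1)" wrap in the two helpers above relies on
 * RX_BUF_NUM_DEFAULT (128) and TX_BD_NUM_MAX (4096) being powers of two, so a
 * free-running index maps cleanly onto the ring; e.g. 4097 & (4096 - 1) == 1.
 */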
 142
 143/**
 144 * axienet_dma_in32 - Memory mapped Axi DMA register read
 145 * @lp:		Pointer to axienet local structure
 146 * @reg:	Address offset from the base address of the Axi DMA core
 147 *
 148 * Return: The contents of the Axi DMA register
 149 *
 150 * This function returns the contents of the corresponding Axi DMA register.
 151 */
 152static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 153{
 154	return ioread32(lp->dma_regs + reg);
 155}
 156
 157static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
 158			       struct axidma_bd *desc)
 159{
 160	desc->phys = lower_32_bits(addr);
 161	if (lp->features & XAE_FEATURE_DMA_64BIT)
 162		desc->phys_msb = upper_32_bits(addr);
 163}
 164
 165static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
 166				     struct axidma_bd *desc)
 167{
 168	dma_addr_t ret = desc->phys;
 169
 170	if (lp->features & XAE_FEATURE_DMA_64BIT)
 171		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
 172
 173	return ret;
 174}
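/* The two 16-bit shifts above (rather than a single "<< 32") keep the
 * expression well-defined when dma_addr_t is only 32 bits wide: in that case
 * the MSB half simply shifts out to zero instead of invoking undefined
 * behaviour.
 */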
 175
 176/**
 177 * axienet_dma_bd_release - Release buffer descriptor rings
 178 * @ndev:	Pointer to the net_device structure
 179 *
 180 * This function is used to release the descriptors allocated in
 181 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
  182 * driver stop routine is called.
 183 */
 184static void axienet_dma_bd_release(struct net_device *ndev)
 185{
 186	int i;
 187	struct axienet_local *lp = netdev_priv(ndev);
 188
 189	/* If we end up here, tx_bd_v must have been DMA allocated. */
 190	dma_free_coherent(lp->dev,
 191			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 192			  lp->tx_bd_v,
 193			  lp->tx_bd_p);
 194
 195	if (!lp->rx_bd_v)
 196		return;
 197
 198	for (i = 0; i < lp->rx_bd_num; i++) {
 199		dma_addr_t phys;
 200
 201		/* A NULL skb means this descriptor has not been initialised
 202		 * at all.
 203		 */
 204		if (!lp->rx_bd_v[i].skb)
 205			break;
 206
 207		dev_kfree_skb(lp->rx_bd_v[i].skb);
 208
 209		/* For each descriptor, we programmed cntrl with the (non-zero)
 210		 * descriptor size, after it had been successfully allocated.
 211		 * So a non-zero value in there means we need to unmap it.
 212		 */
 213		if (lp->rx_bd_v[i].cntrl) {
 214			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
 215			dma_unmap_single(lp->dev, phys,
 216					 lp->max_frm_size, DMA_FROM_DEVICE);
 217		}
 218	}
 219
 220	dma_free_coherent(lp->dev,
 221			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 222			  lp->rx_bd_v,
 223			  lp->rx_bd_p);
 224}
 225
 226/**
 227 * axienet_usec_to_timer - Calculate IRQ delay timer value
 228 * @lp:		Pointer to the axienet_local structure
 229 * @coalesce_usec: Microseconds to convert into timer value
 230 */
 231static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
 232{
 233	u32 result;
 234	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
 235
 236	if (lp->axi_clk)
 237		clk_rate = clk_get_rate(lp->axi_clk);
 238
 239	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
 240	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
 241					 (u64)125000000);
 242	if (result > 255)
 243		result = 255;
 244
 245	return result;
 246}
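/* Illustrative numbers: with a 100 MHz SG clock, a requested delay of 50 us
 * gives 50 * 100000000 / 125000000 = 40 timeout intervals; any result above
 * 255 is clamped to fit the 8-bit delay timer field.
 */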
 247
 248/**
 249 * axienet_dma_start - Set up DMA registers and start DMA operation
 250 * @lp:		Pointer to the axienet_local structure
 251 */
 252static void axienet_dma_start(struct axienet_local *lp)
 253{
 254	/* Start updating the Rx channel control register */
 255	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
 256			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 257	/* Only set interrupt delay timer if not generating an interrupt on
 258	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
 259	 */
 260	if (lp->coalesce_count_rx > 1)
 261		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
 262					<< XAXIDMA_DELAY_SHIFT) |
 263				 XAXIDMA_IRQ_DELAY_MASK;
 264	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 265
 266	/* Start updating the Tx channel control register */
 267	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
 268			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 269	/* Only set interrupt delay timer if not generating an interrupt on
 270	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
 271	 */
 272	if (lp->coalesce_count_tx > 1)
 273		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
 274					<< XAXIDMA_DELAY_SHIFT) |
 275				 XAXIDMA_IRQ_DELAY_MASK;
 276	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 277
 278	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 279	 * halted state. This will make the Rx side ready for reception.
 280	 */
 281	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 282	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 283	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 284	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
 285			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
 286
 287	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 288	 * Tx channel is now ready to run. But only after we write to the
 289	 * tail pointer register that the Tx channel will start transmitting.
 290	 */
 291	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 292	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 293	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 294}
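/* Coalescing behaviour of the setup above: with coalesce_count_rx == 1 the
 * delay timer stays at zero and every completed frame raises an IOC interrupt
 * immediately; larger counts plus a non-zero delay timer trade a little
 * latency for far fewer interrupts.
 */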
 295
 296/**
 297 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 298 * @ndev:	Pointer to the net_device structure
 299 *
  300 * Return: 0 on success; -ENOMEM on failure.
 301 *
 302 * This function is called to initialize the Rx and Tx DMA descriptor
 303 * rings. This initializes the descriptors with required default values
 304 * and is called when Axi Ethernet driver reset is called.
 305 */
 306static int axienet_dma_bd_init(struct net_device *ndev)
 307{
 308	int i;
 309	struct sk_buff *skb;
 310	struct axienet_local *lp = netdev_priv(ndev);
 311
 312	/* Reset the indexes which are used for accessing the BDs */
 313	lp->tx_bd_ci = 0;
 314	lp->tx_bd_tail = 0;
 315	lp->rx_bd_ci = 0;
 316
 317	/* Allocate the Tx and Rx buffer descriptors. */
 318	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
 319					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 320					 &lp->tx_bd_p, GFP_KERNEL);
 321	if (!lp->tx_bd_v)
 322		return -ENOMEM;
 323
 324	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
 325					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 326					 &lp->rx_bd_p, GFP_KERNEL);
 327	if (!lp->rx_bd_v)
 328		goto out;
 329
 330	for (i = 0; i < lp->tx_bd_num; i++) {
 331		dma_addr_t addr = lp->tx_bd_p +
 332				  sizeof(*lp->tx_bd_v) *
 333				  ((i + 1) % lp->tx_bd_num);
 334
 335		lp->tx_bd_v[i].next = lower_32_bits(addr);
 336		if (lp->features & XAE_FEATURE_DMA_64BIT)
 337			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
 338	}
 339
 340	for (i = 0; i < lp->rx_bd_num; i++) {
 341		dma_addr_t addr;
 342
 343		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
 344			((i + 1) % lp->rx_bd_num);
 345		lp->rx_bd_v[i].next = lower_32_bits(addr);
 346		if (lp->features & XAE_FEATURE_DMA_64BIT)
 347			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
 348
 349		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 350		if (!skb)
 351			goto out;
 352
 353		lp->rx_bd_v[i].skb = skb;
 354		addr = dma_map_single(lp->dev, skb->data,
 355				      lp->max_frm_size, DMA_FROM_DEVICE);
 356		if (dma_mapping_error(lp->dev, addr)) {
 357			netdev_err(ndev, "DMA mapping error\n");
 358			goto out;
 359		}
 360		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
 361
 362		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 363	}
 364
 365	axienet_dma_start(lp);
 366
 367	return 0;
 368out:
 369	axienet_dma_bd_release(ndev);
 370	return -ENOMEM;
 371}
 372
 373/**
 374 * axienet_set_mac_address - Write the MAC address
 375 * @ndev:	Pointer to the net_device structure
 376 * @address:	6 byte Address to be written as MAC address
 377 *
 378 * This function is called to initialize the MAC address of the Axi Ethernet
 379 * core. It writes to the UAW0 and UAW1 registers of the core.
 380 */
 381static void axienet_set_mac_address(struct net_device *ndev,
 382				    const void *address)
 383{
 384	struct axienet_local *lp = netdev_priv(ndev);
 385
 386	if (address)
 387		eth_hw_addr_set(ndev, address);
 388	if (!is_valid_ether_addr(ndev->dev_addr))
 389		eth_hw_addr_random(ndev);
 390
  391	/* Set up the unicast MAC address filter and set its MAC address */
 392	axienet_iow(lp, XAE_UAW0_OFFSET,
 393		    (ndev->dev_addr[0]) |
 394		    (ndev->dev_addr[1] << 8) |
 395		    (ndev->dev_addr[2] << 16) |
 396		    (ndev->dev_addr[3] << 24));
 397	axienet_iow(lp, XAE_UAW1_OFFSET,
 398		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
 399		      ~XAE_UAW1_UNICASTADDR_MASK) |
 400		     (ndev->dev_addr[4] |
 401		     (ndev->dev_addr[5] << 8))));
 402}
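/* Packing example (illustrative): for the MAC address 00:0a:35:01:02:03 the
 * writes above set UAW0 = 0x01350a00 and place 0x0302 in the low 16 bits of
 * UAW1, i.e. the address is packed byte 0 first across the two registers.
 */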
 403
 404/**
 405 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 406 * @ndev:	Pointer to the net_device structure
 407 * @p:		6 byte Address to be written as MAC address
 408 *
 409 * Return: 0 for all conditions. Presently, there is no failure case.
 410 *
 411 * This function is called to initialize the MAC address of the Axi Ethernet
 412 * core. It calls the core specific axienet_set_mac_address. This is the
 413 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 414 */
 415static int netdev_set_mac_address(struct net_device *ndev, void *p)
 416{
 417	struct sockaddr *addr = p;
 418
 419	axienet_set_mac_address(ndev, addr->sa_data);
 420	return 0;
 421}
 422
 423/**
 424 * axienet_set_multicast_list - Prepare the multicast table
 425 * @ndev:	Pointer to the net_device structure
 426 *
 427 * This function is called to initialize the multicast table during
 428 * initialization. The Axi Ethernet basic multicast support has a four-entry
 429 * multicast table which is initialized here. Additionally this function
 430 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 431 * means whenever the multicast table entries need to be updated this
 432 * function gets called.
 433 */
 434static void axienet_set_multicast_list(struct net_device *ndev)
 435{
 436	int i = 0;
 437	u32 reg, af0reg, af1reg;
 438	struct axienet_local *lp = netdev_priv(ndev);
 439
 440	reg = axienet_ior(lp, XAE_FMI_OFFSET);
 441	reg &= ~XAE_FMI_PM_MASK;
 442	if (ndev->flags & IFF_PROMISC)
 443		reg |= XAE_FMI_PM_MASK;
 444	else
 445		reg &= ~XAE_FMI_PM_MASK;
 446	axienet_iow(lp, XAE_FMI_OFFSET, reg);
 447
 448	if (ndev->flags & IFF_ALLMULTI ||
 449	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
 450		reg &= 0xFFFFFF00;
 451		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 452		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
 453		axienet_iow(lp, XAE_AF1_OFFSET, 0);
 454		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
 455		axienet_iow(lp, XAE_AM1_OFFSET, 0);
 456		axienet_iow(lp, XAE_FFE_OFFSET, 1);
 457		i = 1;
 458	} else if (!netdev_mc_empty(ndev)) {
 459		struct netdev_hw_addr *ha;
 460
 461		netdev_for_each_mc_addr(ha, ndev) {
 462			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
 463				break;
 464
 465			af0reg = (ha->addr[0]);
 466			af0reg |= (ha->addr[1] << 8);
 467			af0reg |= (ha->addr[2] << 16);
 468			af0reg |= (ha->addr[3] << 24);
 469
 470			af1reg = (ha->addr[4]);
 471			af1reg |= (ha->addr[5] << 8);
 472
 473			reg &= 0xFFFFFF00;
 474			reg |= i;
 475
 476			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 477			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
 478			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
 479			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
 480			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
 481			axienet_iow(lp, XAE_FFE_OFFSET, 1);
 482			i++;
 483		}
 484	}
 485
 486	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
 487		reg &= 0xFFFFFF00;
 488		reg |= i;
 489		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 490		axienet_iow(lp, XAE_FFE_OFFSET, 0);
 491	}
 492}
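/* CAM programming sketch for the function above: the low byte written to
 * XAE_FMI_OFFSET selects which of the XAE_MULTICAST_CAM_TABLE_NUM entries the
 * following AF0/AF1/AM0/AM1 writes target, XAE_FFE_OFFSET = 1 enables that
 * entry, and the trailing loop disables every entry left unused.
 */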
 493
 494/**
 495 * axienet_setoptions - Set an Axi Ethernet option
 496 * @ndev:	Pointer to the net_device structure
 497 * @options:	Option to be enabled/disabled
 498 *
 499 * The Axi Ethernet core has multiple features which can be selectively turned
 500 * on or off. The typical options could be jumbo frame option, basic VLAN
 501 * option, promiscuous mode option etc. This function is used to set or clear
 502 * these options in the Axi Ethernet hardware. This is done through
  503 * the axienet_option structure.
 504 */
 505static void axienet_setoptions(struct net_device *ndev, u32 options)
 506{
 507	int reg;
 508	struct axienet_local *lp = netdev_priv(ndev);
 509	struct axienet_option *tp = &axienet_options[0];
 510
 511	while (tp->opt) {
 512		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
 513		if (options & tp->opt)
 514			reg |= tp->m_or;
 515		axienet_iow(lp, tp->reg, reg);
 516		tp++;
 517	}
 518
 519	lp->options |= options;
 520}
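/* Usage sketch: axienet_device_reset() first calls
 * axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN))
 * to sync every option while keeping the MAC quiescent, then re-enables the
 * transmitter and receiver with a second call once setup is complete.
 */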
 521
 522static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
 523{
 524	u32 counter;
 525
 526	if (lp->reset_in_progress)
 527		return lp->hw_stat_base[stat];
 528
 529	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
 530	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
 531}
 532
 533static void axienet_stats_update(struct axienet_local *lp, bool reset)
 534{
 535	enum temac_stat stat;
 536
 537	write_seqcount_begin(&lp->hw_stats_seqcount);
 538	lp->reset_in_progress = reset;
 539	for (stat = 0; stat < STAT_COUNT; stat++) {
 540		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
 541
 542		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
 543		lp->hw_last_counter[stat] = counter;
 544	}
 545	write_seqcount_end(&lp->hw_stats_seqcount);
 546}
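/* The "counter - hw_last_counter" delta above is what makes the 32-bit
 * hardware counters wrap-safe: unsigned subtraction yields the correct
 * increment as long as a refresh happens more often than a full counter wrap.
 */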
 547
 548static void axienet_refresh_stats(struct work_struct *work)
 549{
 550	struct axienet_local *lp = container_of(work, struct axienet_local,
 551						stats_work.work);
 552
 553	mutex_lock(&lp->stats_lock);
 554	axienet_stats_update(lp, false);
 555	mutex_unlock(&lp->stats_lock);
 556
 557	/* Just less than 2^32 bytes at 2.5 GBit/s */
 558	schedule_delayed_work(&lp->stats_work, 13 * HZ);
 559}
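/* Worked numbers for the 13 * HZ interval: at 2.5 Gbit/s the byte counters
 * grow by at most ~312500000 bytes/s, so a 32-bit counter needs at least
 * 2^32 / 312500000 ~= 13.7 s to wrap; refreshing every 13 s keeps the deltas
 * unambiguous.
 */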
 560
 561static int __axienet_device_reset(struct axienet_local *lp)
 562{
 563	u32 value;
 564	int ret;
 565
 566	/* Save statistics counters in case they will be reset */
 567	mutex_lock(&lp->stats_lock);
 568	if (lp->features & XAE_FEATURE_STATS)
 569		axienet_stats_update(lp, true);
 570
 571	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 572	 * process of Axi DMA takes a while to complete as all pending
 573	 * commands/transfers will be flushed or completed during this
 574	 * reset process.
 575	 * Note that even though both TX and RX have their own reset register,
 576	 * they both reset the entire DMA core, so only one needs to be used.
 577	 */
 578	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
 579	ret = read_poll_timeout(axienet_dma_in32, value,
 580				!(value & XAXIDMA_CR_RESET_MASK),
 581				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 582				XAXIDMA_TX_CR_OFFSET);
 583	if (ret) {
 584		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
 585		goto out;
 586	}
 587
 588	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
 589	ret = read_poll_timeout(axienet_ior, value,
 590				value & XAE_INT_PHYRSTCMPLT_MASK,
 591				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 592				XAE_IS_OFFSET);
 593	if (ret) {
 594		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
 595		goto out;
 596	}
 597
 598	/* Update statistics counters with new values */
 599	if (lp->features & XAE_FEATURE_STATS) {
 600		enum temac_stat stat;
 601
 602		write_seqcount_begin(&lp->hw_stats_seqcount);
 603		lp->reset_in_progress = false;
 604		for (stat = 0; stat < STAT_COUNT; stat++) {
 605			u32 counter =
 606				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
 607
 608			lp->hw_stat_base[stat] +=
 609				lp->hw_last_counter[stat] - counter;
 610			lp->hw_last_counter[stat] = counter;
 611		}
 612		write_seqcount_end(&lp->hw_stats_seqcount);
 613	}
 614
 615out:
 616	mutex_unlock(&lp->stats_lock);
 617	return ret;
 618}
 619
 620/**
 621 * axienet_dma_stop - Stop DMA operation
 622 * @lp:		Pointer to the axienet_local structure
 623 */
 624static void axienet_dma_stop(struct axienet_local *lp)
 625{
 626	int count;
 627	u32 cr, sr;
 628
 629	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 630	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 631	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 632	synchronize_irq(lp->rx_irq);
 633
 634	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 635	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 636	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 637	synchronize_irq(lp->tx_irq);
 638
 639	/* Give DMAs a chance to halt gracefully */
 640	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 641	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 642		msleep(20);
 643		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 644	}
 645
 646	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 647	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 648		msleep(20);
 649		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 650	}
 651
 652	/* Do a reset to ensure DMA is really stopped */
 653	axienet_lock_mii(lp);
 654	__axienet_device_reset(lp);
 655	axienet_unlock_mii(lp);
 656}
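/* The two polling loops above give each channel up to 5 * 20 ms = 100 ms to
 * report a graceful halt before the final __axienet_device_reset() forcibly
 * stops the DMA engine.
 */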
 657
 658/**
 659 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 660 * @ndev:	Pointer to the net_device structure
 661 *
 662 * This function is called to reset and initialize the Axi Ethernet core. This
 663 * is typically called during initialization. It does a reset of the Axi DMA
 664 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 665 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 666 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 667 * core.
 668 * Returns 0 on success or a negative error number otherwise.
 669 */
 670static int axienet_device_reset(struct net_device *ndev)
 671{
 672	u32 axienet_status;
 673	struct axienet_local *lp = netdev_priv(ndev);
 674	int ret;
 675
 676	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 677	lp->options |= XAE_OPTION_VLAN;
 678	lp->options &= (~XAE_OPTION_JUMBO);
 679
 680	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
 681		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
 682					XAE_TRL_SIZE;
 683
 684		if (lp->max_frm_size <= lp->rxmem)
 685			lp->options |= XAE_OPTION_JUMBO;
 686	}
 687
 688	if (!lp->use_dmaengine) {
 689		ret = __axienet_device_reset(lp);
 690		if (ret)
 691			return ret;
 692
 693		ret = axienet_dma_bd_init(ndev);
 694		if (ret) {
 695			netdev_err(ndev, "%s: descriptor allocation failed\n",
 696				   __func__);
 697			return ret;
 698		}
 699	}
 700
 701	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
 702	axienet_status &= ~XAE_RCW1_RX_MASK;
 703	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
 704
 705	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
 706	if (axienet_status & XAE_INT_RXRJECT_MASK)
 707		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
 708	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
 709		    XAE_INT_RECV_ERROR_MASK : 0);
 710
 711	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 712
 713	/* Sync default options with HW but leave receiver and
 714	 * transmitter disabled.
 715	 */
 716	axienet_setoptions(ndev, lp->options &
 717			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 718	axienet_set_mac_address(ndev, NULL);
 719	axienet_set_multicast_list(ndev);
 720	axienet_setoptions(ndev, lp->options);
 721
 722	netif_trans_update(ndev);
 723
 724	return 0;
 725}
 726
 727/**
 728 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 729 * @lp:		Pointer to the axienet_local structure
 730 * @first_bd:	Index of first descriptor to clean up
 731 * @nr_bds:	Max number of descriptors to clean up
 732 * @force:	Whether to clean descriptors even if not complete
 733 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 734 *		in all cleaned-up descriptors. Ignored if NULL.
 735 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 736 *
 737 * Would either be called after a successful transmit operation, or after
 738 * there was an error when setting up the chain.
 739 * Returns the number of packets handled.
 740 */
 741static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
 742				 int nr_bds, bool force, u32 *sizep, int budget)
 743{
 744	struct axidma_bd *cur_p;
 745	unsigned int status;
 746	int i, packets = 0;
 747	dma_addr_t phys;
 748
 749	for (i = 0; i < nr_bds; i++) {
 750		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
 751		status = cur_p->status;
 752
 753		/* If force is not specified, clean up only descriptors
 754		 * that have been completed by the MAC.
 755		 */
 756		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 757			break;
 758
 759		/* Ensure we see complete descriptor update */
 760		dma_rmb();
 761		phys = desc_get_phys_addr(lp, cur_p);
 762		dma_unmap_single(lp->dev, phys,
 763				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 764				 DMA_TO_DEVICE);
 765
 766		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
 767			napi_consume_skb(cur_p->skb, budget);
 768			packets++;
 769		}
 770
 771		cur_p->app0 = 0;
 772		cur_p->app1 = 0;
 773		cur_p->app2 = 0;
 774		cur_p->app4 = 0;
 775		cur_p->skb = NULL;
 776		/* ensure our transmit path and device don't prematurely see status cleared */
 777		wmb();
 778		cur_p->cntrl = 0;
 779		cur_p->status = 0;
 780
 781		if (sizep)
 782			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 783	}
 784
 785	if (!force) {
 786		lp->tx_bd_ci += i;
 787		if (lp->tx_bd_ci >= lp->tx_bd_num)
 788			lp->tx_bd_ci %= lp->tx_bd_num;
 789	}
 790
 791	return packets;
 792}
 793
 794/**
 795 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 796 * @lp:		Pointer to the axienet_local structure
 797 * @num_frag:	The number of BDs to check for
 798 *
 799 * Return: 0, on success
 800 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 801 *
 802 * This function is invoked before BDs are allocated and transmission starts.
 803 * This function returns 0 if a BD or group of BDs can be allocated for
 804 * transmission. If the BD or any of the BDs are not free the function
 805 * returns a busy status.
 806 */
 807static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 808					    int num_frag)
 809{
 810	struct axidma_bd *cur_p;
 811
 812	/* Ensure we see all descriptor updates from device or TX polling */
 813	rmb();
 814	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
 815			     lp->tx_bd_num];
 816	if (cur_p->cntrl)
 817		return NETDEV_TX_BUSY;
 818	return 0;
 819}
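/* The check above peeks num_frag slots past the tail: a non-zero cntrl there
 * means axienet_free_tx_chain() has not yet cleaned that descriptor, so the
 * requested group of descriptors cannot be placed without catching up to
 * in-flight work.
 */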
 820
 821/**
 822 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 823 * @data:       Pointer to the axienet_local structure.
 824 * @result:     error reporting through dmaengine_result.
  825 * This function is called by the dmaengine driver for the TX channel to
  826 * notify that the transmit is done.
 827 */
 828static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
 829{
 830	struct skbuf_dma_descriptor *skbuf_dma;
 831	struct axienet_local *lp = data;
 832	struct netdev_queue *txq;
 833	int len;
 834
 835	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
 836	len = skbuf_dma->skb->len;
 837	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
 838	u64_stats_update_begin(&lp->tx_stat_sync);
 839	u64_stats_add(&lp->tx_bytes, len);
 840	u64_stats_add(&lp->tx_packets, 1);
 841	u64_stats_update_end(&lp->tx_stat_sync);
 842	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
 843	dev_consume_skb_any(skbuf_dma->skb);
 844	netif_txq_completed_wake(txq, 1, len,
 845				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 846				 2 * MAX_SKB_FRAGS);
 847}
 848
 849/**
 850 * axienet_start_xmit_dmaengine - Starts the transmission.
 851 * @skb:        sk_buff pointer that contains data to be Txed.
 852 * @ndev:       Pointer to net_device structure.
 853 *
  854 * Return: NETDEV_TX_OK on success, or for any non-space error (in which
  855 *         case the skb is dropped). NETDEV_TX_BUSY when no free element
  856 *         is available in the TX skb ring buffer.
 857 *
  858 * This function is invoked to initiate transmission. It maps the skb,
  859 * registers the DMA completion callback and submits the DMA
  860 * transaction.
 861 * Additionally if checksum offloading is supported,
 862 * it populates AXI Stream Control fields with appropriate values.
 863 */
 864static netdev_tx_t
 865axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
 866{
 867	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
 868	struct axienet_local *lp = netdev_priv(ndev);
 869	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
 870	struct skbuf_dma_descriptor *skbuf_dma;
 871	struct dma_device *dma_dev;
 872	struct netdev_queue *txq;
 873	u32 csum_start_off;
 874	u32 csum_index_off;
 875	int sg_len;
 876	int ret;
 877
 878	dma_dev = lp->tx_chan->device;
 879	sg_len = skb_shinfo(skb)->nr_frags + 1;
 880	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
 881		netif_stop_queue(ndev);
 882		if (net_ratelimit())
 883			netdev_warn(ndev, "TX ring unexpectedly full\n");
 884		return NETDEV_TX_BUSY;
 885	}
 886
 887	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
 888	if (!skbuf_dma)
 889		goto xmit_error_drop_skb;
 890
 891	lp->tx_ring_head++;
 892	sg_init_table(skbuf_dma->sgl, sg_len);
 893	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
 894	if (ret < 0)
 895		goto xmit_error_drop_skb;
 896
 897	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
 898	if (!ret)
 899		goto xmit_error_drop_skb;
 900
 901	/* Fill up app fields for checksum */
 902	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 903		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 904			/* Tx Full Checksum Offload Enabled */
 905			app_metadata[0] |= 2;
 906		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
 907			csum_start_off = skb_transport_offset(skb);
 908			csum_index_off = csum_start_off + skb->csum_offset;
 909			/* Tx Partial Checksum Offload Enabled */
 910			app_metadata[0] |= 1;
 911			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
 912		}
 913	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 914		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
 915	}
 916
 917	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
 918			sg_len, DMA_MEM_TO_DEV,
 919			DMA_PREP_INTERRUPT, (void *)app_metadata);
 920	if (!dma_tx_desc)
 921		goto xmit_error_unmap_sg;
 922
 923	skbuf_dma->skb = skb;
 924	skbuf_dma->sg_len = sg_len;
 925	dma_tx_desc->callback_param = lp;
 926	dma_tx_desc->callback_result = axienet_dma_tx_cb;
 927	txq = skb_get_tx_queue(lp->ndev, skb);
 928	netdev_tx_sent_queue(txq, skb->len);
 929	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 930			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
 931
 932	dmaengine_submit(dma_tx_desc);
 933	dma_async_issue_pending(lp->tx_chan);
 934	return NETDEV_TX_OK;
 935
 936xmit_error_unmap_sg:
 937	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
 938xmit_error_drop_skb:
 939	dev_kfree_skb_any(skb);
 940	return NETDEV_TX_OK;
 941}
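/* App-word encoding used above (as programmed by this function): setting
 * bit 1 of app_metadata[0] requests full TX checksum offload, bit 0 requests
 * partial offload, and for partial offload app_metadata[1] carries
 * (csum_start_off << 16) | csum_index_off.
 */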
 942
 943/**
 944 * axienet_tx_poll - Invoked once a transmit is completed by the
 945 * Axi DMA Tx channel.
 946 * @napi:	Pointer to NAPI structure.
 947 * @budget:	Max number of TX packets to process.
 948 *
 949 * Return: Number of TX packets processed.
 950 *
 951 * This function is invoked from the NAPI processing to notify the completion
 952 * of transmit operation. It clears fields in the corresponding Tx BDs and
 953 * unmaps the corresponding buffer so that CPU can regain ownership of the
 954 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 955 * required.
 956 */
 957static int axienet_tx_poll(struct napi_struct *napi, int budget)
 958{
 959	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
 960	struct net_device *ndev = lp->ndev;
 961	u32 size = 0;
 962	int packets;
 963
 964	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
 965					&size, budget);
 966
 967	if (packets) {
 968		u64_stats_update_begin(&lp->tx_stat_sync);
 969		u64_stats_add(&lp->tx_packets, packets);
 970		u64_stats_add(&lp->tx_bytes, size);
 971		u64_stats_update_end(&lp->tx_stat_sync);
 972
 973		/* Matches barrier in axienet_start_xmit */
 974		smp_mb();
 975
 976		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
 977			netif_wake_queue(ndev);
 978	}
 979
 980	if (packets < budget && napi_complete_done(napi, packets)) {
 981		/* Re-enable TX completion interrupts. This should
 982		 * cause an immediate interrupt if any TX packets are
 983		 * already pending.
 984		 */
 985		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 986	}
 987	return packets;
 988}
 989
 990/**
 991 * axienet_start_xmit - Starts the transmission.
 992 * @skb:	sk_buff pointer that contains data to be Txed.
 993 * @ndev:	Pointer to net_device structure.
 994 *
 995 * Return: NETDEV_TX_OK, on success
 996 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 997 *
 998 * This function is invoked from upper layers to initiate transmission. The
 999 * function uses the next available free BDs and populates their fields to
1000 * start the transmission. Additionally if checksum offloading is supported,
1001 * it populates AXI Stream Control fields with appropriate values.
1002 */
1003static netdev_tx_t
1004axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1005{
1006	u32 ii;
1007	u32 num_frag;
1008	u32 csum_start_off;
1009	u32 csum_index_off;
1010	skb_frag_t *frag;
1011	dma_addr_t tail_p, phys;
1012	u32 orig_tail_ptr, new_tail_ptr;
1013	struct axienet_local *lp = netdev_priv(ndev);
1014	struct axidma_bd *cur_p;
1015
1016	orig_tail_ptr = lp->tx_bd_tail;
1017	new_tail_ptr = orig_tail_ptr;
1018
1019	num_frag = skb_shinfo(skb)->nr_frags;
1020	cur_p = &lp->tx_bd_v[orig_tail_ptr];
1021
1022	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
1023		/* Should not happen as last start_xmit call should have
1024		 * checked for sufficient space and queue should only be
1025		 * woken when sufficient space is available.
1026		 */
1027		netif_stop_queue(ndev);
1028		if (net_ratelimit())
1029			netdev_warn(ndev, "TX ring unexpectedly full\n");
1030		return NETDEV_TX_BUSY;
1031	}
1032
1033	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1034		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1035			/* Tx Full Checksum Offload Enabled */
1036			cur_p->app0 |= 2;
1037		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
1038			csum_start_off = skb_transport_offset(skb);
1039			csum_index_off = csum_start_off + skb->csum_offset;
1040			/* Tx Partial Checksum Offload Enabled */
1041			cur_p->app0 |= 1;
1042			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1043		}
1044	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1045		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1046	}
1047
1048	phys = dma_map_single(lp->dev, skb->data,
1049			      skb_headlen(skb), DMA_TO_DEVICE);
1050	if (unlikely(dma_mapping_error(lp->dev, phys))) {
1051		if (net_ratelimit())
1052			netdev_err(ndev, "TX DMA mapping error\n");
1053		ndev->stats.tx_dropped++;
1054		dev_kfree_skb_any(skb);
1055		return NETDEV_TX_OK;
1056	}
1057	desc_set_phys_addr(lp, phys, cur_p);
1058	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
1059
1060	for (ii = 0; ii < num_frag; ii++) {
1061		if (++new_tail_ptr >= lp->tx_bd_num)
1062			new_tail_ptr = 0;
1063		cur_p = &lp->tx_bd_v[new_tail_ptr];
1064		frag = &skb_shinfo(skb)->frags[ii];
1065		phys = dma_map_single(lp->dev,
1066				      skb_frag_address(frag),
1067				      skb_frag_size(frag),
1068				      DMA_TO_DEVICE);
1069		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1070			if (net_ratelimit())
1071				netdev_err(ndev, "TX DMA mapping error\n");
1072			ndev->stats.tx_dropped++;
1073			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1074					      true, NULL, 0);
1075			dev_kfree_skb_any(skb);
1076			return NETDEV_TX_OK;
1077		}
1078		desc_set_phys_addr(lp, phys, cur_p);
1079		cur_p->cntrl = skb_frag_size(frag);
1080	}
1081
1082	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1083	cur_p->skb = skb;
1084
1085	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1086	if (++new_tail_ptr >= lp->tx_bd_num)
1087		new_tail_ptr = 0;
1088	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1089
1090	/* Start the transfer */
1091	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1092
1093	/* Stop queue if next transmit may not have space */
1094	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1095		netif_stop_queue(ndev);
1096
1097		/* Matches barrier in axienet_tx_poll */
1098		smp_mb();
1099
1100		/* Space might have just been freed - check again */
1101		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1102			netif_wake_queue(ndev);
1103	}
1104
1105	return NETDEV_TX_OK;
1106}
1107
1108/**
1109 * axienet_dma_rx_cb - DMA engine callback for RX channel.
1110 * @data:       Pointer to the skbuf_dma_descriptor structure.
1111 * @result:     error reporting through dmaengine_result.
 1112 * This function is called by the dmaengine driver for the RX channel to
 1113 * notify that a packet has been received.
1114 */
1115static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1116{
1117	struct skbuf_dma_descriptor *skbuf_dma;
1118	size_t meta_len, meta_max_len, rx_len;
1119	struct axienet_local *lp = data;
1120	struct sk_buff *skb;
1121	u32 *app_metadata;
1122
1123	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1124	skb = skbuf_dma->skb;
1125	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1126						       &meta_max_len);
1127	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1128			 DMA_FROM_DEVICE);
1129	/* TODO: Derive app word index programmatically */
1130	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1131	skb_put(skb, rx_len);
1132	skb->protocol = eth_type_trans(skb, lp->ndev);
1133	skb->ip_summed = CHECKSUM_NONE;
1134
1135	__netif_rx(skb);
1136	u64_stats_update_begin(&lp->rx_stat_sync);
1137	u64_stats_add(&lp->rx_packets, 1);
1138	u64_stats_add(&lp->rx_bytes, rx_len);
1139	u64_stats_update_end(&lp->rx_stat_sync);
1140	axienet_rx_submit_desc(lp->ndev);
1141	dma_async_issue_pending(lp->rx_chan);
1142}
1143
1144/**
1145 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1146 * @napi:	Pointer to NAPI structure.
1147 * @budget:	Max number of RX packets to process.
1148 *
1149 * Return: Number of RX packets processed.
1150 */
1151static int axienet_rx_poll(struct napi_struct *napi, int budget)
1152{
1153	u32 length;
1154	u32 csumstatus;
1155	u32 size = 0;
1156	int packets = 0;
1157	dma_addr_t tail_p = 0;
1158	struct axidma_bd *cur_p;
1159	struct sk_buff *skb, *new_skb;
1160	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1161
1162	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1163
1164	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1165		dma_addr_t phys;
1166
1167		/* Ensure we see complete descriptor update */
1168		dma_rmb();
1169
1170		skb = cur_p->skb;
1171		cur_p->skb = NULL;
1172
1173		/* skb could be NULL if a previous pass already received the
1174		 * packet for this slot in the ring, but failed to refill it
1175		 * with a newly allocated buffer. In this case, don't try to
1176		 * receive it again.
1177		 */
1178		if (likely(skb)) {
1179			length = cur_p->app4 & 0x0000FFFF;
1180
1181			phys = desc_get_phys_addr(lp, cur_p);
1182			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1183					 DMA_FROM_DEVICE);
1184
1185			skb_put(skb, length);
1186			skb->protocol = eth_type_trans(skb, lp->ndev);
1187			/*skb_checksum_none_assert(skb);*/
1188			skb->ip_summed = CHECKSUM_NONE;
1189
1190			/* if we're doing Rx csum offload, set it up */
1191			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1192				csumstatus = (cur_p->app2 &
1193					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
1194				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1195				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1196					skb->ip_summed = CHECKSUM_UNNECESSARY;
1197				}
1198			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1199				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1200				skb->ip_summed = CHECKSUM_COMPLETE;
1201			}
1202
1203			napi_gro_receive(napi, skb);
1204
1205			size += length;
1206			packets++;
1207		}
1208
1209		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1210		if (!new_skb)
1211			break;
1212
1213		phys = dma_map_single(lp->dev, new_skb->data,
1214				      lp->max_frm_size,
1215				      DMA_FROM_DEVICE);
1216		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1217			if (net_ratelimit())
1218				netdev_err(lp->ndev, "RX DMA mapping error\n");
1219			dev_kfree_skb(new_skb);
1220			break;
1221		}
1222		desc_set_phys_addr(lp, phys, cur_p);
1223
1224		cur_p->cntrl = lp->max_frm_size;
1225		cur_p->status = 0;
1226		cur_p->skb = new_skb;
1227
1228		/* Only update tail_p to mark this slot as usable after it has
1229		 * been successfully refilled.
1230		 */
1231		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1232
1233		if (++lp->rx_bd_ci >= lp->rx_bd_num)
1234			lp->rx_bd_ci = 0;
1235		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1236	}
1237
1238	u64_stats_update_begin(&lp->rx_stat_sync);
1239	u64_stats_add(&lp->rx_packets, packets);
1240	u64_stats_add(&lp->rx_bytes, size);
1241	u64_stats_update_end(&lp->rx_stat_sync);
1242
1243	if (tail_p)
1244		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1245
1246	if (packets < budget && napi_complete_done(napi, packets)) {
1247		/* Re-enable RX completion interrupts. This should
1248		 * cause an immediate interrupt if any RX packets are
1249		 * already pending.
1250		 */
1251		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1252	}
1253	return packets;
1254}
1255
1256/**
1257 * axienet_tx_irq - Tx Done Isr.
1258 * @irq:	irq number
1259 * @_ndev:	net_device pointer
1260 *
1261 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1262 *
1263 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1264 * TX BD processing.
1265 */
1266static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1267{
1268	unsigned int status;
1269	struct net_device *ndev = _ndev;
1270	struct axienet_local *lp = netdev_priv(ndev);
1271
1272	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1273
1274	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1275		return IRQ_NONE;
1276
1277	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1278
1279	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1280		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1281		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1282			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1283			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1284		schedule_work(&lp->dma_err_task);
1285	} else {
1286		/* Disable further TX completion interrupts and schedule
1287		 * NAPI to handle the completions.
1288		 */
1289		u32 cr = lp->tx_dma_cr;
1290
1291		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1292		if (napi_schedule_prep(&lp->napi_tx)) {
1293			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1294			__napi_schedule(&lp->napi_tx);
1295		}
1296	}
1297
1298	return IRQ_HANDLED;
1299}
1300
1301/**
1302 * axienet_rx_irq - Rx Isr.
1303 * @irq:	irq number
1304 * @_ndev:	net_device pointer
1305 *
1306 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1307 *
1308 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1309 * processing.
1310 */
1311static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1312{
1313	unsigned int status;
1314	struct net_device *ndev = _ndev;
1315	struct axienet_local *lp = netdev_priv(ndev);
1316
1317	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1318
1319	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1320		return IRQ_NONE;
1321
1322	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1323
1324	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1325		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1326		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1327			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1328			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1329		schedule_work(&lp->dma_err_task);
1330	} else {
1331		/* Disable further RX completion interrupts and schedule
1332		 * NAPI receive.
1333		 */
1334		u32 cr = lp->rx_dma_cr;
1335
1336		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1337		if (napi_schedule_prep(&lp->napi_rx)) {
1338			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1339			__napi_schedule(&lp->napi_rx);
1340		}
1341	}
1342
1343	return IRQ_HANDLED;
1344}
1345
1346/**
1347 * axienet_eth_irq - Ethernet core Isr.
1348 * @irq:	irq number
1349 * @_ndev:	net_device pointer
1350 *
1351 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1352 *
1353 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1354 */
1355static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1356{
1357	struct net_device *ndev = _ndev;
1358	struct axienet_local *lp = netdev_priv(ndev);
1359	unsigned int pending;
1360
1361	pending = axienet_ior(lp, XAE_IP_OFFSET);
1362	if (!pending)
1363		return IRQ_NONE;
1364
1365	if (pending & XAE_INT_RXFIFOOVR_MASK)
1366		ndev->stats.rx_missed_errors++;
1367
1368	if (pending & XAE_INT_RXRJECT_MASK)
1369		ndev->stats.rx_dropped++;
1370
1371	axienet_iow(lp, XAE_IS_OFFSET, pending);
1372	return IRQ_HANDLED;
1373}
1374
1375static void axienet_dma_err_handler(struct work_struct *work);
1376
1377/**
 1378 * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
 1379 * Allocates an skbuff, maps it into the scatterlist, obtains a descriptor,
 1380 * then adds the callback information and submits the descriptor.
1381 *
1382 * @ndev:	net_device pointer
1383 *
1384 */
1385static void axienet_rx_submit_desc(struct net_device *ndev)
1386{
1387	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1388	struct axienet_local *lp = netdev_priv(ndev);
1389	struct skbuf_dma_descriptor *skbuf_dma;
1390	struct sk_buff *skb;
1391	dma_addr_t addr;
1392
1393	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1394	if (!skbuf_dma)
1395		return;
1396
1397	lp->rx_ring_head++;
1398	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1399	if (!skb)
1400		return;
1401
1402	sg_init_table(skbuf_dma->sgl, 1);
1403	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1404	if (unlikely(dma_mapping_error(lp->dev, addr))) {
1405		if (net_ratelimit())
1406			netdev_err(ndev, "DMA mapping error\n");
1407		goto rx_submit_err_free_skb;
1408	}
1409	sg_dma_address(skbuf_dma->sgl) = addr;
1410	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1411	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1412					      1, DMA_DEV_TO_MEM,
1413					      DMA_PREP_INTERRUPT);
1414	if (!dma_rx_desc)
1415		goto rx_submit_err_unmap_skb;
1416
1417	skbuf_dma->skb = skb;
1418	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1419	skbuf_dma->desc = dma_rx_desc;
1420	dma_rx_desc->callback_param = lp;
1421	dma_rx_desc->callback_result = axienet_dma_rx_cb;
1422	dmaengine_submit(dma_rx_desc);
1423
1424	return;
1425
1426rx_submit_err_unmap_skb:
1427	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1428rx_submit_err_free_skb:
1429	dev_kfree_skb(skb);
1430}
1431
1432/**
1433 * axienet_init_dmaengine - init the dmaengine code.
1434 * @ndev:       Pointer to net_device structure
1435 *
1436 * Return: 0, on success.
1437 *          non-zero error value on failure
1438 *
1439 * This is the dmaengine initialization code.
1440 */
1441static int axienet_init_dmaengine(struct net_device *ndev)
1442{
1443	struct axienet_local *lp = netdev_priv(ndev);
1444	struct skbuf_dma_descriptor *skbuf_dma;
1445	int i, ret;
1446
1447	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1448	if (IS_ERR(lp->tx_chan)) {
1449		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1450		return PTR_ERR(lp->tx_chan);
1451	}
1452
1453	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1454	if (IS_ERR(lp->rx_chan)) {
1455		ret = PTR_ERR(lp->rx_chan);
1456		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1457		goto err_dma_release_tx;
1458	}
1459
1460	lp->tx_ring_tail = 0;
1461	lp->tx_ring_head = 0;
1462	lp->rx_ring_tail = 0;
1463	lp->rx_ring_head = 0;
1464	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1465				  GFP_KERNEL);
1466	if (!lp->tx_skb_ring) {
1467		ret = -ENOMEM;
1468		goto err_dma_release_rx;
1469	}
1470	for (i = 0; i < TX_BD_NUM_MAX; i++) {
1471		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1472		if (!skbuf_dma) {
1473			ret = -ENOMEM;
1474			goto err_free_tx_skb_ring;
1475		}
1476		lp->tx_skb_ring[i] = skbuf_dma;
1477	}
1478
1479	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1480				  GFP_KERNEL);
1481	if (!lp->rx_skb_ring) {
1482		ret = -ENOMEM;
1483		goto err_free_tx_skb_ring;
1484	}
1485	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1486		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1487		if (!skbuf_dma) {
1488			ret = -ENOMEM;
1489			goto err_free_rx_skb_ring;
1490		}
1491		lp->rx_skb_ring[i] = skbuf_dma;
1492	}
1493	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1494	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1495		axienet_rx_submit_desc(ndev);
1496	dma_async_issue_pending(lp->rx_chan);
1497
1498	return 0;
1499
1500err_free_rx_skb_ring:
1501	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1502		kfree(lp->rx_skb_ring[i]);
1503	kfree(lp->rx_skb_ring);
1504err_free_tx_skb_ring:
1505	for (i = 0; i < TX_BD_NUM_MAX; i++)
1506		kfree(lp->tx_skb_ring[i]);
1507	kfree(lp->tx_skb_ring);
1508err_dma_release_rx:
1509	dma_release_channel(lp->rx_chan);
1510err_dma_release_tx:
1511	dma_release_channel(lp->tx_chan);
1512	return ret;
1513}
1514
1515/**
1516 * axienet_init_legacy_dma - init the dma legacy code.
1517 * @ndev:       Pointer to net_device structure
1518 *
1519 * Return: 0, on success.
1520 *          non-zero error value on failure
1521 *
 1522 * This is the legacy DMA initialization code. It also registers the
 1523 * interrupt service routines and enables the interrupt lines and ISR handling.
1524 *
1525 */
1526static int axienet_init_legacy_dma(struct net_device *ndev)
1527{
1528	int ret;
1529	struct axienet_local *lp = netdev_priv(ndev);
1530
1531	/* Enable worker thread for Axi DMA error handling */
1532	lp->stopping = false;
1533	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1534
1535	napi_enable(&lp->napi_rx);
1536	napi_enable(&lp->napi_tx);
1537
1538	/* Enable interrupts for Axi DMA Tx */
1539	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1540			  ndev->name, ndev);
1541	if (ret)
1542		goto err_tx_irq;
1543	/* Enable interrupts for Axi DMA Rx */
1544	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1545			  ndev->name, ndev);
1546	if (ret)
1547		goto err_rx_irq;
1548	/* Enable interrupts for Axi Ethernet core (if defined) */
1549	if (lp->eth_irq > 0) {
1550		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1551				  ndev->name, ndev);
1552		if (ret)
1553			goto err_eth_irq;
1554	}
1555
1556	return 0;
1557
1558err_eth_irq:
1559	free_irq(lp->rx_irq, ndev);
1560err_rx_irq:
1561	free_irq(lp->tx_irq, ndev);
1562err_tx_irq:
1563	napi_disable(&lp->napi_tx);
1564	napi_disable(&lp->napi_rx);
1565	cancel_work_sync(&lp->dma_err_task);
1566	dev_err(lp->dev, "request_irq() failed\n");
1567	return ret;
1568}
1569
1570/**
1571 * axienet_open - Driver open routine.
1572 * @ndev:	Pointer to net_device structure
1573 *
1574 * Return: 0, on success.
1575 *	    non-zero error value on failure
1576 *
1577 * This is the driver open routine. It calls phylink_start to start the
1578 * PHY device.
1579 * It also allocates interrupt service routines, enables the interrupt lines
1580 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1581 * descriptors are initialized.
1582 */
1583static int axienet_open(struct net_device *ndev)
1584{
1585	int ret;
1586	struct axienet_local *lp = netdev_priv(ndev);
1587
1588	/* When we do an Axi Ethernet reset, it resets the complete core
1589	 * including the MDIO. MDIO must be disabled before resetting.
1590	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1591	 */
1592	axienet_lock_mii(lp);
1593	ret = axienet_device_reset(ndev);
1594	axienet_unlock_mii(lp);
1595
1596	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1597	if (ret) {
1598		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1599		return ret;
1600	}
1601
1602	phylink_start(lp->phylink);
1603
1604	/* Start the statistics refresh work */
1605	schedule_delayed_work(&lp->stats_work, 0);
1606
1607	if (lp->use_dmaengine) {
1608		/* Enable interrupts for Axi Ethernet core (if defined) */
1609		if (lp->eth_irq > 0) {
1610			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1611					  ndev->name, ndev);
1612			if (ret)
1613				goto err_phy;
1614		}
1615
1616		ret = axienet_init_dmaengine(ndev);
1617		if (ret < 0)
1618			goto err_free_eth_irq;
1619	} else {
1620		ret = axienet_init_legacy_dma(ndev);
1621		if (ret)
1622			goto err_phy;
1623	}
1624
1625	return 0;
1626
1627err_free_eth_irq:
1628	if (lp->eth_irq > 0)
1629		free_irq(lp->eth_irq, ndev);
1630err_phy:
1631	cancel_delayed_work_sync(&lp->stats_work);
1632	phylink_stop(lp->phylink);
1633	phylink_disconnect_phy(lp->phylink);
1634	return ret;
1635}
1636
1637/**
1638 * axienet_stop - Driver stop routine.
1639 * @ndev:	Pointer to net_device structure
1640 *
1641 * Return: 0, on success.
1642 *
1643 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1644 * device. It also removes the interrupt handlers and disables the interrupts.
1645 * The Axi DMA Tx/Rx BDs are released.
1646 */
1647static int axienet_stop(struct net_device *ndev)
1648{
1649	struct axienet_local *lp = netdev_priv(ndev);
1650	int i;
1651
1652	if (!lp->use_dmaengine) {
1653		WRITE_ONCE(lp->stopping, true);
1654		flush_work(&lp->dma_err_task);
1655
1656		napi_disable(&lp->napi_tx);
1657		napi_disable(&lp->napi_rx);
1658	}
1659
1660	cancel_delayed_work_sync(&lp->stats_work);
1661
1662	phylink_stop(lp->phylink);
1663	phylink_disconnect_phy(lp->phylink);
1664
1665	axienet_setoptions(ndev, lp->options &
1666			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1667
1668	if (!lp->use_dmaengine) {
1669		axienet_dma_stop(lp);
1670		cancel_work_sync(&lp->dma_err_task);
1671		free_irq(lp->tx_irq, ndev);
1672		free_irq(lp->rx_irq, ndev);
1673		axienet_dma_bd_release(ndev);
1674	} else {
1675		dmaengine_terminate_sync(lp->tx_chan);
1676		dmaengine_synchronize(lp->tx_chan);
1677		dmaengine_terminate_sync(lp->rx_chan);
1678		dmaengine_synchronize(lp->rx_chan);
1679
1680		for (i = 0; i < TX_BD_NUM_MAX; i++)
1681			kfree(lp->tx_skb_ring[i]);
1682		kfree(lp->tx_skb_ring);
1683		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1684			kfree(lp->rx_skb_ring[i]);
1685		kfree(lp->rx_skb_ring);
1686
1687		dma_release_channel(lp->rx_chan);
1688		dma_release_channel(lp->tx_chan);
1689	}
1690
1691	axienet_iow(lp, XAE_IE_OFFSET, 0);
1692
1693	if (lp->eth_irq > 0)
1694		free_irq(lp->eth_irq, ndev);
1695	return 0;
1696}
1697
1698/**
1699 * axienet_change_mtu - Driver change mtu routine.
1700 * @ndev:	Pointer to net_device structure
1701 * @new_mtu:	New mtu value to be applied
1702 *
 1703 * Return: 0 on success; -EBUSY if the interface is running; -EINVAL if
 1704 * the new MTU does not fit in the configured receive memory (lp->rxmem).
1705 * This is the change mtu driver routine. It checks if the Axi Ethernet
1706 * hardware supports jumbo frames before changing the mtu. This can be
1707 * called only when the device is not up.
1708 */
1709static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1710{
1711	struct axienet_local *lp = netdev_priv(ndev);
1712
1713	if (netif_running(ndev))
1714		return -EBUSY;
1715
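	/* The new frame (MTU plus Ethernet/VLAN header and trailer) must fit
	 * in the Rx memory the MAC was configured with.
	 */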
1716	if ((new_mtu + VLAN_ETH_HLEN +
1717		XAE_TRL_SIZE) > lp->rxmem)
1718		return -EINVAL;
1719
1720	WRITE_ONCE(ndev->mtu, new_mtu);
1721
1722	return 0;
1723}
1724
1725#ifdef CONFIG_NET_POLL_CONTROLLER
1726/**
1727 * axienet_poll_controller - Axi Ethernet poll mechanism.
1728 * @ndev:	Pointer to net_device structure
1729 *
1730 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1731 * to polling the ISRs and are enabled back after the polling is done.
1732 */
1733static void axienet_poll_controller(struct net_device *ndev)
1734{
1735	struct axienet_local *lp = netdev_priv(ndev);
1736
1737	disable_irq(lp->tx_irq);
1738	disable_irq(lp->rx_irq);
1739	axienet_rx_irq(lp->rx_irq, ndev);
1740	axienet_tx_irq(lp->tx_irq, ndev);
1741	enable_irq(lp->tx_irq);
1742	enable_irq(lp->rx_irq);
1743}
1744#endif
1745
1746static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1747{
1748	struct axienet_local *lp = netdev_priv(dev);
1749
1750	if (!netif_running(dev))
1751		return -EINVAL;
1752
1753	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1754}
1755
1756static void
1757axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1758{
1759	struct axienet_local *lp = netdev_priv(dev);
1760	unsigned int start;
1761
1762	netdev_stats_to_stats64(stats, &dev->stats);
1763
1764	do {
1765		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1766		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1767		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1768	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1769
1770	do {
1771		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1772		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1773		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1774	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1775
1776	if (!(lp->features & XAE_FEATURE_STATS))
1777		return;
1778
1779	do {
1780		start = read_seqcount_begin(&lp->hw_stats_seqcount);
1781		stats->rx_length_errors =
1782			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1783		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1784		stats->rx_frame_errors =
1785			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1786		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1787				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1788				   stats->rx_length_errors +
1789				   stats->rx_crc_errors +
1790				   stats->rx_frame_errors;
1791		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1792
1793		stats->tx_aborted_errors =
1794			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1795		stats->tx_fifo_errors =
1796			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1797		stats->tx_window_errors =
1798			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1799		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1800				   stats->tx_aborted_errors +
1801				   stats->tx_fifo_errors +
1802				   stats->tx_window_errors;
1803	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1804}
1805
1806static const struct net_device_ops axienet_netdev_ops = {
1807	.ndo_open = axienet_open,
1808	.ndo_stop = axienet_stop,
1809	.ndo_start_xmit = axienet_start_xmit,
1810	.ndo_get_stats64 = axienet_get_stats64,
1811	.ndo_change_mtu	= axienet_change_mtu,
1812	.ndo_set_mac_address = netdev_set_mac_address,
1813	.ndo_validate_addr = eth_validate_addr,
1814	.ndo_eth_ioctl = axienet_ioctl,
1815	.ndo_set_rx_mode = axienet_set_multicast_list,
1816#ifdef CONFIG_NET_POLL_CONTROLLER
1817	.ndo_poll_controller = axienet_poll_controller,
1818#endif
1819};
1820
1821static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1822	.ndo_open = axienet_open,
1823	.ndo_stop = axienet_stop,
1824	.ndo_start_xmit = axienet_start_xmit_dmaengine,
1825	.ndo_get_stats64 = axienet_get_stats64,
1826	.ndo_change_mtu	= axienet_change_mtu,
1827	.ndo_set_mac_address = netdev_set_mac_address,
1828	.ndo_validate_addr = eth_validate_addr,
1829	.ndo_eth_ioctl = axienet_ioctl,
1830	.ndo_set_rx_mode = axienet_set_multicast_list,
1831};
1832
1833/**
1834 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1835 * @ndev:	Pointer to net_device structure
1836 * @ed:		Pointer to ethtool_drvinfo structure
1837 *
1838 * This implements ethtool command for getting the driver information.
1839 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1840 */
1841static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1842					 struct ethtool_drvinfo *ed)
1843{
1844	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1845	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1846}
1847
1848/**
1849 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1850 *				   AxiEthernet core.
1851 * @ndev:	Pointer to net_device structure
1852 *
1853 * This implements ethtool command for getting the total register length
1854 * information.
1855 *
1856 * Return: the total regs length
1857 */
1858static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1859{
1860	return sizeof(u32) * AXIENET_REGS_N;
1861}
1862
1863/**
1864 * axienet_ethtools_get_regs - Dump the contents of all registers present
1865 *			       in AxiEthernet core.
1866 * @ndev:	Pointer to net_device structure
1867 * @regs:	Pointer to ethtool_regs structure
1868 * @ret:	Void pointer used to return the contents of the registers.
1869 *
1870 * This implements ethtool command for getting the Axi Ethernet register dump.
1871 * Issue "ethtool -d ethX" to execute this function.
1872 */
1873static void axienet_ethtools_get_regs(struct net_device *ndev,
1874				      struct ethtool_regs *regs, void *ret)
1875{
1876	u32 *data = (u32 *)ret;
1877	size_t len = sizeof(u32) * AXIENET_REGS_N;
1878	struct axienet_local *lp = netdev_priv(ndev);
1879
1880	regs->version = 0;
1881	regs->len = len;
1882
1883	memset(data, 0, len);
1884	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1885	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1886	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1887	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1888	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1889	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1890	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1891	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1892	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1893	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1894	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1895	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1896	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1897	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1898	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1899	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1900	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1901	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1902	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1903	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1904	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1905	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1906	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1907	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1908	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1909	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1910	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1911	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
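	/* The AXI DMA registers are only dumped when this driver programs
	 * the DMA engine directly (legacy, non-dmaengine mode).
	 */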
1912	if (!lp->use_dmaengine) {
1913		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1914		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1915		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1916		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1917		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1918		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1919		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1920		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1921	}
1922}
1923
1924static void
1925axienet_ethtools_get_ringparam(struct net_device *ndev,
1926			       struct ethtool_ringparam *ering,
1927			       struct kernel_ethtool_ringparam *kernel_ering,
1928			       struct netlink_ext_ack *extack)
1929{
1930	struct axienet_local *lp = netdev_priv(ndev);
1931
1932	ering->rx_max_pending = RX_BD_NUM_MAX;
1933	ering->rx_mini_max_pending = 0;
1934	ering->rx_jumbo_max_pending = 0;
1935	ering->tx_max_pending = TX_BD_NUM_MAX;
1936	ering->rx_pending = lp->rx_bd_num;
1937	ering->rx_mini_pending = 0;
1938	ering->rx_jumbo_pending = 0;
1939	ering->tx_pending = lp->tx_bd_num;
1940}
1941
1942static int
1943axienet_ethtools_set_ringparam(struct net_device *ndev,
1944			       struct ethtool_ringparam *ering,
1945			       struct kernel_ethtool_ringparam *kernel_ering,
1946			       struct netlink_ext_ack *extack)
1947{
1948	struct axienet_local *lp = netdev_priv(ndev);
1949
1950	if (ering->rx_pending > RX_BD_NUM_MAX ||
1951	    ering->rx_mini_pending ||
1952	    ering->rx_jumbo_pending ||
1953	    ering->tx_pending < TX_BD_NUM_MIN ||
1954	    ering->tx_pending > TX_BD_NUM_MAX)
1955		return -EINVAL;
1956
1957	if (netif_running(ndev))
1958		return -EBUSY;
1959
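	/* The new ring sizes take effect the next time the interface is
	 * opened and the buffer descriptor rings are reallocated.
	 */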
1960	lp->rx_bd_num = ering->rx_pending;
1961	lp->tx_bd_num = ering->tx_pending;
1962	return 0;
1963}
1964
1965/**
1966 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1967 *				     Tx and Rx paths.
1968 * @ndev:	Pointer to net_device structure
1969 * @epauseparm:	Pointer to ethtool_pauseparam structure.
1970 *
1971 * This implements ethtool command for getting axi ethernet pause frame
1972 * setting. Issue "ethtool -a ethX" to execute this function.
1973 */
1974static void
1975axienet_ethtools_get_pauseparam(struct net_device *ndev,
1976				struct ethtool_pauseparam *epauseparm)
1977{
1978	struct axienet_local *lp = netdev_priv(ndev);
1979
1980	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1981}
1982
1983/**
1984 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1985 *				     settings.
1986 * @ndev:	Pointer to net_device structure
1987 * @epauseparm:Pointer to ethtool_pauseparam structure
1988 *
1989 * This implements ethtool command for enabling flow control on Rx and Tx
1990 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1991 * function.
1992 *
1993 * Return: 0 on success, or a negative error code from phylink.
1994 */
1995static int
1996axienet_ethtools_set_pauseparam(struct net_device *ndev,
1997				struct ethtool_pauseparam *epauseparm)
1998{
1999	struct axienet_local *lp = netdev_priv(ndev);
2000
2001	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
2002}
2003
2004/**
2005 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2006 * @ndev:	Pointer to net_device structure
2007 * @ecoalesce:	Pointer to ethtool_coalesce structure
2008 * @kernel_coal: ethtool CQE mode setting structure
2009 * @extack:	extack for reporting error messages
2010 *
2011 * This implements ethtool command for getting the DMA interrupt coalescing
2012 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
2013 * execute this function.
2014 *
2015 * Return: 0 always
2016 */
2017static int
2018axienet_ethtools_get_coalesce(struct net_device *ndev,
2019			      struct ethtool_coalesce *ecoalesce,
2020			      struct kernel_ethtool_coalesce *kernel_coal,
2021			      struct netlink_ext_ack *extack)
2022{
2023	struct axienet_local *lp = netdev_priv(ndev);
2024
2025	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
2026	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
2027	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
2028	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
2029	return 0;
2030}
2031
2032/**
2033 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2034 * @ndev:	Pointer to net_device structure
2035 * @ecoalesce:	Pointer to ethtool_coalesce structure
2036 * @kernel_coal: ethtool CQE mode setting structure
2037 * @extack:	extack for reporting error messages
2038 *
2039 * This implements ethtool command for setting the DMA interrupt coalescing
2040 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
2041 * prompt to execute this function.
2042 *
2043 * Return: 0 on success, negative error value on failure.
2044 */
2045static int
2046axienet_ethtools_set_coalesce(struct net_device *ndev,
2047			      struct ethtool_coalesce *ecoalesce,
2048			      struct kernel_ethtool_coalesce *kernel_coal,
2049			      struct netlink_ext_ack *extack)
2050{
2051	struct axienet_local *lp = netdev_priv(ndev);
2052
2053	if (netif_running(ndev)) {
2054		NL_SET_ERR_MSG(extack,
2055			       "Please stop netif before applying configuration");
2056		return -EBUSY;
2057	}
2058
2059	if (ecoalesce->rx_max_coalesced_frames > 255 ||
2060	    ecoalesce->tx_max_coalesced_frames > 255) {
2061		NL_SET_ERR_MSG(extack, "frames must be less than 256");
2062		return -EINVAL;
2063	}
2064
2065	if (ecoalesce->rx_max_coalesced_frames)
2066		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
2067	if (ecoalesce->rx_coalesce_usecs)
2068		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
2069	if (ecoalesce->tx_max_coalesced_frames)
2070		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
2071	if (ecoalesce->tx_coalesce_usecs)
2072		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
2073
2074	return 0;
2075}
2076
2077static int
2078axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2079				    struct ethtool_link_ksettings *cmd)
2080{
2081	struct axienet_local *lp = netdev_priv(ndev);
2082
2083	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2084}
2085
2086static int
2087axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2088				    const struct ethtool_link_ksettings *cmd)
2089{
2090	struct axienet_local *lp = netdev_priv(ndev);
2091
2092	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2093}
2094
2095static int axienet_ethtools_nway_reset(struct net_device *dev)
2096{
2097	struct axienet_local *lp = netdev_priv(dev);
2098
2099	return phylink_ethtool_nway_reset(lp->phylink);
2100}
2101
2102static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2103					       struct ethtool_stats *stats,
2104					       u64 *data)
2105{
2106	struct axienet_local *lp = netdev_priv(dev);
2107	unsigned int start;
2108
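	/* Values are reported in the same order as
	 * axienet_ethtool_stats_strings[].
	 */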
2109	do {
2110		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2111		data[0] = axienet_stat(lp, STAT_RX_BYTES);
2112		data[1] = axienet_stat(lp, STAT_TX_BYTES);
2113		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2114		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2115		data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2116		data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2117		data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
2118		data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
2119		data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
2120	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2121}
2122
2123static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2124	"Received bytes",
2125	"Transmitted bytes",
2126	"RX Good VLAN Tagged Frames",
2127	"TX Good VLAN Tagged Frames",
2128	"TX Good PFC Frames",
2129	"RX Good PFC Frames",
2130	"User Defined Counter 0",
2131	"User Defined Counter 1",
2132	"User Defined Counter 2",
2133};
2134
2135static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2136{
2137	switch (stringset) {
2138	case ETH_SS_STATS:
2139		memcpy(data, axienet_ethtool_stats_strings,
2140		       sizeof(axienet_ethtool_stats_strings));
2141		break;
2142	}
2143}
2144
2145static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2146{
2147	struct axienet_local *lp = netdev_priv(dev);
2148
2149	switch (sset) {
2150	case ETH_SS_STATS:
2151		if (lp->features & XAE_FEATURE_STATS)
2152			return ARRAY_SIZE(axienet_ethtool_stats_strings);
2153		fallthrough;
2154	default:
2155		return -EOPNOTSUPP;
2156	}
2157}
2158
2159static void
2160axienet_ethtools_get_pause_stats(struct net_device *dev,
2161				 struct ethtool_pause_stats *pause_stats)
2162{
2163	struct axienet_local *lp = netdev_priv(dev);
2164	unsigned int start;
2165
2166	if (!(lp->features & XAE_FEATURE_STATS))
2167		return;
2168
2169	do {
2170		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2171		pause_stats->tx_pause_frames =
2172			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2173		pause_stats->rx_pause_frames =
2174			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2175	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2176}
2177
2178static void
2179axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2180				  struct ethtool_eth_mac_stats *mac_stats)
2181{
2182	struct axienet_local *lp = netdev_priv(dev);
2183	unsigned int start;
2184
2185	if (!(lp->features & XAE_FEATURE_STATS))
2186		return;
2187
2188	do {
2189		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2190		mac_stats->FramesTransmittedOK =
2191			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2192		mac_stats->SingleCollisionFrames =
2193			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2194		mac_stats->MultipleCollisionFrames =
2195			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2196		mac_stats->FramesReceivedOK =
2197			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2198		mac_stats->FrameCheckSequenceErrors =
2199			axienet_stat(lp, STAT_RX_FCS_ERRORS);
2200		mac_stats->AlignmentErrors =
2201			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2202		mac_stats->FramesWithDeferredXmissions =
2203			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2204		mac_stats->LateCollisions =
2205			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2206		mac_stats->FramesAbortedDueToXSColls =
2207			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2208		mac_stats->MulticastFramesXmittedOK =
2209			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2210		mac_stats->BroadcastFramesXmittedOK =
2211			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2212		mac_stats->FramesWithExcessiveDeferral =
2213			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2214		mac_stats->MulticastFramesReceivedOK =
2215			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2216		mac_stats->BroadcastFramesReceivedOK =
2217			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2218		mac_stats->InRangeLengthErrors =
2219			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2220	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2221}
2222
2223static void
2224axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2225				   struct ethtool_eth_ctrl_stats *ctrl_stats)
2226{
2227	struct axienet_local *lp = netdev_priv(dev);
2228	unsigned int start;
2229
2230	if (!(lp->features & XAE_FEATURE_STATS))
2231		return;
2232
2233	do {
2234		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2235		ctrl_stats->MACControlFramesTransmitted =
2236			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2237		ctrl_stats->MACControlFramesReceived =
2238			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2239		ctrl_stats->UnsupportedOpcodesReceived =
2240			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2241	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2242}
2243
2244static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2245	{   64,    64 },
2246	{   65,   127 },
2247	{  128,   255 },
2248	{  256,   511 },
2249	{  512,  1023 },
2250	{ 1024,  1518 },
2251	{ 1519, 16384 },
2252	{ },
2253};
2254
2255static void
2256axienet_ethtool_get_rmon_stats(struct net_device *dev,
2257			       struct ethtool_rmon_stats *rmon_stats,
2258			       const struct ethtool_rmon_hist_range **ranges)
2259{
2260	struct axienet_local *lp = netdev_priv(dev);
2261	unsigned int start;
2262
2263	if (!(lp->features & XAE_FEATURE_STATS))
2264		return;
2265
2266	do {
2267		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2268		rmon_stats->undersize_pkts =
2269			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2270		rmon_stats->oversize_pkts =
2271			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2272		rmon_stats->fragments =
2273			axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2274
2275		rmon_stats->hist[0] =
2276			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2277		rmon_stats->hist[1] =
2278			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2279		rmon_stats->hist[2] =
2280			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2281		rmon_stats->hist[3] =
2282			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2283		rmon_stats->hist[4] =
2284			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2285		rmon_stats->hist[5] =
2286			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2287		rmon_stats->hist[6] =
2288			rmon_stats->oversize_pkts;
2289
2290		rmon_stats->hist_tx[0] =
2291			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2292		rmon_stats->hist_tx[1] =
2293			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2294		rmon_stats->hist_tx[2] =
2295			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2296		rmon_stats->hist_tx[3] =
2297			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2298		rmon_stats->hist_tx[4] =
2299			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2300		rmon_stats->hist_tx[5] =
2301			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2302		rmon_stats->hist_tx[6] =
2303			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2304	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2305
2306	*ranges = axienet_rmon_ranges;
2307}
2308
2309static const struct ethtool_ops axienet_ethtool_ops = {
2310	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
2311				     ETHTOOL_COALESCE_USECS,
2312	.get_drvinfo    = axienet_ethtools_get_drvinfo,
2313	.get_regs_len   = axienet_ethtools_get_regs_len,
2314	.get_regs       = axienet_ethtools_get_regs,
2315	.get_link       = ethtool_op_get_link,
2316	.get_ringparam	= axienet_ethtools_get_ringparam,
2317	.set_ringparam	= axienet_ethtools_set_ringparam,
2318	.get_pauseparam = axienet_ethtools_get_pauseparam,
2319	.set_pauseparam = axienet_ethtools_set_pauseparam,
2320	.get_coalesce   = axienet_ethtools_get_coalesce,
2321	.set_coalesce   = axienet_ethtools_set_coalesce,
2322	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
2323	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
2324	.nway_reset	= axienet_ethtools_nway_reset,
2325	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2326	.get_strings    = axienet_ethtools_get_strings,
2327	.get_sset_count = axienet_ethtools_get_sset_count,
2328	.get_pause_stats = axienet_ethtools_get_pause_stats,
2329	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2330	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2331	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
2332};
2333
2334static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2335{
2336	return container_of(pcs, struct axienet_local, pcs);
2337}
2338
2339static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2340				  struct phylink_link_state *state)
2341{
2342	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2343
2344	phylink_mii_c22_pcs_get_state(pcs_phy, state);
2345}
2346
2347static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2348{
2349	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2350
2351	phylink_mii_c22_pcs_an_restart(pcs_phy);
2352}
2353
2354static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2355			      phy_interface_t interface,
2356			      const unsigned long *advertising,
2357			      bool permit_pause_to_mac)
2358{
2359	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2360	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2361	struct axienet_local *lp = netdev_priv(ndev);
2362	int ret;
2363
2364	if (lp->switch_x_sgmii) {
2365		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2366				    interface == PHY_INTERFACE_MODE_SGMII ?
2367					XLNX_MII_STD_SELECT_SGMII : 0);
2368		if (ret < 0) {
2369			netdev_warn(ndev,
2370				    "Failed to switch PHY interface: %d\n",
2371				    ret);
2372			return ret;
2373		}
2374	}
2375
2376	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2377					 neg_mode);
2378	if (ret < 0)
2379		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2380
2381	return ret;
2382}
2383
2384static const struct phylink_pcs_ops axienet_pcs_ops = {
2385	.pcs_get_state = axienet_pcs_get_state,
2386	.pcs_config = axienet_pcs_config,
2387	.pcs_an_restart = axienet_pcs_an_restart,
2388};
2389
2390static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2391						  phy_interface_t interface)
2392{
2393	struct net_device *ndev = to_net_dev(config->dev);
2394	struct axienet_local *lp = netdev_priv(ndev);
2395
2396	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2397	    interface ==  PHY_INTERFACE_MODE_SGMII)
2398		return &lp->pcs;
2399
2400	return NULL;
2401}
2402
2403static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2404			       const struct phylink_link_state *state)
2405{
2406	/* nothing meaningful to do */
2407}
2408
2409static void axienet_mac_link_down(struct phylink_config *config,
2410				  unsigned int mode,
2411				  phy_interface_t interface)
2412{
2413	/* nothing meaningful to do */
2414}
2415
2416static void axienet_mac_link_up(struct phylink_config *config,
2417				struct phy_device *phy,
2418				unsigned int mode, phy_interface_t interface,
2419				int speed, int duplex,
2420				bool tx_pause, bool rx_pause)
2421{
2422	struct net_device *ndev = to_net_dev(config->dev);
2423	struct axienet_local *lp = netdev_priv(ndev);
2424	u32 emmc_reg, fcc_reg;
2425
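	/* Program the MAC speed resolved by phylink into the EMMC register,
	 * then update flow control according to the negotiated pause modes.
	 */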
2426	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2427	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2428
2429	switch (speed) {
2430	case SPEED_1000:
2431		emmc_reg |= XAE_EMMC_LINKSPD_1000;
2432		break;
2433	case SPEED_100:
2434		emmc_reg |= XAE_EMMC_LINKSPD_100;
2435		break;
2436	case SPEED_10:
2437		emmc_reg |= XAE_EMMC_LINKSPD_10;
2438		break;
2439	default:
2440		dev_err(&ndev->dev,
2441			"Speed other than 10, 100 or 1Gbps is not supported\n");
2442		break;
2443	}
2444
2445	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2446
2447	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2448	if (tx_pause)
2449		fcc_reg |= XAE_FCC_FCTX_MASK;
2450	else
2451		fcc_reg &= ~XAE_FCC_FCTX_MASK;
2452	if (rx_pause)
2453		fcc_reg |= XAE_FCC_FCRX_MASK;
2454	else
2455		fcc_reg &= ~XAE_FCC_FCRX_MASK;
2456	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2457}
2458
2459static const struct phylink_mac_ops axienet_phylink_ops = {
2460	.mac_select_pcs = axienet_mac_select_pcs,
2461	.mac_config = axienet_mac_config,
2462	.mac_link_down = axienet_mac_link_down,
2463	.mac_link_up = axienet_mac_link_up,
2464};
2465
2466/**
2467 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2468 * @work:	pointer to work_struct
2469 *
2470 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2471 * Tx/Rx BDs.
2472 */
2473static void axienet_dma_err_handler(struct work_struct *work)
2474{
2475	u32 i;
2476	u32 axienet_status;
2477	struct axidma_bd *cur_p;
2478	struct axienet_local *lp = container_of(work, struct axienet_local,
2479						dma_err_task);
2480	struct net_device *ndev = lp->ndev;
2481
2482	/* Don't bother if we are going to stop anyway */
2483	if (READ_ONCE(lp->stopping))
2484		return;
2485
2486	napi_disable(&lp->napi_tx);
2487	napi_disable(&lp->napi_rx);
2488
2489	axienet_setoptions(ndev, lp->options &
2490			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2491
2492	axienet_dma_stop(lp);
2493
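	/* Unmap and free any Tx buffers still held in the descriptor ring,
	 * then clear both rings so DMA can restart from index 0.
	 */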
2494	for (i = 0; i < lp->tx_bd_num; i++) {
2495		cur_p = &lp->tx_bd_v[i];
2496		if (cur_p->cntrl) {
2497			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2498
2499			dma_unmap_single(lp->dev, addr,
2500					 (cur_p->cntrl &
2501					  XAXIDMA_BD_CTRL_LENGTH_MASK),
2502					 DMA_TO_DEVICE);
2503		}
2504		if (cur_p->skb)
2505			dev_kfree_skb_irq(cur_p->skb);
2506		cur_p->phys = 0;
2507		cur_p->phys_msb = 0;
2508		cur_p->cntrl = 0;
2509		cur_p->status = 0;
2510		cur_p->app0 = 0;
2511		cur_p->app1 = 0;
2512		cur_p->app2 = 0;
2513		cur_p->app3 = 0;
2514		cur_p->app4 = 0;
2515		cur_p->skb = NULL;
2516	}
2517
2518	for (i = 0; i < lp->rx_bd_num; i++) {
2519		cur_p = &lp->rx_bd_v[i];
2520		cur_p->status = 0;
2521		cur_p->app0 = 0;
2522		cur_p->app1 = 0;
2523		cur_p->app2 = 0;
2524		cur_p->app3 = 0;
2525		cur_p->app4 = 0;
2526	}
2527
2528	lp->tx_bd_ci = 0;
2529	lp->tx_bd_tail = 0;
2530	lp->rx_bd_ci = 0;
2531
2532	axienet_dma_start(lp);
2533
2534	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2535	axienet_status &= ~XAE_RCW1_RX_MASK;
2536	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2537
2538	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2539	if (axienet_status & XAE_INT_RXRJECT_MASK)
2540		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2541	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2542		    XAE_INT_RECV_ERROR_MASK : 0);
2543	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2544
2545	/* Sync default options with HW but leave receiver and
2546	 * transmitter disabled.
2547	 */
2548	axienet_setoptions(ndev, lp->options &
2549			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2550	axienet_set_mac_address(ndev, NULL);
2551	axienet_set_multicast_list(ndev);
2552	napi_enable(&lp->napi_rx);
2553	napi_enable(&lp->napi_tx);
2554	axienet_setoptions(ndev, lp->options);
2555}
2556
2557/**
2558 * axienet_probe - Axi Ethernet probe function.
2559 * @pdev:	Pointer to platform device structure.
2560 *
2561 * Return: 0, on success
2562 *	    Non-zero error value on failure.
2563 *
2564 * This is the probe routine for Axi Ethernet driver. This is called before
2565 * any other driver routines are invoked. It allocates and sets up the Ethernet
2566 * device. Parses through device tree and populates fields of
2567 * axienet_local. It registers the Ethernet device.
2568 */
2569static int axienet_probe(struct platform_device *pdev)
2570{
2571	int ret;
2572	struct device_node *np;
2573	struct axienet_local *lp;
2574	struct net_device *ndev;
2575	struct resource *ethres;
2576	u8 mac_addr[ETH_ALEN];
2577	int addr_width = 32;
2578	u32 value;
2579
2580	ndev = alloc_etherdev(sizeof(*lp));
2581	if (!ndev)
2582		return -ENOMEM;
2583
2584	platform_set_drvdata(pdev, ndev);
2585
2586	SET_NETDEV_DEV(ndev, &pdev->dev);
2587	ndev->features = NETIF_F_SG;
2588	ndev->ethtool_ops = &axienet_ethtool_ops;
2589
2590	/* MTU range: 64 - 9000 */
2591	ndev->min_mtu = 64;
2592	ndev->max_mtu = XAE_JUMBO_MTU;
2593
2594	lp = netdev_priv(ndev);
2595	lp->ndev = ndev;
2596	lp->dev = &pdev->dev;
2597	lp->options = XAE_OPTION_DEFAULTS;
2598	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2599	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2600
2601	u64_stats_init(&lp->rx_stat_sync);
2602	u64_stats_init(&lp->tx_stat_sync);
2603
2604	mutex_init(&lp->stats_lock);
2605	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2606	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2607
2608	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2609	if (!lp->axi_clk) {
2610		/* For backward compatibility, if named AXI clock is not present,
2611		 * treat the first clock specified as the AXI clock.
2612		 */
2613		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2614	}
2615	if (IS_ERR(lp->axi_clk)) {
2616		ret = PTR_ERR(lp->axi_clk);
2617		goto free_netdev;
2618	}
2619	ret = clk_prepare_enable(lp->axi_clk);
2620	if (ret) {
2621		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2622		goto free_netdev;
2623	}
2624
2625	lp->misc_clks[0].id = "axis_clk";
2626	lp->misc_clks[1].id = "ref_clk";
2627	lp->misc_clks[2].id = "mgt_clk";
2628
2629	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2630	if (ret)
2631		goto cleanup_clk;
2632
2633	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2634	if (ret)
2635		goto cleanup_clk;
2636
2637	/* Map device registers */
2638	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2639	if (IS_ERR(lp->regs)) {
2640		ret = PTR_ERR(lp->regs);
2641		goto cleanup_clk;
2642	}
2643	lp->regs_start = ethres->start;
2644
2645	/* Setup checksum offload, but default to off if not specified */
2646	lp->features = 0;
2647
2648	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2649		lp->features |= XAE_FEATURE_STATS;
2650
2651	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2652	if (!ret) {
2653		switch (value) {
2654		case 1:
2655			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2656			/* Can checksum any contiguous range */
2657			ndev->features |= NETIF_F_HW_CSUM;
2658			break;
2659		case 2:
2660			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2661			/* Can checksum TCP/UDP over IPv4. */
2662			ndev->features |= NETIF_F_IP_CSUM;
2663			break;
2664		}
2665	}
2666	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2667	if (!ret) {
2668		switch (value) {
2669		case 1:
2670			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2671			ndev->features |= NETIF_F_RXCSUM;
2672			break;
2673		case 2:
2674			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2675			ndev->features |= NETIF_F_RXCSUM;
2676			break;
2677		}
2678	}
2679	/* To support jumbo frames, the Axi Ethernet hardware must have been
2680	 * configured with sufficiently large Rx/Tx memory. Read the Rx
2681	 * memory size from the device tree here; it is used later to decide
2682	 * whether the jumbo option can be enabled and which maximum frame
2683	 * size the MAC can accept.
2684	 */
2685	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2686
2687	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2688						   "xlnx,switch-x-sgmii");
2689
2690	/* Start with the proprietary, and broken phy_type */
2691	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2692	if (!ret) {
2693		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2694		switch (value) {
2695		case XAE_PHY_TYPE_MII:
2696			lp->phy_mode = PHY_INTERFACE_MODE_MII;
2697			break;
2698		case XAE_PHY_TYPE_GMII:
2699			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2700			break;
2701		case XAE_PHY_TYPE_RGMII_2_0:
2702			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2703			break;
2704		case XAE_PHY_TYPE_SGMII:
2705			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2706			break;
2707		case XAE_PHY_TYPE_1000BASE_X:
2708			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2709			break;
2710		default:
2711			ret = -EINVAL;
2712			goto cleanup_clk;
2713		}
2714	} else {
2715		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2716		if (ret)
2717			goto cleanup_clk;
2718	}
2719	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2720	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2721		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2722		ret = -EINVAL;
2723		goto cleanup_clk;
2724	}
2725
2726	if (!of_property_present(pdev->dev.of_node, "dmas")) {
2727		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2728		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2729
2730		if (np) {
2731			struct resource dmares;
2732
2733			ret = of_address_to_resource(np, 0, &dmares);
2734			if (ret) {
2735				dev_err(&pdev->dev,
2736					"unable to get DMA resource\n");
2737				of_node_put(np);
2738				goto cleanup_clk;
2739			}
2740			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2741							     &dmares);
2742			lp->rx_irq = irq_of_parse_and_map(np, 1);
2743			lp->tx_irq = irq_of_parse_and_map(np, 0);
2744			of_node_put(np);
2745			lp->eth_irq = platform_get_irq_optional(pdev, 0);
2746		} else {
2747			/* Check for these resources directly on the Ethernet node. */
2748			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2749			lp->rx_irq = platform_get_irq(pdev, 1);
2750			lp->tx_irq = platform_get_irq(pdev, 0);
2751			lp->eth_irq = platform_get_irq_optional(pdev, 2);
2752		}
2753		if (IS_ERR(lp->dma_regs)) {
2754			dev_err(&pdev->dev, "could not map DMA regs\n");
2755			ret = PTR_ERR(lp->dma_regs);
2756			goto cleanup_clk;
2757		}
2758		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2759			dev_err(&pdev->dev, "could not determine irqs\n");
2760			ret = -ENOMEM;
2761			goto cleanup_clk;
2762		}
2763
2764		/* Reset core now that clocks are enabled, prior to accessing MDIO */
2765		ret = __axienet_device_reset(lp);
2766		if (ret)
2767			goto cleanup_clk;
2768
2769		/* Autodetect the need for 64-bit DMA pointers.
2770		 * When the IP is configured for a bus width bigger than 32 bits,
2771		 * writing the MSB registers is mandatory, even if they are all 0.
2772		 * We can detect this case by writing all 1's to one such register
2773		 * and see if that sticks: when the IP is configured for 32 bits
2774		 * only, those registers are RES0.
2775		 * Those MSB registers were introduced in IP v7.1, which we check first.
2776		 */
2777		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2778			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2779
2780			iowrite32(0x0, desc);
2781			if (ioread32(desc) == 0) {	/* sanity check */
2782				iowrite32(0xffffffff, desc);
2783				if (ioread32(desc) > 0) {
2784					lp->features |= XAE_FEATURE_DMA_64BIT;
2785					addr_width = 64;
2786					dev_info(&pdev->dev,
2787						 "autodetected 64-bit DMA range\n");
2788				}
2789				iowrite32(0x0, desc);
2790			}
2791		}
2792		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2793			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2794			ret = -EINVAL;
2795			goto cleanup_clk;
2796		}
2797
2798		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2799		if (ret) {
2800			dev_err(&pdev->dev, "No suitable DMA available\n");
2801			goto cleanup_clk;
2802		}
2803		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2804		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2805	} else {
2806		struct xilinx_vdma_config cfg;
2807		struct dma_chan *tx_chan;
2808
2809		lp->eth_irq = platform_get_irq_optional(pdev, 0);
2810		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2811			ret = lp->eth_irq;
2812			goto cleanup_clk;
2813		}
2814		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2815		if (IS_ERR(tx_chan)) {
2816			ret = PTR_ERR(tx_chan);
2817			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2818			goto cleanup_clk;
2819		}
2820
2821		cfg.reset = 1;
2822		/* Despite the VDMA name, this config call also supports resetting AXI DMA channels */
2823		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2824		if (ret < 0) {
2825			dev_err(&pdev->dev, "Reset channel failed\n");
2826			dma_release_channel(tx_chan);
2827			goto cleanup_clk;
2828		}
2829
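		/* The Tx channel was only requested here to reset the DMA
		 * engine; the Tx and Rx channels are requested again when
		 * the interface is opened.
		 */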
2830		dma_release_channel(tx_chan);
2831		lp->use_dmaengine = 1;
2832	}
2833
2834	if (lp->use_dmaengine)
2835		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2836	else
2837		ndev->netdev_ops = &axienet_netdev_ops;
2838	/* Check for Ethernet core IRQ (optional) */
2839	if (lp->eth_irq <= 0)
2840		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2841
2842	/* Retrieve the MAC address */
2843	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2844	if (!ret) {
2845		axienet_set_mac_address(ndev, mac_addr);
2846	} else {
2847		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2848			 ret);
2849		axienet_set_mac_address(ndev, NULL);
2850	}
2851
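	/* Default interrupt coalescing settings; these can be changed at
	 * runtime via ethtool -C.
	 */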
2852	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2853	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2854	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2855	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2856
2857	ret = axienet_mdio_setup(lp);
2858	if (ret)
2859		dev_warn(&pdev->dev,
2860			 "error registering MDIO bus: %d\n", ret);
2861
2862	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2863	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2864		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2865		if (!np) {
2866			/* Deprecated: Always use "pcs-handle" for pcs_phy.
2867			 * Falling back to "phy-handle" here is only for
2868			 * backward compatibility with old device trees.
2869			 */
2870			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2871		}
2872		if (!np) {
2873			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2874			ret = -EINVAL;
2875			goto cleanup_mdio;
2876		}
2877		lp->pcs_phy = of_mdio_find_device(np);
2878		if (!lp->pcs_phy) {
2879			ret = -EPROBE_DEFER;
2880			of_node_put(np);
2881			goto cleanup_mdio;
2882		}
2883		of_node_put(np);
2884		lp->pcs.ops = &axienet_pcs_ops;
2885		lp->pcs.neg_mode = true;
2886		lp->pcs.poll = true;
2887	}
2888
2889	lp->phylink_config.dev = &ndev->dev;
2890	lp->phylink_config.type = PHYLINK_NETDEV;
2891	lp->phylink_config.mac_managed_pm = true;
2892	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2893		MAC_10FD | MAC_100FD | MAC_1000FD;
2894
2895	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2896	if (lp->switch_x_sgmii) {
2897		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2898			  lp->phylink_config.supported_interfaces);
2899		__set_bit(PHY_INTERFACE_MODE_SGMII,
2900			  lp->phylink_config.supported_interfaces);
2901	}
2902
2903	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2904				     lp->phy_mode,
2905				     &axienet_phylink_ops);
2906	if (IS_ERR(lp->phylink)) {
2907		ret = PTR_ERR(lp->phylink);
2908		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2909		goto cleanup_mdio;
2910	}
2911
2912	ret = register_netdev(lp->ndev);
2913	if (ret) {
2914		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2915		goto cleanup_phylink;
2916	}
2917
2918	return 0;
2919
2920cleanup_phylink:
2921	phylink_destroy(lp->phylink);
2922
2923cleanup_mdio:
2924	if (lp->pcs_phy)
2925		put_device(&lp->pcs_phy->dev);
2926	if (lp->mii_bus)
2927		axienet_mdio_teardown(lp);
2928cleanup_clk:
2929	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2930	clk_disable_unprepare(lp->axi_clk);
2931
2932free_netdev:
2933	free_netdev(ndev);
2934
2935	return ret;
2936}
2937
2938static void axienet_remove(struct platform_device *pdev)
2939{
2940	struct net_device *ndev = platform_get_drvdata(pdev);
2941	struct axienet_local *lp = netdev_priv(ndev);
2942
2943	unregister_netdev(ndev);
2944
2945	if (lp->phylink)
2946		phylink_destroy(lp->phylink);
2947
2948	if (lp->pcs_phy)
2949		put_device(&lp->pcs_phy->dev);
2950
2951	axienet_mdio_teardown(lp);
2952
2953	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2954	clk_disable_unprepare(lp->axi_clk);
2955
2956	free_netdev(ndev);
2957}
2958
2959static void axienet_shutdown(struct platform_device *pdev)
2960{
2961	struct net_device *ndev = platform_get_drvdata(pdev);
2962
2963	rtnl_lock();
2964	netif_device_detach(ndev);
2965
2966	if (netif_running(ndev))
2967		dev_close(ndev);
2968
2969	rtnl_unlock();
2970}
2971
2972static int axienet_suspend(struct device *dev)
2973{
2974	struct net_device *ndev = dev_get_drvdata(dev);
2975
2976	if (!netif_running(ndev))
2977		return 0;
2978
2979	netif_device_detach(ndev);
2980
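	/* axienet_stop() is normally called as ndo_stop() under RTNL, so
	 * take it here as well.
	 */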
2981	rtnl_lock();
2982	axienet_stop(ndev);
2983	rtnl_unlock();
2984
2985	return 0;
2986}
2987
2988static int axienet_resume(struct device *dev)
2989{
2990	struct net_device *ndev = dev_get_drvdata(dev);
2991
2992	if (!netif_running(ndev))
2993		return 0;
2994
2995	rtnl_lock();
2996	axienet_open(ndev);
2997	rtnl_unlock();
2998
2999	netif_device_attach(ndev);
3000
3001	return 0;
3002}
3003
3004static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
3005				axienet_suspend, axienet_resume);
3006
3007static struct platform_driver axienet_driver = {
3008	.probe = axienet_probe,
3009	.remove = axienet_remove,
3010	.shutdown = axienet_shutdown,
3011	.driver = {
3012		 .name = "xilinx_axienet",
3013		 .pm = &axienet_pm_ops,
3014		 .of_match_table = axienet_of_match,
3015	},
3016};
3017
3018module_platform_driver(axienet_driver);
3019
3020MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3021MODULE_AUTHOR("Xilinx");
3022MODULE_LICENSE("GPL");
 122/**
 123 * axienet_dma_in32 - Memory mapped Axi DMA register read
 124 * @lp:		Pointer to axienet local structure
 125 * @reg:	Address offset from the base address of the Axi DMA core
 126 *
 127 * Return: The contents of the Axi DMA register
 128 *
 129 * This function returns the contents of the corresponding Axi DMA register.
 130 */
 131static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 132{
 133	return ioread32(lp->dma_regs + reg);
 134}
 135
 136static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
 137			       struct axidma_bd *desc)
 138{
 139	desc->phys = lower_32_bits(addr);
 140	if (lp->features & XAE_FEATURE_DMA_64BIT)
 141		desc->phys_msb = upper_32_bits(addr);
 142}
 143
 144static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
 145				     struct axidma_bd *desc)
 146{
 147	dma_addr_t ret = desc->phys;
 148
 149	if (lp->features & XAE_FEATURE_DMA_64BIT)
 150		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
 151
 152	return ret;
 153}
 154
 155/**
 156 * axienet_dma_bd_release - Release buffer descriptor rings
 157 * @ndev:	Pointer to the net_device structure
 158 *
 159 * This function is used to release the descriptors allocated in
 160 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 161 * driver stop api is called.
 162 */
 163static void axienet_dma_bd_release(struct net_device *ndev)
 164{
 165	int i;
 166	struct axienet_local *lp = netdev_priv(ndev);
 167
 168	/* If we end up here, tx_bd_v must have been DMA allocated. */
 169	dma_free_coherent(lp->dev,
 170			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 171			  lp->tx_bd_v,
 172			  lp->tx_bd_p);
 173
 174	if (!lp->rx_bd_v)
 175		return;
 176
 177	for (i = 0; i < lp->rx_bd_num; i++) {
 178		dma_addr_t phys;
 179
 180		/* A NULL skb means this descriptor has not been initialised
 181		 * at all.
 182		 */
 183		if (!lp->rx_bd_v[i].skb)
 184			break;
 185
 186		dev_kfree_skb(lp->rx_bd_v[i].skb);
 187
 188		/* For each descriptor, we programmed cntrl with the (non-zero)
 189		 * descriptor size, after it had been successfully allocated.
 190		 * So a non-zero value in there means we need to unmap it.
 191		 */
 192		if (lp->rx_bd_v[i].cntrl) {
 193			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
 194			dma_unmap_single(lp->dev, phys,
 195					 lp->max_frm_size, DMA_FROM_DEVICE);
 196		}
 197	}
 198
 199	dma_free_coherent(lp->dev,
 200			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 201			  lp->rx_bd_v,
 202			  lp->rx_bd_p);
 203}
 204
 205/**
 206 * axienet_usec_to_timer - Calculate IRQ delay timer value
 207 * @lp:		Pointer to the axienet_local structure
 208 * @coalesce_usec: Microseconds to convert into timer value
 209 */
 210static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
 211{
 212	u32 result;
 213	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
 214
 215	if (lp->axi_clk)
 216		clk_rate = clk_get_rate(lp->axi_clk);
 217
 218	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
 219	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
 220					 (u64)125000000);
 221	if (result > 255)
 222		result = 255;
 223
 224	return result;
 225}
 226
 227/**
 228 * axienet_dma_start - Set up DMA registers and start DMA operation
 229 * @lp:		Pointer to the axienet_local structure
 230 */
 231static void axienet_dma_start(struct axienet_local *lp)
 232{
 233	/* Start updating the Rx channel control register */
 234	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
 235			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 236	/* Only set interrupt delay timer if not generating an interrupt on
 237	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
 238	 */
 239	if (lp->coalesce_count_rx > 1)
 240		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
 241					<< XAXIDMA_DELAY_SHIFT) |
 242				 XAXIDMA_IRQ_DELAY_MASK;
 243	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 244
 245	/* Start updating the Tx channel control register */
 246	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
 247			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 248	/* Only set interrupt delay timer if not generating an interrupt on
 249	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
 250	 */
 251	if (lp->coalesce_count_tx > 1)
 252		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
 253					<< XAXIDMA_DELAY_SHIFT) |
 254				 XAXIDMA_IRQ_DELAY_MASK;
 255	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 256
 257	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 258	 * halted state. This will make the Rx side ready for reception.
 259	 */
 260	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 261	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 262	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 263	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
 264			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
 265
 266	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 267	 * Tx channel is now ready to run. But only after we write to the
 268	 * tail pointer register that the Tx channel will start transmitting.
 269	 */
 270	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 271	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 272	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 273}
 274
 275/**
 276 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 277 * @ndev:	Pointer to the net_device structure
 278 *
 279 * Return: 0, on success -ENOMEM, on failure
 280 *
 281 * This function is called to initialize the Rx and Tx DMA descriptor
 282 * rings. This initializes the descriptors with required default values
 283 * and is called when Axi Ethernet driver reset is called.
 284 */
 285static int axienet_dma_bd_init(struct net_device *ndev)
 286{
 287	int i;
 288	struct sk_buff *skb;
 289	struct axienet_local *lp = netdev_priv(ndev);
 290
 291	/* Reset the indexes which are used for accessing the BDs */
 292	lp->tx_bd_ci = 0;
 293	lp->tx_bd_tail = 0;
 294	lp->rx_bd_ci = 0;
 295
 296	/* Allocate the Tx and Rx buffer descriptors. */
 297	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
 298					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 299					 &lp->tx_bd_p, GFP_KERNEL);
 300	if (!lp->tx_bd_v)
 301		return -ENOMEM;
 302
 303	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
 304					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 305					 &lp->rx_bd_p, GFP_KERNEL);
 306	if (!lp->rx_bd_v)
 307		goto out;
 308
 309	for (i = 0; i < lp->tx_bd_num; i++) {
 310		dma_addr_t addr = lp->tx_bd_p +
 311				  sizeof(*lp->tx_bd_v) *
 312				  ((i + 1) % lp->tx_bd_num);
 313
 314		lp->tx_bd_v[i].next = lower_32_bits(addr);
 315		if (lp->features & XAE_FEATURE_DMA_64BIT)
 316			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
 317	}
 318
 319	for (i = 0; i < lp->rx_bd_num; i++) {
 320		dma_addr_t addr;
 321
 322		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
 323			((i + 1) % lp->rx_bd_num);
 324		lp->rx_bd_v[i].next = lower_32_bits(addr);
 325		if (lp->features & XAE_FEATURE_DMA_64BIT)
 326			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
 327
 328		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 329		if (!skb)
 330			goto out;
 331
 332		lp->rx_bd_v[i].skb = skb;
 333		addr = dma_map_single(lp->dev, skb->data,
 334				      lp->max_frm_size, DMA_FROM_DEVICE);
 335		if (dma_mapping_error(lp->dev, addr)) {
 336			netdev_err(ndev, "DMA mapping error\n");
 337			goto out;
 338		}
 339		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
 340
 341		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 342	}
 343
 344	axienet_dma_start(lp);
 345
 346	return 0;
 347out:
 348	axienet_dma_bd_release(ndev);
 349	return -ENOMEM;
 350}
 351
 352/**
 353 * axienet_set_mac_address - Write the MAC address
 354 * @ndev:	Pointer to the net_device structure
 355 * @address:	6 byte Address to be written as MAC address
 356 *
 357 * This function is called to initialize the MAC address of the Axi Ethernet
 358 * core. It writes to the UAW0 and UAW1 registers of the core.
 359 */
 360static void axienet_set_mac_address(struct net_device *ndev,
 361				    const void *address)
 362{
 363	struct axienet_local *lp = netdev_priv(ndev);
 364
 365	if (address)
 366		eth_hw_addr_set(ndev, address);
 367	if (!is_valid_ether_addr(ndev->dev_addr))
 368		eth_hw_addr_random(ndev);
 369
 370	/* Set up unicast MAC address filter set its mac address */
 371	axienet_iow(lp, XAE_UAW0_OFFSET,
 372		    (ndev->dev_addr[0]) |
 373		    (ndev->dev_addr[1] << 8) |
 374		    (ndev->dev_addr[2] << 16) |
 375		    (ndev->dev_addr[3] << 24));
 376	axienet_iow(lp, XAE_UAW1_OFFSET,
 377		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
 378		      ~XAE_UAW1_UNICASTADDR_MASK) |
 379		     (ndev->dev_addr[4] |
 380		     (ndev->dev_addr[5] << 8))));
 381}
 382
 383/**
 384 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 385 * @ndev:	Pointer to the net_device structure
 386 * @p:		6 byte Address to be written as MAC address
 387 *
 388 * Return: 0 for all conditions. Presently, there is no failure case.
 389 *
 390 * This function is called to initialize the MAC address of the Axi Ethernet
 391 * core. It calls the core specific axienet_set_mac_address. This is the
 392 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 393 */
 394static int netdev_set_mac_address(struct net_device *ndev, void *p)
 395{
 396	struct sockaddr *addr = p;
 
 397	axienet_set_mac_address(ndev, addr->sa_data);
 398	return 0;
 399}
 400
 401/**
 402 * axienet_set_multicast_list - Prepare the multicast table
 403 * @ndev:	Pointer to the net_device structure
 404 *
 * This function initializes the four-entry multicast CAM table provided by
 * the Axi Ethernet basic multicast support. It is also wired up as the
 * net_device_ops entry ndo_set_rx_mode, so it gets called whenever the
 * multicast table entries need to be updated.
 411 */
 412static void axienet_set_multicast_list(struct net_device *ndev)
 413{
 414	int i;
 415	u32 reg, af0reg, af1reg;
 416	struct axienet_local *lp = netdev_priv(ndev);
 417
 418	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
 419	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
 420		/* We must make the kernel realize we had to move into
 421		 * promiscuous mode. If it was a promiscuous mode request
 422		 * the flag is already set. If not we set it.
 423		 */
 424		ndev->flags |= IFF_PROMISC;
 425		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 426		reg |= XAE_FMI_PM_MASK;
 427		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 428		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
 429	} else if (!netdev_mc_empty(ndev)) {
 430		struct netdev_hw_addr *ha;
 431
 432		i = 0;
 433		netdev_for_each_mc_addr(ha, ndev) {
 434			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
 435				break;
 436
 437			af0reg = (ha->addr[0]);
 438			af0reg |= (ha->addr[1] << 8);
 439			af0reg |= (ha->addr[2] << 16);
 440			af0reg |= (ha->addr[3] << 24);
 441
 442			af1reg = (ha->addr[4]);
 443			af1reg |= (ha->addr[5] << 8);
 444
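			/* The low byte of the FMI register selects which of
			 * the four CAM entries the following AF0/AF1 writes
			 * update.
			 */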
 445			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 446			reg |= i;
 447
 448			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 449			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
 450			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
 451			i++;
 452		}
 453	} else {
 454		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 455		reg &= ~XAE_FMI_PM_MASK;
 456
 457		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 458
 459		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
 460			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 461			reg |= i;
 462
 463			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 464			axienet_iow(lp, XAE_AF0_OFFSET, 0);
 465			axienet_iow(lp, XAE_AF1_OFFSET, 0);
 466		}
 467
 468		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
 469	}
 470}
 471
 472/**
 473 * axienet_setoptions - Set an Axi Ethernet option
 474 * @ndev:	Pointer to the net_device structure
 475 * @options:	Option to be enabled/disabled
 476 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off, for example jumbo frames, basic VLAN support and promiscuous
 * mode. This function sets or clears these options in the Axi Ethernet
 * hardware by walking the axienet_option table.
 482 */
 483static void axienet_setoptions(struct net_device *ndev, u32 options)
 484{
 485	int reg;
 486	struct axienet_local *lp = netdev_priv(ndev);
 487	struct axienet_option *tp = &axienet_options[0];
 488
 489	while (tp->opt) {
 490		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
 491		if (options & tp->opt)
 492			reg |= tp->m_or;
 493		axienet_iow(lp, tp->reg, reg);
 494		tp++;
 495	}
 496
 497	lp->options |= options;
 498}
 499
 500static int __axienet_device_reset(struct axienet_local *lp)
 501{
 502	u32 value;
 503	int ret;
 504
 505	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 506	 * process of Axi DMA takes a while to complete as all pending
 507	 * commands/transfers will be flushed or completed during this
 508	 * reset process.
 509	 * Note that even though both TX and RX have their own reset register,
 510	 * they both reset the entire DMA core, so only one needs to be used.
 511	 */
 512	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
 513	ret = read_poll_timeout(axienet_dma_in32, value,
 514				!(value & XAXIDMA_CR_RESET_MASK),
 515				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 516				XAXIDMA_TX_CR_OFFSET);
 517	if (ret) {
 518		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
 519		return ret;
 520	}
 521
 522	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
 523	ret = read_poll_timeout(axienet_ior, value,
 524				value & XAE_INT_PHYRSTCMPLT_MASK,
 525				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 526				XAE_IS_OFFSET);
 527	if (ret) {
 528		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
 529		return ret;
 530	}
 531
 532	return 0;
 533}
 534
 535/**
 536 * axienet_dma_stop - Stop DMA operation
 537 * @lp:		Pointer to the axienet_local structure
 538 */
 539static void axienet_dma_stop(struct axienet_local *lp)
 540{
 541	int count;
 542	u32 cr, sr;
 543
 544	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 545	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 546	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 547	synchronize_irq(lp->rx_irq);
 548
 549	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 550	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 551	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 552	synchronize_irq(lp->tx_irq);
 553
 554	/* Give DMAs a chance to halt gracefully */
 555	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 556	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 557		msleep(20);
 558		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 559	}
 560
 561	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 562	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 563		msleep(20);
 564		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 565	}
 566
 567	/* Do a reset to ensure DMA is really stopped */
 568	axienet_lock_mii(lp);
 569	__axienet_device_reset(lp);
 570	axienet_unlock_mii(lp);
 571}
 572
 573/**
 574 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 575 * @ndev:	Pointer to the net_device structure
 576 *
 577 * This function is called to reset and initialize the Axi Ethernet core. This
 578 * is typically called during initialization. It does a reset of the Axi DMA
 579 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 580 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 581 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 582 * core.
 * Return: 0 on success or a negative error number otherwise.
 584 */
 585static int axienet_device_reset(struct net_device *ndev)
 586{
 587	u32 axienet_status;
 588	struct axienet_local *lp = netdev_priv(ndev);
 589	int ret;
 590
 591	ret = __axienet_device_reset(lp);
 592	if (ret)
 593		return ret;
 594
 595	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 596	lp->options |= XAE_OPTION_VLAN;
 597	lp->options &= (~XAE_OPTION_JUMBO);
 598
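	/* For MTUs above the standard Ethernet size, grow the maximum frame
	 * size and enable the jumbo option only if the frame fits in the
	 * Rx memory configured in the hardware.
	 */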
 599	if ((ndev->mtu > XAE_MTU) &&
 600	    (ndev->mtu <= XAE_JUMBO_MTU)) {
 601		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
 602					XAE_TRL_SIZE;
 603
 604		if (lp->max_frm_size <= lp->rxmem)
 605			lp->options |= XAE_OPTION_JUMBO;
 606	}
 607
 608	ret = axienet_dma_bd_init(ndev);
 609	if (ret) {
 610		netdev_err(ndev, "%s: descriptor allocation failed\n",
 611			   __func__);
 612		return ret;
 613	}
 614
 615	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
 616	axienet_status &= ~XAE_RCW1_RX_MASK;
 617	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
 618
 619	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
 620	if (axienet_status & XAE_INT_RXRJECT_MASK)
 621		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
 622	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
 623		    XAE_INT_RECV_ERROR_MASK : 0);
 624
 625	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 626
 627	/* Sync default options with HW but leave receiver and
 628	 * transmitter disabled.
 629	 */
 630	axienet_setoptions(ndev, lp->options &
 631			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 632	axienet_set_mac_address(ndev, NULL);
 633	axienet_set_multicast_list(ndev);
 634	axienet_setoptions(ndev, lp->options);
 635
 636	netif_trans_update(ndev);
 637
 638	return 0;
 639}
 640
 641/**
 642 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 643 * @lp:		Pointer to the axienet_local structure
 644 * @first_bd:	Index of first descriptor to clean up
 645 * @nr_bds:	Max number of descriptors to clean up
 646 * @force:	Whether to clean descriptors even if not complete
 647 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 648 *		in all cleaned-up descriptors. Ignored if NULL.
 649 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 650 *
 * Called either after a successful transmit operation, or after there was an
 * error when setting up the chain.
 *
 * Return: the number of descriptors handled.
 654 */
 655static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
 656				 int nr_bds, bool force, u32 *sizep, int budget)
 657{
 658	struct axidma_bd *cur_p;
 659	unsigned int status;
 
 660	dma_addr_t phys;
 661	int i;
 662
 663	for (i = 0; i < nr_bds; i++) {
 664		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
 665		status = cur_p->status;
 666
 667		/* If force is not specified, clean up only descriptors
 668		 * that have been completed by the MAC.
 669		 */
 670		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 671			break;
 672
 673		/* Ensure we see complete descriptor update */
 674		dma_rmb();
 675		phys = desc_get_phys_addr(lp, cur_p);
 676		dma_unmap_single(lp->dev, phys,
 677				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 678				 DMA_TO_DEVICE);
 679
 680		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
 681			napi_consume_skb(cur_p->skb, budget);
 682
 683		cur_p->app0 = 0;
 684		cur_p->app1 = 0;
 685		cur_p->app2 = 0;
 686		cur_p->app4 = 0;
 687		cur_p->skb = NULL;
 688		/* ensure our transmit path and device don't prematurely see status cleared */
 689		wmb();
 690		cur_p->cntrl = 0;
 691		cur_p->status = 0;
 692
 693		if (sizep)
 694			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 695	}
 696
 697	return i;
 698}
 699
 700/**
 701 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 702 * @lp:		Pointer to the axienet_local structure
 703 * @num_frag:	The number of BDs to check for
 704 *
 705 * Return: 0, on success
 706 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 707 *
 708 * This function is invoked before BDs are allocated and transmission starts.
 709 * This function returns 0 if a BD or group of BDs can be allocated for
 710 * transmission. If the BD or any of the BDs are not free the function
 711 * returns a busy status.
 712 */
 713static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 714					    int num_frag)
 715{
 716	struct axidma_bd *cur_p;
 717
 718	/* Ensure we see all descriptor updates from device or TX polling */
 719	rmb();
 720	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
 721			     lp->tx_bd_num];
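	/* A non-zero cntrl field means this descriptor has not yet been
	 * reclaimed by axienet_free_tx_chain(), so the ring cannot take
	 * num_frag more descriptors.
	 */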
 722	if (cur_p->cntrl)
 723		return NETDEV_TX_BUSY;
 724	return 0;
 725}
 726
 727/**
 728 * axienet_tx_poll - Invoked once a transmit is completed by the
 729 * Axi DMA Tx channel.
 730 * @napi:	Pointer to NAPI structure.
 731 * @budget:	Max number of TX packets to process.
 732 *
 733 * Return: Number of TX packets processed.
 734 *
 735 * This function is invoked from the NAPI processing to notify the completion
 736 * of transmit operation. It clears fields in the corresponding Tx BDs and
 737 * unmaps the corresponding buffer so that CPU can regain ownership of the
 738 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 739 * required.
 740 */
 741static int axienet_tx_poll(struct napi_struct *napi, int budget)
 742{
 743	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
 744	struct net_device *ndev = lp->ndev;
 745	u32 size = 0;
 746	int packets;
 747
 748	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
 
 749
 750	if (packets) {
 751		lp->tx_bd_ci += packets;
 752		if (lp->tx_bd_ci >= lp->tx_bd_num)
 753			lp->tx_bd_ci %= lp->tx_bd_num;
 754
 755		u64_stats_update_begin(&lp->tx_stat_sync);
 756		u64_stats_add(&lp->tx_packets, packets);
 757		u64_stats_add(&lp->tx_bytes, size);
 758		u64_stats_update_end(&lp->tx_stat_sync);
 759
 760		/* Matches barrier in axienet_start_xmit */
 761		smp_mb();
 762
 763		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
 764			netif_wake_queue(ndev);
 765	}
 766
 767	if (packets < budget && napi_complete_done(napi, packets)) {
 768		/* Re-enable TX completion interrupts. This should
 769		 * cause an immediate interrupt if any TX packets are
 770		 * already pending.
 771		 */
 772		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 773	}
 774	return packets;
 775}
 776
 777/**
 778 * axienet_start_xmit - Starts the transmission.
 779 * @skb:	sk_buff pointer that contains data to be Txed.
 780 * @ndev:	Pointer to net_device structure.
 781 *
 782 * Return: NETDEV_TX_OK, on success
 783 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 784 *
 785 * This function is invoked from upper layers to initiate transmission. The
 786 * function uses the next available free BDs and populates their fields to
 787 * start the transmission. Additionally if checksum offloading is supported,
 788 * it populates AXI Stream Control fields with appropriate values.
 789 */
 790static netdev_tx_t
 791axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 792{
 793	u32 ii;
 794	u32 num_frag;
 795	u32 csum_start_off;
 796	u32 csum_index_off;
 797	skb_frag_t *frag;
 798	dma_addr_t tail_p, phys;
 799	u32 orig_tail_ptr, new_tail_ptr;
 800	struct axienet_local *lp = netdev_priv(ndev);
 801	struct axidma_bd *cur_p;
 802
 803	orig_tail_ptr = lp->tx_bd_tail;
 804	new_tail_ptr = orig_tail_ptr;
 805
 806	num_frag = skb_shinfo(skb)->nr_frags;
 807	cur_p = &lp->tx_bd_v[orig_tail_ptr];
 808
 809	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
 810		/* Should not happen as last start_xmit call should have
 811		 * checked for sufficient space and queue should only be
 812		 * woken when sufficient space is available.
 813		 */
 814		netif_stop_queue(ndev);
 815		if (net_ratelimit())
 816			netdev_warn(ndev, "TX ring unexpectedly full\n");
 817		return NETDEV_TX_BUSY;
 818	}
 819
 820	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 821		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 822			/* Tx Full Checksum Offload Enabled */
 823			cur_p->app0 |= 2;
 824		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
 825			csum_start_off = skb_transport_offset(skb);
 826			csum_index_off = csum_start_off + skb->csum_offset;
 827			/* Tx Partial Checksum Offload Enabled */
 828			cur_p->app0 |= 1;
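			/* app1 carries the checksum start offset in its upper
			 * 16 bits and the checksum insert offset in its lower
			 * 16 bits.
			 */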
 829			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
 830		}
 831	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 832		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
 833	}
 834
 835	phys = dma_map_single(lp->dev, skb->data,
 836			      skb_headlen(skb), DMA_TO_DEVICE);
 837	if (unlikely(dma_mapping_error(lp->dev, phys))) {
 838		if (net_ratelimit())
 839			netdev_err(ndev, "TX DMA mapping error\n");
 840		ndev->stats.tx_dropped++;
 
 841		return NETDEV_TX_OK;
 842	}
 843	desc_set_phys_addr(lp, phys, cur_p);
 844	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 845
 846	for (ii = 0; ii < num_frag; ii++) {
 847		if (++new_tail_ptr >= lp->tx_bd_num)
 848			new_tail_ptr = 0;
 849		cur_p = &lp->tx_bd_v[new_tail_ptr];
 850		frag = &skb_shinfo(skb)->frags[ii];
 851		phys = dma_map_single(lp->dev,
 852				      skb_frag_address(frag),
 853				      skb_frag_size(frag),
 854				      DMA_TO_DEVICE);
 855		if (unlikely(dma_mapping_error(lp->dev, phys))) {
 856			if (net_ratelimit())
 857				netdev_err(ndev, "TX DMA mapping error\n");
 858			ndev->stats.tx_dropped++;
 859			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
 860					      true, NULL, 0);
 
 861			return NETDEV_TX_OK;
 862		}
 863		desc_set_phys_addr(lp, phys, cur_p);
 864		cur_p->cntrl = skb_frag_size(frag);
 865	}
 866
 867	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
 868	cur_p->skb = skb;
 869
 870	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
 871	if (++new_tail_ptr >= lp->tx_bd_num)
 872		new_tail_ptr = 0;
 873	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
 874
 875	/* Start the transfer */
 876	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
 877
 878	/* Stop queue if next transmit may not have space */
 879	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
 880		netif_stop_queue(ndev);
 881
 882		/* Matches barrier in axienet_tx_poll */
 883		smp_mb();
 884
 885		/* Space might have just been freed - check again */
 886		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
 887			netif_wake_queue(ndev);
 888	}
 889
 890	return NETDEV_TX_OK;
 891}
 892
 893/**
 894 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 895 * @napi:	Pointer to NAPI structure.
 896 * @budget:	Max number of RX packets to process.
 897 *
 898 * Return: Number of RX packets processed.
 899 */
 900static int axienet_rx_poll(struct napi_struct *napi, int budget)
 901{
 902	u32 length;
 903	u32 csumstatus;
 904	u32 size = 0;
 905	int packets = 0;
 906	dma_addr_t tail_p = 0;
 907	struct axidma_bd *cur_p;
 908	struct sk_buff *skb, *new_skb;
 909	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
 910
 911	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 912
 913	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
 914		dma_addr_t phys;
 915
 916		/* Ensure we see complete descriptor update */
 917		dma_rmb();
 918
 919		skb = cur_p->skb;
 920		cur_p->skb = NULL;
 921
 922		/* skb could be NULL if a previous pass already received the
 923		 * packet for this slot in the ring, but failed to refill it
 924		 * with a newly allocated buffer. In this case, don't try to
 925		 * receive it again.
 926		 */
 927		if (likely(skb)) {
 928			length = cur_p->app4 & 0x0000FFFF;
 929
 930			phys = desc_get_phys_addr(lp, cur_p);
 931			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
 932					 DMA_FROM_DEVICE);
 933
 934			skb_put(skb, length);
 935			skb->protocol = eth_type_trans(skb, lp->ndev);
 936			/*skb_checksum_none_assert(skb);*/
 937			skb->ip_summed = CHECKSUM_NONE;
 938
 939			/* if we're doing Rx csum offload, set it up */
 940			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
 941				csumstatus = (cur_p->app2 &
 942					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
 943				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
 944				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
 945					skb->ip_summed = CHECKSUM_UNNECESSARY;
 946				}
 947			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
 948				   skb->protocol == htons(ETH_P_IP) &&
 949				   skb->len > 64) {
 950				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
 951				skb->ip_summed = CHECKSUM_COMPLETE;
 952			}
 953
 954			napi_gro_receive(napi, skb);
 955
 956			size += length;
 957			packets++;
 958		}
 959
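		/* Refill this ring slot with a freshly allocated and
		 * DMA-mapped buffer before handing it back to the hardware.
		 */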
 960		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
 961		if (!new_skb)
 962			break;
 963
 964		phys = dma_map_single(lp->dev, new_skb->data,
 965				      lp->max_frm_size,
 966				      DMA_FROM_DEVICE);
 967		if (unlikely(dma_mapping_error(lp->dev, phys))) {
 968			if (net_ratelimit())
 969				netdev_err(lp->ndev, "RX DMA mapping error\n");
 970			dev_kfree_skb(new_skb);
 971			break;
 972		}
 973		desc_set_phys_addr(lp, phys, cur_p);
 974
 975		cur_p->cntrl = lp->max_frm_size;
 976		cur_p->status = 0;
 977		cur_p->skb = new_skb;
 978
 979		/* Only update tail_p to mark this slot as usable after it has
 980		 * been successfully refilled.
 981		 */
 982		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
 983
 984		if (++lp->rx_bd_ci >= lp->rx_bd_num)
 985			lp->rx_bd_ci = 0;
 986		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 987	}
 988
 989	u64_stats_update_begin(&lp->rx_stat_sync);
 990	u64_stats_add(&lp->rx_packets, packets);
 991	u64_stats_add(&lp->rx_bytes, size);
 992	u64_stats_update_end(&lp->rx_stat_sync);
 993
 994	if (tail_p)
 995		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
 996
 997	if (packets < budget && napi_complete_done(napi, packets)) {
 998		/* Re-enable RX completion interrupts. This should
 999		 * cause an immediate interrupt if any RX packets are
1000		 * already pending.
1001		 */
1002		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1003	}
1004	return packets;
1005}
1006
1007/**
1008 * axienet_tx_irq - Tx Done Isr.
1009 * @irq:	irq number
1010 * @_ndev:	net_device pointer
1011 *
1012 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1013 *
1014 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1015 * TX BD processing.
1016 */
1017static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1018{
1019	unsigned int status;
1020	struct net_device *ndev = _ndev;
1021	struct axienet_local *lp = netdev_priv(ndev);
1022
1023	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1024
1025	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1026		return IRQ_NONE;
1027
1028	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1029
1030	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1031		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1032		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1033			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1034			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1035		schedule_work(&lp->dma_err_task);
1036	} else {
1037		/* Disable further TX completion interrupts and schedule
1038		 * NAPI to handle the completions.
1039		 */
1040		u32 cr = lp->tx_dma_cr;
1041
1042		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1043		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1044
1045		napi_schedule(&lp->napi_tx);
 
1046	}
1047
1048	return IRQ_HANDLED;
1049}
1050
1051/**
1052 * axienet_rx_irq - Rx Isr.
1053 * @irq:	irq number
1054 * @_ndev:	net_device pointer
1055 *
1056 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1057 *
1058 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1059 * processing.
1060 */
1061static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1062{
1063	unsigned int status;
1064	struct net_device *ndev = _ndev;
1065	struct axienet_local *lp = netdev_priv(ndev);
1066
1067	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1068
1069	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1070		return IRQ_NONE;
1071
1072	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1073
1074	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1075		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1076		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1077			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1078			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1079		schedule_work(&lp->dma_err_task);
1080	} else {
1081		/* Disable further RX completion interrupts and schedule
1082		 * NAPI receive.
1083		 */
1084		u32 cr = lp->rx_dma_cr;
1085
1086		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1087		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1088
1089		napi_schedule(&lp->napi_rx);
 
1090	}
1091
1092	return IRQ_HANDLED;
1093}
1094
1095/**
1096 * axienet_eth_irq - Ethernet core Isr.
1097 * @irq:	irq number
1098 * @_ndev:	net_device pointer
1099 *
1100 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1101 *
1102 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1103 */
1104static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1105{
1106	struct net_device *ndev = _ndev;
1107	struct axienet_local *lp = netdev_priv(ndev);
1108	unsigned int pending;
1109
1110	pending = axienet_ior(lp, XAE_IP_OFFSET);
1111	if (!pending)
1112		return IRQ_NONE;
1113
1114	if (pending & XAE_INT_RXFIFOOVR_MASK)
1115		ndev->stats.rx_missed_errors++;
1116
1117	if (pending & XAE_INT_RXRJECT_MASK)
1118		ndev->stats.rx_frame_errors++;
1119
1120	axienet_iow(lp, XAE_IS_OFFSET, pending);
1121	return IRQ_HANDLED;
1122}
1123
1124static void axienet_dma_err_handler(struct work_struct *work);
1125
1126/**
1127 * axienet_open - Driver open routine.
1128 * @ndev:	Pointer to net_device structure
1129 *
1130 * Return: 0, on success.
1131 *	    non-zero error value on failure
1132 *
1133 * This is the driver open routine. It calls phylink_start to start the
1134 * PHY device.
 * It also registers the interrupt service routines and enables interrupt
 * handling. The Axi Ethernet core is reset through the Axi DMA core. Buffer
1137 * descriptors are initialized.
1138 */
1139static int axienet_open(struct net_device *ndev)
1140{
1141	int ret;
1142	struct axienet_local *lp = netdev_priv(ndev);
1143
1144	dev_dbg(&ndev->dev, "axienet_open()\n");
1145
1146	/* When we do an Axi Ethernet reset, it resets the complete core
1147	 * including the MDIO. MDIO must be disabled before resetting.
1148	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1149	 */
1150	axienet_lock_mii(lp);
1151	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);
	if (ret)
		return ret;
1153
1154	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1155	if (ret) {
1156		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1157		return ret;
1158	}
1159
1160	phylink_start(lp->phylink);
1161
1162	/* Enable worker thread for Axi DMA error handling */
 
1163	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1164
1165	napi_enable(&lp->napi_rx);
1166	napi_enable(&lp->napi_tx);
1167
1168	/* Enable interrupts for Axi DMA Tx */
1169	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1170			  ndev->name, ndev);
1171	if (ret)
1172		goto err_tx_irq;
1173	/* Enable interrupts for Axi DMA Rx */
1174	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1175			  ndev->name, ndev);
1176	if (ret)
1177		goto err_rx_irq;
1178	/* Enable interrupts for Axi Ethernet core (if defined) */
1179	if (lp->eth_irq > 0) {
1180		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1181				  ndev->name, ndev);
1182		if (ret)
1183			goto err_eth_irq;
1184	}
1185
1186	return 0;
1187
1188err_eth_irq:
1189	free_irq(lp->rx_irq, ndev);
1190err_rx_irq:
1191	free_irq(lp->tx_irq, ndev);
1192err_tx_irq:
1193	napi_disable(&lp->napi_tx);
1194	napi_disable(&lp->napi_rx);
1195	phylink_stop(lp->phylink);
1196	phylink_disconnect_phy(lp->phylink);
1197	cancel_work_sync(&lp->dma_err_task);
1198	dev_err(lp->dev, "request_irq() failed\n");
1199	return ret;
1200}
1201
1202/**
1203 * axienet_stop - Driver stop routine.
1204 * @ndev:	Pointer to net_device structure
1205 *
1206 * Return: 0, on success.
1207 *
1208 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1209 * device. It also removes the interrupt handlers and disables the interrupts.
1210 * The Axi DMA Tx/Rx BDs are released.
1211 */
1212static int axienet_stop(struct net_device *ndev)
1213{
1214	struct axienet_local *lp = netdev_priv(ndev);
1215
1216	dev_dbg(&ndev->dev, "axienet_close()\n");
1217
1218	napi_disable(&lp->napi_tx);
1219	napi_disable(&lp->napi_rx);
1220
1221	phylink_stop(lp->phylink);
1222	phylink_disconnect_phy(lp->phylink);
1223
1224	axienet_setoptions(ndev, lp->options &
1225			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1226
1227	axienet_dma_stop(lp);
1228
1229	axienet_iow(lp, XAE_IE_OFFSET, 0);
1230
1231	cancel_work_sync(&lp->dma_err_task);
1232
1233	if (lp->eth_irq > 0)
1234		free_irq(lp->eth_irq, ndev);
1235	free_irq(lp->tx_irq, ndev);
1236	free_irq(lp->rx_irq, ndev);
1237
1238	axienet_dma_bd_release(ndev);
1239	return 0;
1240}
1241
1242/**
1243 * axienet_change_mtu - Driver change mtu routine.
1244 * @ndev:	Pointer to net_device structure
1245 * @new_mtu:	New mtu value to be applied
1246 *
 * Return: 0 on success, -EBUSY if the interface is running, or -EINVAL if
 * the new MTU does not fit in the configured Rx memory.
1248 *
1249 * This is the change mtu driver routine. It checks if the Axi Ethernet
1250 * hardware supports jumbo frames before changing the mtu. This can be
1251 * called only when the device is not up.
1252 */
1253static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1254{
1255	struct axienet_local *lp = netdev_priv(ndev);
1256
1257	if (netif_running(ndev))
1258		return -EBUSY;
1259
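	/* The new frame, including the VLAN Ethernet header and trailer,
	 * must fit within the Rx memory configured in the hardware.
	 */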
1260	if ((new_mtu + VLAN_ETH_HLEN +
1261		XAE_TRL_SIZE) > lp->rxmem)
1262		return -EINVAL;
1263
1264	ndev->mtu = new_mtu;
1265
1266	return 0;
1267}
1268
1269#ifdef CONFIG_NET_POLL_CONTROLLER
1270/**
1271 * axienet_poll_controller - Axi Ethernet poll mechanism.
1272 * @ndev:	Pointer to net_device structure
1273 *
 * This implements the Rx/Tx ISR poll mechanism. The interrupts are disabled
 * before the ISRs are polled and re-enabled once polling is done.
1276 */
1277static void axienet_poll_controller(struct net_device *ndev)
1278{
1279	struct axienet_local *lp = netdev_priv(ndev);
 
1280	disable_irq(lp->tx_irq);
1281	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
1284	enable_irq(lp->tx_irq);
1285	enable_irq(lp->rx_irq);
1286}
1287#endif
1288
1289static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1290{
1291	struct axienet_local *lp = netdev_priv(dev);
1292
1293	if (!netif_running(dev))
1294		return -EINVAL;
1295
1296	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1297}
1298
1299static void
1300axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1301{
1302	struct axienet_local *lp = netdev_priv(dev);
1303	unsigned int start;
1304
1305	netdev_stats_to_stats64(stats, &dev->stats);
1306
1307	do {
1308		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1309		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1310		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1311	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1312
1313	do {
1314		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1315		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1316		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1317	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1318}
1319
1320static const struct net_device_ops axienet_netdev_ops = {
1321	.ndo_open = axienet_open,
1322	.ndo_stop = axienet_stop,
1323	.ndo_start_xmit = axienet_start_xmit,
1324	.ndo_get_stats64 = axienet_get_stats64,
1325	.ndo_change_mtu	= axienet_change_mtu,
1326	.ndo_set_mac_address = netdev_set_mac_address,
1327	.ndo_validate_addr = eth_validate_addr,
1328	.ndo_eth_ioctl = axienet_ioctl,
1329	.ndo_set_rx_mode = axienet_set_multicast_list,
1330#ifdef CONFIG_NET_POLL_CONTROLLER
1331	.ndo_poll_controller = axienet_poll_controller,
1332#endif
1333};
1334
1335/**
1336 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1337 * @ndev:	Pointer to net_device structure
1338 * @ed:		Pointer to ethtool_drvinfo structure
1339 *
1340 * This implements ethtool command for getting the driver information.
1341 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1342 */
1343static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1344					 struct ethtool_drvinfo *ed)
1345{
1346	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1347	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1348}
1349
1350/**
1351 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1352 *				   AxiEthernet core.
1353 * @ndev:	Pointer to net_device structure
1354 *
1355 * This implements ethtool command for getting the total register length
1356 * information.
1357 *
1358 * Return: the total regs length
1359 */
1360static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1361{
1362	return sizeof(u32) * AXIENET_REGS_N;
1363}
1364
1365/**
1366 * axienet_ethtools_get_regs - Dump the contents of all registers present
1367 *			       in AxiEthernet core.
1368 * @ndev:	Pointer to net_device structure
1369 * @regs:	Pointer to ethtool_regs structure
1370 * @ret:	Void pointer used to return the contents of the registers.
1371 *
1372 * This implements ethtool command for getting the Axi Ethernet register dump.
1373 * Issue "ethtool -d ethX" to execute this function.
1374 */
1375static void axienet_ethtools_get_regs(struct net_device *ndev,
1376				      struct ethtool_regs *regs, void *ret)
1377{
1378	u32 *data = (u32 *)ret;
1379	size_t len = sizeof(u32) * AXIENET_REGS_N;
1380	struct axienet_local *lp = netdev_priv(ndev);
1381
1382	regs->version = 0;
1383	regs->len = len;
1384
1385	memset(data, 0, len);
1386	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1387	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1388	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1389	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1390	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1391	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1392	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1393	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1394	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1395	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1396	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1397	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1398	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1399	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1400	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1401	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1402	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1403	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1404	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1405	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1406	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1407	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1408	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1409	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1410	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1411	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1412	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1413	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1414	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1415	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1416	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1417	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1418	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1419	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1420	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1421	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1422}
1423
1424static void
1425axienet_ethtools_get_ringparam(struct net_device *ndev,
1426			       struct ethtool_ringparam *ering,
1427			       struct kernel_ethtool_ringparam *kernel_ering,
1428			       struct netlink_ext_ack *extack)
1429{
1430	struct axienet_local *lp = netdev_priv(ndev);
1431
1432	ering->rx_max_pending = RX_BD_NUM_MAX;
1433	ering->rx_mini_max_pending = 0;
1434	ering->rx_jumbo_max_pending = 0;
1435	ering->tx_max_pending = TX_BD_NUM_MAX;
1436	ering->rx_pending = lp->rx_bd_num;
1437	ering->rx_mini_pending = 0;
1438	ering->rx_jumbo_pending = 0;
1439	ering->tx_pending = lp->tx_bd_num;
1440}
1441
1442static int
1443axienet_ethtools_set_ringparam(struct net_device *ndev,
1444			       struct ethtool_ringparam *ering,
1445			       struct kernel_ethtool_ringparam *kernel_ering,
1446			       struct netlink_ext_ack *extack)
1447{
1448	struct axienet_local *lp = netdev_priv(ndev);
1449
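	/* Mini and jumbo rings are not supported; reject those, as well as
	 * ring sizes outside the limits the descriptor rings can handle.
	 */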
1450	if (ering->rx_pending > RX_BD_NUM_MAX ||
1451	    ering->rx_mini_pending ||
1452	    ering->rx_jumbo_pending ||
1453	    ering->tx_pending < TX_BD_NUM_MIN ||
1454	    ering->tx_pending > TX_BD_NUM_MAX)
1455		return -EINVAL;
1456
1457	if (netif_running(ndev))
1458		return -EBUSY;
1459
1460	lp->rx_bd_num = ering->rx_pending;
1461	lp->tx_bd_num = ering->tx_pending;
1462	return 0;
1463}
1464
1465/**
1466 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1467 *				     Tx and Rx paths.
1468 * @ndev:	Pointer to net_device structure
1469 * @epauseparm:	Pointer to ethtool_pauseparam structure.
1470 *
1471 * This implements ethtool command for getting axi ethernet pause frame
1472 * setting. Issue "ethtool -a ethX" to execute this function.
1473 */
1474static void
1475axienet_ethtools_get_pauseparam(struct net_device *ndev,
1476				struct ethtool_pauseparam *epauseparm)
1477{
1478	struct axienet_local *lp = netdev_priv(ndev);
1479
1480	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1481}
1482
1483/**
1484 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1485 *				     settings.
1486 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
1488 *
1489 * This implements ethtool command for enabling flow control on Rx and Tx
1490 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1491 * function.
1492 *
 * Return: 0 on success, or a negative error value returned by phylink.
1494 */
1495static int
1496axienet_ethtools_set_pauseparam(struct net_device *ndev,
1497				struct ethtool_pauseparam *epauseparm)
1498{
1499	struct axienet_local *lp = netdev_priv(ndev);
1500
1501	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
1502}
1503
1504/**
1505 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
1506 * @ndev:	Pointer to net_device structure
1507 * @ecoalesce:	Pointer to ethtool_coalesce structure
1508 * @kernel_coal: ethtool CQE mode setting structure
1509 * @extack:	extack for reporting error messages
1510 *
1511 * This implements ethtool command for getting the DMA interrupt coalescing
1512 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
1513 * execute this function.
1514 *
1515 * Return: 0 always
1516 */
1517static int
1518axienet_ethtools_get_coalesce(struct net_device *ndev,
1519			      struct ethtool_coalesce *ecoalesce,
1520			      struct kernel_ethtool_coalesce *kernel_coal,
1521			      struct netlink_ext_ack *extack)
1522{
1523	struct axienet_local *lp = netdev_priv(ndev);
1524
1525	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
1526	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
1527	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
1528	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
1529	return 0;
1530}
1531
1532/**
1533 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
1534 * @ndev:	Pointer to net_device structure
1535 * @ecoalesce:	Pointer to ethtool_coalesce structure
1536 * @kernel_coal: ethtool CQE mode setting structure
1537 * @extack:	extack for reporting error messages
1538 *
1539 * This implements ethtool command for setting the DMA interrupt coalescing
1540 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
1541 * prompt to execute this function.
1542 *
1543 * Return: 0, on success, Non-zero error value on failure.
1544 */
1545static int
1546axienet_ethtools_set_coalesce(struct net_device *ndev,
1547			      struct ethtool_coalesce *ecoalesce,
1548			      struct kernel_ethtool_coalesce *kernel_coal,
1549			      struct netlink_ext_ack *extack)
1550{
1551	struct axienet_local *lp = netdev_priv(ndev);
1552
1553	if (netif_running(ndev)) {
1554		netdev_err(ndev,
1555			   "Please stop netif before applying configuration\n");
1556		return -EFAULT;
1557	}
1558
1559	if (ecoalesce->rx_max_coalesced_frames)
1560		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
1561	if (ecoalesce->rx_coalesce_usecs)
1562		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
1563	if (ecoalesce->tx_max_coalesced_frames)
1564		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
1565	if (ecoalesce->tx_coalesce_usecs)
1566		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
1567
1568	return 0;
1569}
1570
1571static int
1572axienet_ethtools_get_link_ksettings(struct net_device *ndev,
1573				    struct ethtool_link_ksettings *cmd)
1574{
1575	struct axienet_local *lp = netdev_priv(ndev);
1576
1577	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
1578}
1579
1580static int
1581axienet_ethtools_set_link_ksettings(struct net_device *ndev,
1582				    const struct ethtool_link_ksettings *cmd)
1583{
1584	struct axienet_local *lp = netdev_priv(ndev);
1585
1586	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
1587}
1588
1589static int axienet_ethtools_nway_reset(struct net_device *dev)
1590{
1591	struct axienet_local *lp = netdev_priv(dev);
1592
1593	return phylink_ethtool_nway_reset(lp->phylink);
1594}
1595
1596static const struct ethtool_ops axienet_ethtool_ops = {
1597	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
1598				     ETHTOOL_COALESCE_USECS,
1599	.get_drvinfo    = axienet_ethtools_get_drvinfo,
1600	.get_regs_len   = axienet_ethtools_get_regs_len,
1601	.get_regs       = axienet_ethtools_get_regs,
1602	.get_link       = ethtool_op_get_link,
1603	.get_ringparam	= axienet_ethtools_get_ringparam,
1604	.set_ringparam	= axienet_ethtools_set_ringparam,
1605	.get_pauseparam = axienet_ethtools_get_pauseparam,
1606	.set_pauseparam = axienet_ethtools_set_pauseparam,
1607	.get_coalesce   = axienet_ethtools_get_coalesce,
1608	.set_coalesce   = axienet_ethtools_set_coalesce,
1609	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
1610	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
1611	.nway_reset	= axienet_ethtools_nway_reset,
1612};
1613
1614static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
1615{
1616	return container_of(pcs, struct axienet_local, pcs);
1617}
1618
1619static void axienet_pcs_get_state(struct phylink_pcs *pcs,
1620				  struct phylink_link_state *state)
1621{
1622	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1623
1624	phylink_mii_c22_pcs_get_state(pcs_phy, state);
1625}
1626
1627static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
1628{
1629	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1630
1631	phylink_mii_c22_pcs_an_restart(pcs_phy);
1632}
1633
1634static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
1635			      phy_interface_t interface,
1636			      const unsigned long *advertising,
1637			      bool permit_pause_to_mac)
1638{
1639	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1640	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
1641	struct axienet_local *lp = netdev_priv(ndev);
1642	int ret;
1643
1644	if (lp->switch_x_sgmii) {
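		/* Tell the PCS/PMA core, via its standard-select register,
		 * whether to operate in SGMII or 1000BaseX mode.
		 */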
1645		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
1646				    interface == PHY_INTERFACE_MODE_SGMII ?
1647					XLNX_MII_STD_SELECT_SGMII : 0);
1648		if (ret < 0) {
1649			netdev_warn(ndev,
1650				    "Failed to switch PHY interface: %d\n",
1651				    ret);
1652			return ret;
1653		}
1654	}
1655
1656	ret = phylink_mii_c22_pcs_config(pcs_phy, mode, interface, advertising);
 
1657	if (ret < 0)
1658		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
1659
1660	return ret;
1661}
1662
1663static const struct phylink_pcs_ops axienet_pcs_ops = {
1664	.pcs_get_state = axienet_pcs_get_state,
1665	.pcs_config = axienet_pcs_config,
1666	.pcs_an_restart = axienet_pcs_an_restart,
1667};
1668
1669static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
1670						  phy_interface_t interface)
1671{
1672	struct net_device *ndev = to_net_dev(config->dev);
1673	struct axienet_local *lp = netdev_priv(ndev);
1674
1675	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
1676	    interface ==  PHY_INTERFACE_MODE_SGMII)
1677		return &lp->pcs;
1678
1679	return NULL;
1680}
1681
1682static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
1683			       const struct phylink_link_state *state)
1684{
1685	/* nothing meaningful to do */
1686}
1687
1688static void axienet_mac_link_down(struct phylink_config *config,
1689				  unsigned int mode,
1690				  phy_interface_t interface)
1691{
1692	/* nothing meaningful to do */
1693}
1694
1695static void axienet_mac_link_up(struct phylink_config *config,
1696				struct phy_device *phy,
1697				unsigned int mode, phy_interface_t interface,
1698				int speed, int duplex,
1699				bool tx_pause, bool rx_pause)
1700{
1701	struct net_device *ndev = to_net_dev(config->dev);
1702	struct axienet_local *lp = netdev_priv(ndev);
1703	u32 emmc_reg, fcc_reg;
1704
1705	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
1706	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
1707
1708	switch (speed) {
1709	case SPEED_1000:
1710		emmc_reg |= XAE_EMMC_LINKSPD_1000;
1711		break;
1712	case SPEED_100:
1713		emmc_reg |= XAE_EMMC_LINKSPD_100;
1714		break;
1715	case SPEED_10:
1716		emmc_reg |= XAE_EMMC_LINKSPD_10;
1717		break;
1718	default:
1719		dev_err(&ndev->dev,
1720			"Speed other than 10, 100 or 1Gbps is not supported\n");
1721		break;
1722	}
1723
1724	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
1725
1726	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
1727	if (tx_pause)
1728		fcc_reg |= XAE_FCC_FCTX_MASK;
1729	else
1730		fcc_reg &= ~XAE_FCC_FCTX_MASK;
1731	if (rx_pause)
1732		fcc_reg |= XAE_FCC_FCRX_MASK;
1733	else
1734		fcc_reg &= ~XAE_FCC_FCRX_MASK;
1735	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
1736}
1737
1738static const struct phylink_mac_ops axienet_phylink_ops = {
1739	.mac_select_pcs = axienet_mac_select_pcs,
1740	.mac_config = axienet_mac_config,
1741	.mac_link_down = axienet_mac_link_down,
1742	.mac_link_up = axienet_mac_link_up,
1743};
1744
1745/**
1746 * axienet_dma_err_handler - Work queue task for Axi DMA Error
1747 * @work:	pointer to work_struct
1748 *
1749 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
1750 * Tx/Rx BDs.
1751 */
1752static void axienet_dma_err_handler(struct work_struct *work)
1753{
1754	u32 i;
1755	u32 axienet_status;
1756	struct axidma_bd *cur_p;
1757	struct axienet_local *lp = container_of(work, struct axienet_local,
1758						dma_err_task);
1759	struct net_device *ndev = lp->ndev;
1760
1761	napi_disable(&lp->napi_tx);
1762	napi_disable(&lp->napi_rx);
1763
1764	axienet_setoptions(ndev, lp->options &
1765			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1766
1767	axienet_dma_stop(lp);
1768
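	/* Unmap and free any Tx buffers that were still in flight, then
	 * return every Tx descriptor to its reset state.
	 */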
1769	for (i = 0; i < lp->tx_bd_num; i++) {
1770		cur_p = &lp->tx_bd_v[i];
1771		if (cur_p->cntrl) {
1772			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
1773
1774			dma_unmap_single(lp->dev, addr,
1775					 (cur_p->cntrl &
1776					  XAXIDMA_BD_CTRL_LENGTH_MASK),
1777					 DMA_TO_DEVICE);
1778		}
1779		if (cur_p->skb)
1780			dev_kfree_skb_irq(cur_p->skb);
1781		cur_p->phys = 0;
1782		cur_p->phys_msb = 0;
1783		cur_p->cntrl = 0;
1784		cur_p->status = 0;
1785		cur_p->app0 = 0;
1786		cur_p->app1 = 0;
1787		cur_p->app2 = 0;
1788		cur_p->app3 = 0;
1789		cur_p->app4 = 0;
1790		cur_p->skb = NULL;
1791	}
1792
1793	for (i = 0; i < lp->rx_bd_num; i++) {
1794		cur_p = &lp->rx_bd_v[i];
1795		cur_p->status = 0;
1796		cur_p->app0 = 0;
1797		cur_p->app1 = 0;
1798		cur_p->app2 = 0;
1799		cur_p->app3 = 0;
1800		cur_p->app4 = 0;
1801	}
1802
1803	lp->tx_bd_ci = 0;
1804	lp->tx_bd_tail = 0;
1805	lp->rx_bd_ci = 0;
1806
1807	axienet_dma_start(lp);
1808
1809	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
1810	axienet_status &= ~XAE_RCW1_RX_MASK;
1811	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
1812
1813	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
1814	if (axienet_status & XAE_INT_RXRJECT_MASK)
1815		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
1816	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
1817		    XAE_INT_RECV_ERROR_MASK : 0);
1818	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
1819
1820	/* Sync default options with HW but leave receiver and
1821	 * transmitter disabled.
1822	 */
1823	axienet_setoptions(ndev, lp->options &
1824			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1825	axienet_set_mac_address(ndev, NULL);
1826	axienet_set_multicast_list(ndev);
1827	axienet_setoptions(ndev, lp->options);
1828	napi_enable(&lp->napi_rx);
1829	napi_enable(&lp->napi_tx);
 
1830}
1831
1832/**
1833 * axienet_probe - Axi Ethernet probe function.
1834 * @pdev:	Pointer to platform device structure.
1835 *
1836 * Return: 0, on success
1837 *	    Non-zero error value on failure.
1838 *
1839 * This is the probe routine for Axi Ethernet driver. This is called before
1840 * any other driver routines are invoked. It allocates and sets up the Ethernet
1841 * device. Parses through device tree and populates fields of
1842 * axienet_local. It registers the Ethernet device.
1843 */
1844static int axienet_probe(struct platform_device *pdev)
1845{
1846	int ret;
1847	struct device_node *np;
1848	struct axienet_local *lp;
1849	struct net_device *ndev;
1850	struct resource *ethres;
1851	u8 mac_addr[ETH_ALEN];
1852	int addr_width = 32;
1853	u32 value;
1854
1855	ndev = alloc_etherdev(sizeof(*lp));
1856	if (!ndev)
1857		return -ENOMEM;
1858
1859	platform_set_drvdata(pdev, ndev);
1860
1861	SET_NETDEV_DEV(ndev, &pdev->dev);
1862	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
1863	ndev->features = NETIF_F_SG;
1864	ndev->netdev_ops = &axienet_netdev_ops;
1865	ndev->ethtool_ops = &axienet_ethtool_ops;
1866
1867	/* MTU range: 64 - 9000 */
1868	ndev->min_mtu = 64;
1869	ndev->max_mtu = XAE_JUMBO_MTU;
1870
1871	lp = netdev_priv(ndev);
1872	lp->ndev = ndev;
1873	lp->dev = &pdev->dev;
1874	lp->options = XAE_OPTION_DEFAULTS;
1875	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
1876	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
1877
1878	u64_stats_init(&lp->rx_stat_sync);
1879	u64_stats_init(&lp->tx_stat_sync);
1880
1881	netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
1882	netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
 
1883
1884	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
1885	if (!lp->axi_clk) {
1886		/* For backward compatibility, if named AXI clock is not present,
1887		 * treat the first clock specified as the AXI clock.
1888		 */
1889		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
1890	}
1891	if (IS_ERR(lp->axi_clk)) {
1892		ret = PTR_ERR(lp->axi_clk);
1893		goto free_netdev;
1894	}
1895	ret = clk_prepare_enable(lp->axi_clk);
1896	if (ret) {
1897		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
1898		goto free_netdev;
1899	}
1900
1901	lp->misc_clks[0].id = "axis_clk";
1902	lp->misc_clks[1].id = "ref_clk";
1903	lp->misc_clks[2].id = "mgt_clk";
1904
1905	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
1906	if (ret)
1907		goto cleanup_clk;
1908
1909	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
1910	if (ret)
1911		goto cleanup_clk;
1912
1913	/* Map device registers */
1914	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
1915	if (IS_ERR(lp->regs)) {
1916		ret = PTR_ERR(lp->regs);
1917		goto cleanup_clk;
1918	}
1919	lp->regs_start = ethres->start;
1920
1921	/* Setup checksum offload, but default to off if not specified */
1922	lp->features = 0;
1923
1924	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
1925	if (!ret) {
1926		switch (value) {
1927		case 1:
1928			lp->csum_offload_on_tx_path =
1929				XAE_FEATURE_PARTIAL_TX_CSUM;
1930			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
1931			/* Can checksum TCP/UDP over IPv4. */
1932			ndev->features |= NETIF_F_IP_CSUM;
1933			break;
1934		case 2:
1935			lp->csum_offload_on_tx_path =
1936				XAE_FEATURE_FULL_TX_CSUM;
1937			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
1938			/* Can checksum TCP/UDP over IPv4. */
1939			ndev->features |= NETIF_F_IP_CSUM;
1940			break;
1941		default:
1942			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
1943		}
1944	}
1945	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
1946	if (!ret) {
1947		switch (value) {
1948		case 1:
1949			lp->csum_offload_on_rx_path =
1950				XAE_FEATURE_PARTIAL_RX_CSUM;
1951			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
 
1952			break;
1953		case 2:
1954			lp->csum_offload_on_rx_path =
1955				XAE_FEATURE_FULL_RX_CSUM;
1956			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
 
1957			break;
1958		default:
1959			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
1960		}
1961	}
	/* To support jumbo frames, the Axi Ethernet hardware must be built
	 * with a sufficiently large Rx/Tx memory. Read the amount of Rx/Tx
	 * memory provisioned in the hardware from the device tree so that
	 * the jumbo option is only enabled when a jumbo frame actually fits.
1967	 */
1968	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
1969
1970	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
1971						   "xlnx,switch-x-sgmii");
1972
1973	/* Start with the proprietary, and broken phy_type */
1974	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
1975	if (!ret) {
1976		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
1977		switch (value) {
1978		case XAE_PHY_TYPE_MII:
1979			lp->phy_mode = PHY_INTERFACE_MODE_MII;
1980			break;
1981		case XAE_PHY_TYPE_GMII:
1982			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
1983			break;
1984		case XAE_PHY_TYPE_RGMII_2_0:
1985			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
1986			break;
1987		case XAE_PHY_TYPE_SGMII:
1988			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
1989			break;
1990		case XAE_PHY_TYPE_1000BASE_X:
1991			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
1992			break;
1993		default:
1994			ret = -EINVAL;
1995			goto cleanup_clk;
1996		}
1997	} else {
1998		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
1999		if (ret)
2000			goto cleanup_clk;
2001	}
2002	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2003	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2004		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2005		ret = -EINVAL;
2006		goto cleanup_clk;
2007	}
2008
2009	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2010	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2011	if (np) {
2012		struct resource dmares;
2013
2014		ret = of_address_to_resource(np, 0, &dmares);
2015		if (ret) {
2016			dev_err(&pdev->dev,
2017				"unable to get DMA resource\n");
2018			of_node_put(np);
2019			goto cleanup_clk;
2020		}
2021		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2022						     &dmares);
2023		lp->rx_irq = irq_of_parse_and_map(np, 1);
2024		lp->tx_irq = irq_of_parse_and_map(np, 0);
2025		of_node_put(np);
2026		lp->eth_irq = platform_get_irq_optional(pdev, 0);
2027	} else {
2028		/* Check for these resources directly on the Ethernet node. */
2029		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2030		lp->rx_irq = platform_get_irq(pdev, 1);
2031		lp->tx_irq = platform_get_irq(pdev, 0);
2032		lp->eth_irq = platform_get_irq_optional(pdev, 2);
2033	}
2034	if (IS_ERR(lp->dma_regs)) {
2035		dev_err(&pdev->dev, "could not map DMA regs\n");
2036		ret = PTR_ERR(lp->dma_regs);
2037		goto cleanup_clk;
2038	}
2039	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
2040		dev_err(&pdev->dev, "could not determine irqs\n");
2041		ret = -ENOMEM;
2042		goto cleanup_clk;
2043	}
2044
2045	/* Autodetect the need for 64-bit DMA pointers.
2046	 * When the IP is configured for a bus width bigger than 32 bits,
2047	 * writing the MSB registers is mandatory, even if they are all 0.
2048	 * We can detect this case by writing all 1's to one such register
2049	 * and seeing if it sticks: when the IP is configured for 32 bits
2050	 * only, those registers are RES0.
2051	 * Those MSB registers were introduced in IP v7.1, which we check first.
2052	 */
2053	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2054		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2055
2056		iowrite32(0x0, desc);
2057		if (ioread32(desc) == 0) {	/* sanity check */
2058			iowrite32(0xffffffff, desc);
2059			if (ioread32(desc) > 0) {
2060				lp->features |= XAE_FEATURE_DMA_64BIT;
2061				addr_width = 64;
2062				dev_info(&pdev->dev,
2063					 "autodetected 64-bit DMA range\n");
2064			}
2065			iowrite32(0x0, desc);
2066		}
2067	}
2068	if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2069		dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2070		ret = -EINVAL;
2071		goto cleanup_clk;
2072	}
2073
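	/* Configure the DMA addressing mask to match the detected pointer
	 * width (32 or 64 bits).
	 */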
2074	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2075	if (ret) {
2076		dev_err(&pdev->dev, "No suitable DMA available\n");
2077		goto cleanup_clk;
2078	}
2079
2080	/* Check for Ethernet core IRQ (optional) */
2081	if (lp->eth_irq <= 0)
2082		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2083
2084	/* Retrieve the MAC address */
2085	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2086	if (!ret) {
2087		axienet_set_mac_address(ndev, mac_addr);
2088	} else {
2089		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2090			 ret);
2091		axienet_set_mac_address(ndev, NULL);
2092	}
2093
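	/* Default interrupt coalescing settings; these can be adjusted at
	 * runtime through the ethtool coalescing interface.
	 */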
2094	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2095	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2096	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2097	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2098
2099	/* Reset core now that clocks are enabled, prior to accessing MDIO */
2100	ret = __axienet_device_reset(lp);
2101	if (ret)
2102		goto cleanup_clk;
2103
2104	ret = axienet_mdio_setup(lp);
2105	if (ret)
2106		dev_warn(&pdev->dev,
2107			 "error registering MDIO bus: %d\n", ret);
2108
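	/* SGMII and 1000BaseX links are driven through a PCS/PMA PHY, found
	 * on the MDIO bus via the "pcs-handle" phandle (or the legacy
	 * "phy-handle" on old device trees).
	 */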
2109	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2110	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2111		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2112		if (!np) {
2113			/* Deprecated: Always use "pcs-handle" for pcs_phy.
2114			 * Falling back to "phy-handle" here is only for
2115			 * backward compatibility with old device trees.
2116			 */
2117			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2118		}
2119		if (!np) {
2120			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2121			ret = -EINVAL;
2122			goto cleanup_mdio;
2123		}
2124		lp->pcs_phy = of_mdio_find_device(np);
2125		if (!lp->pcs_phy) {
2126			ret = -EPROBE_DEFER;
2127			of_node_put(np);
2128			goto cleanup_mdio;
2129		}
2130		of_node_put(np);
2131		lp->pcs.ops = &axienet_pcs_ops;
2132		lp->pcs.poll = true;
2133	}
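	/* For reference, a purely illustrative device-tree fragment covering
	 * the properties parsed above (node names, addresses and values are
	 * made up, not taken from any real board; see the axi-ethernet
	 * binding document for the authoritative description):
	 *
	 *	ethernet@40c00000 {
	 *		compatible = "xlnx,axi-ethernet-1.00.a";
	 *		phy-mode = "sgmii";
	 *		xlnx,rxmem = <0x8000>;
	 *		xlnx,txcsum = <2>;
	 *		xlnx,rxcsum = <2>;
	 *		axistream-connected = <&axi_dma>;
	 *		pcs-handle = <&pcs_phy>;
	 *	};
	 */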
2134
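	/* Describe the MAC's pause and speed capabilities to phylink and
	 * create the phylink instance that manages the link state.
	 */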
2135	lp->phylink_config.dev = &ndev->dev;
2136	lp->phylink_config.type = PHYLINK_NETDEV;
2137	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2138		MAC_10FD | MAC_100FD | MAC_1000FD;
2139
2140	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2141	if (lp->switch_x_sgmii) {
2142		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2143			  lp->phylink_config.supported_interfaces);
2144		__set_bit(PHY_INTERFACE_MODE_SGMII,
2145			  lp->phylink_config.supported_interfaces);
2146	}
2147
2148	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2149				     lp->phy_mode,
2150				     &axienet_phylink_ops);
2151	if (IS_ERR(lp->phylink)) {
2152		ret = PTR_ERR(lp->phylink);
2153		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2154		goto cleanup_mdio;
2155	}
2156
2157	ret = register_netdev(lp->ndev);
2158	if (ret) {
2159		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2160		goto cleanup_phylink;
2161	}
2162
2163	return 0;
2164
2165cleanup_phylink:
2166	phylink_destroy(lp->phylink);
2167
2168cleanup_mdio:
2169	if (lp->pcs_phy)
2170		put_device(&lp->pcs_phy->dev);
2171	if (lp->mii_bus)
2172		axienet_mdio_teardown(lp);
2173cleanup_clk:
2174	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2175	clk_disable_unprepare(lp->axi_clk);
2176
2177free_netdev:
2178	free_netdev(ndev);
2179
2180	return ret;
2181}
2182
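/* Undo axienet_probe: unregister the net_device, then release the phylink
 * instance, the PCS device reference, the MDIO bus and the clocks before
 * freeing the net_device.
 */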
2183static int axienet_remove(struct platform_device *pdev)
2184{
2185	struct net_device *ndev = platform_get_drvdata(pdev);
2186	struct axienet_local *lp = netdev_priv(ndev);
2187
2188	unregister_netdev(ndev);
2189
2190	if (lp->phylink)
2191		phylink_destroy(lp->phylink);
2192
2193	if (lp->pcs_phy)
2194		put_device(&lp->pcs_phy->dev);
2195
2196	axienet_mdio_teardown(lp);
2197
2198	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2199	clk_disable_unprepare(lp->axi_clk);
2200
2201	free_netdev(ndev);
2202
2203	return 0;
2204}
2205
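/* On shutdown, detach the network device and, if it was running, close it so
 * that the DMA engines and interrupts are quiesced.
 */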
2206static void axienet_shutdown(struct platform_device *pdev)
2207{
2208	struct net_device *ndev = platform_get_drvdata(pdev);
2209
2210	rtnl_lock();
2211	netif_device_detach(ndev);
2212
2213	if (netif_running(ndev))
2214		dev_close(ndev);
2215
2216	rtnl_unlock();
2217}
2218
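/* Power-management suspend: detach the net_device and stop the interface if
 * it was running.
 */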
2219static int axienet_suspend(struct device *dev)
2220{
2221	struct net_device *ndev = dev_get_drvdata(dev);
2222
2223	if (!netif_running(ndev))
2224		return 0;
2225
2226	netif_device_detach(ndev);
2227
2228	rtnl_lock();
2229	axienet_stop(ndev);
2230	rtnl_unlock();
2231
2232	return 0;
2233}
2234
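/* Power-management resume: reopen the interface (re-initializing the
 * hardware) and re-attach the net_device.
 */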
2235static int axienet_resume(struct device *dev)
2236{
2237	struct net_device *ndev = dev_get_drvdata(dev);
2238
2239	if (!netif_running(ndev))
2240		return 0;
2241
2242	rtnl_lock();
2243	axienet_open(ndev);
2244	rtnl_unlock();
2245
2246	netif_device_attach(ndev);
2247
2248	return 0;
2249}
2250
2251static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
2252				axienet_suspend, axienet_resume);
2253
2254static struct platform_driver axienet_driver = {
2255	.probe = axienet_probe,
2256	.remove = axienet_remove,
2257	.shutdown = axienet_shutdown,
2258	.driver = {
2259		 .name = "xilinx_axienet",
2260		 .pm = &axienet_pm_ops,
2261		 .of_match_table = axienet_of_match,
2262	},
2263};
2264
2265module_platform_driver(axienet_driver);
2266
2267MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
2268MODULE_AUTHOR("Xilinx");
2269MODULE_LICENSE("GPL");