   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Xilinx Axi Ethernet device driver
   4 *
   5 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
   6 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
   7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
   8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
   9 * Copyright (c) 2010 - 2011 PetaLogix
  10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
  11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  12 *
  13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
  14 * and Spartan6.
  15 *
  16 * TODO:
  17 *  - Add Axi Fifo support.
  18 *  - Factor out Axi DMA code into separate driver.
  19 *  - Test and fix basic multicast filtering.
  20 *  - Add support for extended multicast filtering.
  21 *  - Test basic VLAN support.
  22 *  - Add support for extended VLAN support.
  23 */
  24
  25#include <linux/clk.h>
  26#include <linux/delay.h>
  27#include <linux/etherdevice.h>
  28#include <linux/module.h>
  29#include <linux/netdevice.h>
  30#include <linux/of.h>
  31#include <linux/of_mdio.h>
  32#include <linux/of_net.h>
  33#include <linux/of_irq.h>
  34#include <linux/of_address.h>
  35#include <linux/platform_device.h>
  36#include <linux/skbuff.h>
  37#include <linux/math64.h>
  38#include <linux/phy.h>
  39#include <linux/mii.h>
  40#include <linux/ethtool.h>
  41#include <linux/dmaengine.h>
  42#include <linux/dma-mapping.h>
  43#include <linux/dma/xilinx_dma.h>
  44#include <linux/circ_buf.h>
  45#include <net/netdev_queues.h>
  46
  47#include "xilinx_axienet.h"
  48
  49/* Descriptors defines for Tx and Rx DMA */
  50#define TX_BD_NUM_DEFAULT		128
  51#define RX_BD_NUM_DEFAULT		1024
  52#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
  53#define TX_BD_NUM_MAX			4096
  54#define RX_BD_NUM_MAX			4096
  55#define DMA_NUM_APP_WORDS		5
  56#define LEN_APP				4
  57#define RX_BUF_NUM_DEFAULT		128
  58
  59/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
  60#define DRIVER_NAME		"xaxienet"
  61#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
  62#define DRIVER_VERSION		"1.00a"
  63
  64#define AXIENET_REGS_N		40
  65
  66static void axienet_rx_submit_desc(struct net_device *ndev);
  67
  68/* Match table for of_platform binding */
  69static const struct of_device_id axienet_of_match[] = {
  70	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
  71	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
  72	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
  73	{},
  74};
  75
  76MODULE_DEVICE_TABLE(of, axienet_of_match);
  77
  78/* Option table for setting up Axi Ethernet hardware options */
  79static struct axienet_option axienet_options[] = {
  80	/* Turn on jumbo packet support for both Rx and Tx */
  81	{
  82		.opt = XAE_OPTION_JUMBO,
  83		.reg = XAE_TC_OFFSET,
  84		.m_or = XAE_TC_JUM_MASK,
  85	}, {
  86		.opt = XAE_OPTION_JUMBO,
  87		.reg = XAE_RCW1_OFFSET,
  88		.m_or = XAE_RCW1_JUM_MASK,
  89	}, { /* Turn on VLAN packet support for both Rx and Tx */
  90		.opt = XAE_OPTION_VLAN,
  91		.reg = XAE_TC_OFFSET,
  92		.m_or = XAE_TC_VLAN_MASK,
  93	}, {
  94		.opt = XAE_OPTION_VLAN,
  95		.reg = XAE_RCW1_OFFSET,
  96		.m_or = XAE_RCW1_VLAN_MASK,
  97	}, { /* Turn on FCS stripping on receive packets */
  98		.opt = XAE_OPTION_FCS_STRIP,
  99		.reg = XAE_RCW1_OFFSET,
 100		.m_or = XAE_RCW1_FCS_MASK,
 101	}, { /* Turn on FCS insertion on transmit packets */
 102		.opt = XAE_OPTION_FCS_INSERT,
 103		.reg = XAE_TC_OFFSET,
 104		.m_or = XAE_TC_FCS_MASK,
 105	}, { /* Turn off length/type field checking on receive packets */
 106		.opt = XAE_OPTION_LENTYPE_ERR,
 107		.reg = XAE_RCW1_OFFSET,
 108		.m_or = XAE_RCW1_LT_DIS_MASK,
 109	}, { /* Turn on Rx flow control */
 110		.opt = XAE_OPTION_FLOW_CONTROL,
 111		.reg = XAE_FCC_OFFSET,
 112		.m_or = XAE_FCC_FCRX_MASK,
 113	}, { /* Turn on Tx flow control */
 114		.opt = XAE_OPTION_FLOW_CONTROL,
 115		.reg = XAE_FCC_OFFSET,
 116		.m_or = XAE_FCC_FCTX_MASK,
 117	}, { /* Turn on promiscuous frame filtering */
 118		.opt = XAE_OPTION_PROMISC,
 119		.reg = XAE_FMI_OFFSET,
 120		.m_or = XAE_FMI_PM_MASK,
 121	}, { /* Enable transmitter */
 122		.opt = XAE_OPTION_TXEN,
 123		.reg = XAE_TC_OFFSET,
 124		.m_or = XAE_TC_TX_MASK,
 125	}, { /* Enable receiver */
 126		.opt = XAE_OPTION_RXEN,
 127		.reg = XAE_RCW1_OFFSET,
 128		.m_or = XAE_RCW1_RX_MASK,
 129	},
 130	{}
 131};
 132
 133static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
 134{
 135	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
 136}
 137
 138static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
 139{
 140	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
 141}
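/* Note on the masking above: both ring sizes are powers of two
 * (RX_BUF_NUM_DEFAULT = 128, TX_BD_NUM_MAX = 4096), so for the free-running
 * head/tail indices "i & (N - 1)" is equivalent to "i % N". For example,
 * with TX_BD_NUM_MAX = 4096 an index of 4097 wraps to slot 1
 * (4097 & 4095 == 1).
 */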
 142
 143/**
 144 * axienet_dma_in32 - Memory mapped Axi DMA register read
 145 * @lp:		Pointer to axienet local structure
 146 * @reg:	Address offset from the base address of the Axi DMA core
 147 *
 148 * Return: The contents of the Axi DMA register
 149 *
 150 * This function returns the contents of the corresponding Axi DMA register.
 151 */
 152static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 153{
 154	return ioread32(lp->dma_regs + reg);
 155}
 156
 157static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
 158			       struct axidma_bd *desc)
 159{
 160	desc->phys = lower_32_bits(addr);
 161	if (lp->features & XAE_FEATURE_DMA_64BIT)
 162		desc->phys_msb = upper_32_bits(addr);
 163}
 164
 165static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
 166				     struct axidma_bd *desc)
 167{
 168	dma_addr_t ret = desc->phys;
 169
 170	if (lp->features & XAE_FEATURE_DMA_64BIT)
 171		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
 172
 173	return ret;
 174}
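/* Worked example for the two address helpers above: with
 * XAE_FEATURE_DMA_64BIT set and addr = 0x0000000812345678,
 * desc_set_phys_addr() stores phys = 0x12345678 and phys_msb = 0x00000008,
 * and desc_get_phys_addr() reassembles the same 64-bit value. The two
 * shifts by 16 (instead of a single shift by 32) keep the expression legal
 * when dma_addr_t is only 32 bits wide.
 */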
 175
 176/**
 177 * axienet_dma_bd_release - Release buffer descriptor rings
 178 * @ndev:	Pointer to the net_device structure
 179 *
 180 * This function is used to release the descriptors allocated in
 181 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 182 * driver stop API is called.
 183 */
 184static void axienet_dma_bd_release(struct net_device *ndev)
 185{
 186	int i;
 187	struct axienet_local *lp = netdev_priv(ndev);
 188
 189	/* If we end up here, tx_bd_v must have been DMA allocated. */
 190	dma_free_coherent(lp->dev,
 191			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 192			  lp->tx_bd_v,
 193			  lp->tx_bd_p);
 194
 195	if (!lp->rx_bd_v)
 196		return;
 197
 198	for (i = 0; i < lp->rx_bd_num; i++) {
 199		dma_addr_t phys;
 200
 201		/* A NULL skb means this descriptor has not been initialised
 202		 * at all.
 203		 */
 204		if (!lp->rx_bd_v[i].skb)
 205			break;
 206
 207		dev_kfree_skb(lp->rx_bd_v[i].skb);
 208
 209		/* For each descriptor, we programmed cntrl with the (non-zero)
 210		 * descriptor size, after it had been successfully allocated.
 211		 * So a non-zero value in there means we need to unmap it.
 212		 */
 213		if (lp->rx_bd_v[i].cntrl) {
 214			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
 215			dma_unmap_single(lp->dev, phys,
 216					 lp->max_frm_size, DMA_FROM_DEVICE);
 217		}
 218	}
 219
 220	dma_free_coherent(lp->dev,
 221			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 222			  lp->rx_bd_v,
 223			  lp->rx_bd_p);
 224}
 225
 226/**
 227 * axienet_usec_to_timer - Calculate IRQ delay timer value
 228 * @lp:		Pointer to the axienet_local structure
 229 * @coalesce_usec: Microseconds to convert into timer value
 230 */
 231static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
 232{
 233	u32 result;
 234	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
 235
 236	if (lp->axi_clk)
 237		clk_rate = clk_get_rate(lp->axi_clk);
 238
 239	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
 240	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
 241					 (u64)125000000);
 242	if (result > 255)
 243		result = 255;
 244
 245	return result;
 246}
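/* Worked example, assuming the SG/AXI clock runs at 125 MHz: one timeout
 * interval is 125 clock periods = 1 us, so a requested delay of 50 us maps
 * to a timer value of DIV64_U64_ROUND_CLOSEST(50 * 125000000, 125000000)
 * = 50. Values that would exceed the 8-bit timer field are clamped to 255.
 */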
 247
 248/**
 249 * axienet_dma_start - Set up DMA registers and start DMA operation
 250 * @lp:		Pointer to the axienet_local structure
 251 */
 252static void axienet_dma_start(struct axienet_local *lp)
 253{
 254	/* Start updating the Rx channel control register */
 255	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
 256			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 257	/* Only set interrupt delay timer if not generating an interrupt on
 258	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
 259	 */
 260	if (lp->coalesce_count_rx > 1)
 261		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
 262					<< XAXIDMA_DELAY_SHIFT) |
 263				 XAXIDMA_IRQ_DELAY_MASK;
 264	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 265
 266	/* Start updating the Tx channel control register */
 267	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
 268			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 269	/* Only set interrupt delay timer if not generating an interrupt on
 270	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
 271	 */
 272	if (lp->coalesce_count_tx > 1)
 273		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
 274					<< XAXIDMA_DELAY_SHIFT) |
 275				 XAXIDMA_IRQ_DELAY_MASK;
 276	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 277
 278	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 279	 * halted state. This will make the Rx side ready for reception.
 280	 */
 281	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 282	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 283	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 284	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
 285			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
 286
 287	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 288	 * Tx channel is now ready to run. But only after we write to the
 289	 * tail pointer register that the Tx channel will start transmitting.
 290	 */
 291	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 292	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 293	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 294}
 295
 296/**
 297 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 298 * @ndev:	Pointer to the net_device structure
 299 *
 300 * Return: 0 on success; -ENOMEM on failure
 301 *
 302 * This function is called to initialize the Rx and Tx DMA descriptor
 303 * rings. This initializes the descriptors with required default values
 304 * and is called when Axi Ethernet driver reset is called.
 305 */
 306static int axienet_dma_bd_init(struct net_device *ndev)
 307{
 308	int i;
 309	struct sk_buff *skb;
 310	struct axienet_local *lp = netdev_priv(ndev);
 311
 312	/* Reset the indexes which are used for accessing the BDs */
 313	lp->tx_bd_ci = 0;
 314	lp->tx_bd_tail = 0;
 315	lp->rx_bd_ci = 0;
 316
 317	/* Allocate the Tx and Rx buffer descriptors. */
 318	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
 319					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 320					 &lp->tx_bd_p, GFP_KERNEL);
 321	if (!lp->tx_bd_v)
 322		return -ENOMEM;
 323
 324	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
 325					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 326					 &lp->rx_bd_p, GFP_KERNEL);
 327	if (!lp->rx_bd_v)
 328		goto out;
 329
 330	for (i = 0; i < lp->tx_bd_num; i++) {
 331		dma_addr_t addr = lp->tx_bd_p +
 332				  sizeof(*lp->tx_bd_v) *
 333				  ((i + 1) % lp->tx_bd_num);
 334
 335		lp->tx_bd_v[i].next = lower_32_bits(addr);
 336		if (lp->features & XAE_FEATURE_DMA_64BIT)
 337			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
 338	}
 339
 340	for (i = 0; i < lp->rx_bd_num; i++) {
 341		dma_addr_t addr;
 342
 343		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
 344			((i + 1) % lp->rx_bd_num);
 345		lp->rx_bd_v[i].next = lower_32_bits(addr);
 346		if (lp->features & XAE_FEATURE_DMA_64BIT)
 347			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
 348
 349		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 350		if (!skb)
 351			goto out;
 352
 353		lp->rx_bd_v[i].skb = skb;
 354		addr = dma_map_single(lp->dev, skb->data,
 355				      lp->max_frm_size, DMA_FROM_DEVICE);
 356		if (dma_mapping_error(lp->dev, addr)) {
 357			netdev_err(ndev, "DMA mapping error\n");
 358			goto out;
 359		}
 360		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
 361
 362		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 363	}
 364
 365	axienet_dma_start(lp);
 366
 367	return 0;
 368out:
 369	axienet_dma_bd_release(ndev);
 370	return -ENOMEM;
 371}
 372
 373/**
 374 * axienet_set_mac_address - Write the MAC address
 375 * @ndev:	Pointer to the net_device structure
 376 * @address:	6 byte Address to be written as MAC address
 377 *
 378 * This function is called to initialize the MAC address of the Axi Ethernet
 379 * core. It writes to the UAW0 and UAW1 registers of the core.
 380 */
 381static void axienet_set_mac_address(struct net_device *ndev,
 382				    const void *address)
 383{
 384	struct axienet_local *lp = netdev_priv(ndev);
 385
 386	if (address)
 387		eth_hw_addr_set(ndev, address);
 388	if (!is_valid_ether_addr(ndev->dev_addr))
 389		eth_hw_addr_random(ndev);
 390
 391	/* Set up the unicast MAC address filter with the given MAC address */
 392	axienet_iow(lp, XAE_UAW0_OFFSET,
 393		    (ndev->dev_addr[0]) |
 394		    (ndev->dev_addr[1] << 8) |
 395		    (ndev->dev_addr[2] << 16) |
 396		    (ndev->dev_addr[3] << 24));
 397	axienet_iow(lp, XAE_UAW1_OFFSET,
 398		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
 399		      ~XAE_UAW1_UNICASTADDR_MASK) |
 400		     (ndev->dev_addr[4] |
 401		     (ndev->dev_addr[5] << 8))));
 402}
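/* Register-layout illustration for the writes above, using the example
 * address 00:0a:35:01:02:03: UAW0 is written with 0x01350a00 (bytes 0-3
 * packed little-endian) and the low 16 bits of UAW1 with 0x0302 (bytes 4-5),
 * while UAW1 bits outside XAE_UAW1_UNICASTADDR_MASK are preserved.
 */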
 403
 404/**
 405 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 406 * @ndev:	Pointer to the net_device structure
 407 * @p:		6 byte Address to be written as MAC address
 408 *
 409 * Return: 0 for all conditions. Presently, there is no failure case.
 410 *
 411 * This function is called to initialize the MAC address of the Axi Ethernet
 412 * core. It calls the core specific axienet_set_mac_address. This is the
 413 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 414 */
 415static int netdev_set_mac_address(struct net_device *ndev, void *p)
 416{
 417	struct sockaddr *addr = p;
 418
 419	axienet_set_mac_address(ndev, addr->sa_data);
 420	return 0;
 421}
 422
 423/**
 424 * axienet_set_multicast_list - Prepare the multicast table
 425 * @ndev:	Pointer to the net_device structure
 426 *
 427 * This function is called to initialize the multicast table during
 428 * initialization. The Axi Ethernet basic multicast support has a four-entry
 429 * multicast table which is initialized here. Additionally this function
 430 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 431 * means whenever the multicast table entries need to be updated this
 432 * function gets called.
 433 */
 434static void axienet_set_multicast_list(struct net_device *ndev)
 435{
 436	int i = 0;
 437	u32 reg, af0reg, af1reg;
 438	struct axienet_local *lp = netdev_priv(ndev);
 439
 440	reg = axienet_ior(lp, XAE_FMI_OFFSET);
 441	reg &= ~XAE_FMI_PM_MASK;
 442	if (ndev->flags & IFF_PROMISC)
 443		reg |= XAE_FMI_PM_MASK;
 444	else
 445		reg &= ~XAE_FMI_PM_MASK;
 446	axienet_iow(lp, XAE_FMI_OFFSET, reg);
 447
 448	if (ndev->flags & IFF_ALLMULTI ||
 449	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
 450		reg &= 0xFFFFFF00;
 451		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 452		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
 453		axienet_iow(lp, XAE_AF1_OFFSET, 0);
 454		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
 455		axienet_iow(lp, XAE_AM1_OFFSET, 0);
 456		axienet_iow(lp, XAE_FFE_OFFSET, 1);
 457		i = 1;
 458	} else if (!netdev_mc_empty(ndev)) {
 459		struct netdev_hw_addr *ha;
 460
 461		netdev_for_each_mc_addr(ha, ndev) {
 462			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
 463				break;
 464
 465			af0reg = (ha->addr[0]);
 466			af0reg |= (ha->addr[1] << 8);
 467			af0reg |= (ha->addr[2] << 16);
 468			af0reg |= (ha->addr[3] << 24);
 469
 470			af1reg = (ha->addr[4]);
 471			af1reg |= (ha->addr[5] << 8);
 472
 473			reg &= 0xFFFFFF00;
 474			reg |= i;
 475
 476			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 477			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
 478			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
 479			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
 480			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
 481			axienet_iow(lp, XAE_FFE_OFFSET, 1);
 482			i++;
 483		}
 484	}
 485
 486	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
 487		reg &= 0xFFFFFF00;
 488		reg |= i;
 489		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 490		axienet_iow(lp, XAE_FFE_OFFSET, 0);
 491	}
 492}
 493
 494/**
 495 * axienet_setoptions - Set an Axi Ethernet option
 496 * @ndev:	Pointer to the net_device structure
 497 * @options:	Option to be enabled/disabled
 498 *
 499 * The Axi Ethernet core has multiple features which can be selectively turned
 500 * on or off. The typical options could be jumbo frame option, basic VLAN
 501 * option, promiscuous mode option etc. This function is used to set or clear
 502 * these options in the Axi Ethernet hardware. This is done through
 503 * the axienet_option structure.
 504 */
 505static void axienet_setoptions(struct net_device *ndev, u32 options)
 506{
 507	int reg;
 508	struct axienet_local *lp = netdev_priv(ndev);
 509	struct axienet_option *tp = &axienet_options[0];
 510
 511	while (tp->opt) {
 512		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
 513		if (options & tp->opt)
 514			reg |= tp->m_or;
 515		axienet_iow(lp, tp->reg, reg);
 516		tp++;
 517	}
 518
 519	lp->options |= options;
 520}
 521
 522static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
 523{
 524	u32 counter;
 525
 526	if (lp->reset_in_progress)
 527		return lp->hw_stat_base[stat];
 528
 529	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
 530	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
 531}
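/* The hardware statistics counters are only 32 bits wide; lp->hw_stat_base
 * keeps a running software total and lp->hw_last_counter the last raw
 * reading. The unsigned 32-bit subtraction above produces the correct delta
 * even across a single counter wrap, e.g. last = 0xfffffff0 and
 * counter = 0x00000010 give a delta of 0x20 (32).
 */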
 532
 533static void axienet_stats_update(struct axienet_local *lp, bool reset)
 534{
 535	enum temac_stat stat;
 536
 537	write_seqcount_begin(&lp->hw_stats_seqcount);
 538	lp->reset_in_progress = reset;
 539	for (stat = 0; stat < STAT_COUNT; stat++) {
 540		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
 541
 542		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
 543		lp->hw_last_counter[stat] = counter;
 544	}
 545	write_seqcount_end(&lp->hw_stats_seqcount);
 546}
 547
 548static void axienet_refresh_stats(struct work_struct *work)
 549{
 550	struct axienet_local *lp = container_of(work, struct axienet_local,
 551						stats_work.work);
 552
 553	mutex_lock(&lp->stats_lock);
 554	axienet_stats_update(lp, false);
 555	mutex_unlock(&lp->stats_lock);
 556
 557	/* Just less than 2^32 bytes at 2.5 GBit/s */
 558	schedule_delayed_work(&lp->stats_work, 13 * HZ);
 559}
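/* The 13 second period follows from the comment above: at 2.5 Gbit/s a
 * 32-bit byte counter can wrap after 2^32 / (2.5e9 / 8) ~= 13.7 seconds, so
 * refreshing every 13 seconds samples each counter at least once per wrap
 * interval.
 */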
 560
 561static int __axienet_device_reset(struct axienet_local *lp)
 562{
 563	u32 value;
 564	int ret;
 565
 566	/* Save statistics counters in case they will be reset */
 567	mutex_lock(&lp->stats_lock);
 568	if (lp->features & XAE_FEATURE_STATS)
 569		axienet_stats_update(lp, true);
 570
 571	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 572	 * process of Axi DMA takes a while to complete as all pending
 573	 * commands/transfers will be flushed or completed during this
 574	 * reset process.
 575	 * Note that even though both TX and RX have their own reset register,
 576	 * they both reset the entire DMA core, so only one needs to be used.
 577	 */
 578	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
 579	ret = read_poll_timeout(axienet_dma_in32, value,
 580				!(value & XAXIDMA_CR_RESET_MASK),
 581				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 582				XAXIDMA_TX_CR_OFFSET);
 583	if (ret) {
 584		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
 585		goto out;
 586	}
 587
 588	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
 589	ret = read_poll_timeout(axienet_ior, value,
 590				value & XAE_INT_PHYRSTCMPLT_MASK,
 591				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 592				XAE_IS_OFFSET);
 593	if (ret) {
 594		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
 595		goto out;
 596	}
 597
 598	/* Update statistics counters with new values */
 599	if (lp->features & XAE_FEATURE_STATS) {
 600		enum temac_stat stat;
 601
 602		write_seqcount_begin(&lp->hw_stats_seqcount);
 603		lp->reset_in_progress = false;
 604		for (stat = 0; stat < STAT_COUNT; stat++) {
 605			u32 counter =
 606				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
 607
 608			lp->hw_stat_base[stat] +=
 609				lp->hw_last_counter[stat] - counter;
 610			lp->hw_last_counter[stat] = counter;
 611		}
 612		write_seqcount_end(&lp->hw_stats_seqcount);
 613	}
 614
 615out:
 616	mutex_unlock(&lp->stats_lock);
 617	return ret;
 618}
 619
 620/**
 621 * axienet_dma_stop - Stop DMA operation
 622 * @lp:		Pointer to the axienet_local structure
 623 */
 624static void axienet_dma_stop(struct axienet_local *lp)
 625{
 626	int count;
 627	u32 cr, sr;
 628
 629	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 630	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 631	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 632	synchronize_irq(lp->rx_irq);
 633
 634	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 635	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 636	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 637	synchronize_irq(lp->tx_irq);
 638
 639	/* Give DMAs a chance to halt gracefully */
 640	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 641	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 642		msleep(20);
 643		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 644	}
 645
 646	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 647	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 648		msleep(20);
 649		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 650	}
 651
 652	/* Do a reset to ensure DMA is really stopped */
 653	axienet_lock_mii(lp);
 654	__axienet_device_reset(lp);
 655	axienet_unlock_mii(lp);
 656}
 657
 658/**
 659 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 660 * @ndev:	Pointer to the net_device structure
 661 *
 662 * This function is called to reset and initialize the Axi Ethernet core. This
 663 * is typically called during initialization. It does a reset of the Axi DMA
 664 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 665 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 666 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 667 * core.
 668 * Return: 0 on success or a negative error number otherwise.
 669 */
 670static int axienet_device_reset(struct net_device *ndev)
 671{
 672	u32 axienet_status;
 673	struct axienet_local *lp = netdev_priv(ndev);
 674	int ret;
 675
 676	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 677	lp->options |= XAE_OPTION_VLAN;
 678	lp->options &= (~XAE_OPTION_JUMBO);
 679
 680	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
 681		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
 682					XAE_TRL_SIZE;
 683
 684		if (lp->max_frm_size <= lp->rxmem)
 685			lp->options |= XAE_OPTION_JUMBO;
 686	}
 687
 688	if (!lp->use_dmaengine) {
 689		ret = __axienet_device_reset(lp);
 690		if (ret)
 691			return ret;
 692
 693		ret = axienet_dma_bd_init(ndev);
 694		if (ret) {
 695			netdev_err(ndev, "%s: descriptor allocation failed\n",
 696				   __func__);
 697			return ret;
 698		}
 699	}
 700
 701	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
 702	axienet_status &= ~XAE_RCW1_RX_MASK;
 703	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
 704
 705	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
 706	if (axienet_status & XAE_INT_RXRJECT_MASK)
 707		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
 708	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
 709		    XAE_INT_RECV_ERROR_MASK : 0);
 710
 711	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 712
 713	/* Sync default options with HW but leave receiver and
 714	 * transmitter disabled.
 715	 */
 716	axienet_setoptions(ndev, lp->options &
 717			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 718	axienet_set_mac_address(ndev, NULL);
 719	axienet_set_multicast_list(ndev);
 720	axienet_setoptions(ndev, lp->options);
 721
 722	netif_trans_update(ndev);
 723
 724	return 0;
 725}
 726
 727/**
 728 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 729 * @lp:		Pointer to the axienet_local structure
 730 * @first_bd:	Index of first descriptor to clean up
 731 * @nr_bds:	Max number of descriptors to clean up
 732 * @force:	Whether to clean descriptors even if not complete
 733 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 734 *		in all cleaned-up descriptors. Ignored if NULL.
 735 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 736 *
 737 * Would either be called after a successful transmit operation, or after
 738 * there was an error when setting up the chain.
 739 * Returns the number of packets handled.
 740 */
 741static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
 742				 int nr_bds, bool force, u32 *sizep, int budget)
 743{
 744	struct axidma_bd *cur_p;
 745	unsigned int status;
 746	int i, packets = 0;
 747	dma_addr_t phys;
 748
 749	for (i = 0; i < nr_bds; i++) {
 750		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
 751		status = cur_p->status;
 752
 753		/* If force is not specified, clean up only descriptors
 754		 * that have been completed by the MAC.
 755		 */
 756		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 757			break;
 758
 759		/* Ensure we see complete descriptor update */
 760		dma_rmb();
 761		phys = desc_get_phys_addr(lp, cur_p);
 762		dma_unmap_single(lp->dev, phys,
 763				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 764				 DMA_TO_DEVICE);
 765
 766		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
 767			napi_consume_skb(cur_p->skb, budget);
 768			packets++;
 769		}
 770
 771		cur_p->app0 = 0;
 772		cur_p->app1 = 0;
 773		cur_p->app2 = 0;
 774		cur_p->app4 = 0;
 775		cur_p->skb = NULL;
 776		/* ensure our transmit path and device don't prematurely see status cleared */
 777		wmb();
 778		cur_p->cntrl = 0;
 779		cur_p->status = 0;
 780
 781		if (sizep)
 782			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 783	}
 784
 785	if (!force) {
 786		lp->tx_bd_ci += i;
 787		if (lp->tx_bd_ci >= lp->tx_bd_num)
 788			lp->tx_bd_ci %= lp->tx_bd_num;
 789	}
 790
 791	return packets;
 792}
 793
 794/**
 795 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 796 * @lp:		Pointer to the axienet_local structure
 797 * @num_frag:	The number of BDs to check for
 798 *
 799 * Return: 0, on success
 800 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 801 *
 802 * This function is invoked before BDs are allocated and transmission starts.
 803 * This function returns 0 if a BD or group of BDs can be allocated for
 804 * transmission. If the BD or any of the BDs are not free the function
 805 * returns a busy status.
 806 */
 807static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 808					    int num_frag)
 809{
 810	struct axidma_bd *cur_p;
 811
 812	/* Ensure we see all descriptor updates from device or TX polling */
 813	rmb();
 814	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
 815			     lp->tx_bd_num];
 816	if (cur_p->cntrl)
 817		return NETDEV_TX_BUSY;
 818	return 0;
 819}
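/* Typical caller pattern, sketched here for reference (it mirrors
 * axienet_start_xmit below): check for MAX_SKB_FRAGS + 1 free descriptors,
 * stop the queue on failure, then re-check after a barrier that pairs with
 * the one in axienet_tx_poll, so a concurrent TX completion cannot leave the
 * queue stopped while space is available:
 *
 *	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
 *		netif_stop_queue(ndev);
 *		smp_mb();
 *		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
 *			netif_wake_queue(ndev);
 *	}
 */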
 820
 821/**
 822 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 823 * @data:       Pointer to the axienet_local structure.
 824 * @result:     error reporting through dmaengine_result.
 825 * This function is called by dmaengine driver for TX channel to notify
 826 * that the transmit is done.
 827 */
 828static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
 829{
 830	struct skbuf_dma_descriptor *skbuf_dma;
 831	struct axienet_local *lp = data;
 832	struct netdev_queue *txq;
 833	int len;
 834
 835	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
 836	len = skbuf_dma->skb->len;
 837	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
 838	u64_stats_update_begin(&lp->tx_stat_sync);
 839	u64_stats_add(&lp->tx_bytes, len);
 840	u64_stats_add(&lp->tx_packets, 1);
 841	u64_stats_update_end(&lp->tx_stat_sync);
 842	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
 843	dev_consume_skb_any(skbuf_dma->skb);
 844	netif_txq_completed_wake(txq, 1, len,
 845				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 846				 2 * MAX_SKB_FRAGS);
 847}
 848
 849/**
 850 * axienet_start_xmit_dmaengine - Starts the transmission.
 851 * @skb:        sk_buff pointer that contains data to be Txed.
 852 * @ndev:       Pointer to net_device structure.
 853 *
 854 * Return: NETDEV_TX_OK on success, or when the packet is dropped on a
 855 *         non-space error. NETDEV_TX_BUSY when no free element is
 856 *         available in the TX skb ring buffer.
 857 *
 858 * This function is invoked to initiate transmission. It maps the skb
 859 * into a scatterlist, registers the DMA completion callback and submits
 860 * the DMA transaction.
 861 * Additionally if checksum offloading is supported,
 862 * it populates AXI Stream Control fields with appropriate values.
 863 */
 864static netdev_tx_t
 865axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
 866{
 867	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
 868	struct axienet_local *lp = netdev_priv(ndev);
 869	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
 870	struct skbuf_dma_descriptor *skbuf_dma;
 871	struct dma_device *dma_dev;
 872	struct netdev_queue *txq;
 873	u32 csum_start_off;
 874	u32 csum_index_off;
 875	int sg_len;
 876	int ret;
 877
 878	dma_dev = lp->tx_chan->device;
 879	sg_len = skb_shinfo(skb)->nr_frags + 1;
 880	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
 881		netif_stop_queue(ndev);
 882		if (net_ratelimit())
 883			netdev_warn(ndev, "TX ring unexpectedly full\n");
 884		return NETDEV_TX_BUSY;
 885	}
 886
 887	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
 888	if (!skbuf_dma)
 889		goto xmit_error_drop_skb;
 890
 891	lp->tx_ring_head++;
 892	sg_init_table(skbuf_dma->sgl, sg_len);
 893	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
 894	if (ret < 0)
 895		goto xmit_error_drop_skb;
 896
 897	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
 898	if (!ret)
 899		goto xmit_error_drop_skb;
 900
 901	/* Fill up app fields for checksum */
 902	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 903		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 904			/* Tx Full Checksum Offload Enabled */
 905			app_metadata[0] |= 2;
 906		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
 907			csum_start_off = skb_transport_offset(skb);
 908			csum_index_off = csum_start_off + skb->csum_offset;
 909			/* Tx Partial Checksum Offload Enabled */
 910			app_metadata[0] |= 1;
 911			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
 912		}
 913	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 914		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
 915	}
 916
 917	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
 918			sg_len, DMA_MEM_TO_DEV,
 919			DMA_PREP_INTERRUPT, (void *)app_metadata);
 920	if (!dma_tx_desc)
 921		goto xmit_error_unmap_sg;
 922
 923	skbuf_dma->skb = skb;
 924	skbuf_dma->sg_len = sg_len;
 925	dma_tx_desc->callback_param = lp;
 926	dma_tx_desc->callback_result = axienet_dma_tx_cb;
 927	txq = skb_get_tx_queue(lp->ndev, skb);
 928	netdev_tx_sent_queue(txq, skb->len);
 929	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 930			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
 931
 932	dmaengine_submit(dma_tx_desc);
 933	dma_async_issue_pending(lp->tx_chan);
 934	return NETDEV_TX_OK;
 935
 936xmit_error_unmap_sg:
 937	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
 938xmit_error_drop_skb:
 939	dev_kfree_skb_any(skb);
 940	return NETDEV_TX_OK;
 941}
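/* Example of the checksum app words built above (illustrative values,
 * assuming an untagged IPv4/TCP frame with partial checksum offload):
 * skb_transport_offset() = 34 and skb->csum_offset = 16 give
 * app_metadata[0] = 1 and app_metadata[1] = (34 << 16) | 50 = 0x00220032,
 * i.e. the checksum is computed from byte 34 of the frame and inserted at
 * byte 50.
 */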
 942
 943/**
 944 * axienet_tx_poll - Invoked once a transmit is completed by the
 945 * Axi DMA Tx channel.
 946 * @napi:	Pointer to NAPI structure.
 947 * @budget:	Max number of TX packets to process.
 948 *
 949 * Return: Number of TX packets processed.
 950 *
 951 * This function is invoked from the NAPI processing to notify the completion
 952 * of transmit operation. It clears fields in the corresponding Tx BDs and
 953 * unmaps the corresponding buffer so that CPU can regain ownership of the
 954 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 955 * required.
 956 */
 957static int axienet_tx_poll(struct napi_struct *napi, int budget)
 958{
 959	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
 960	struct net_device *ndev = lp->ndev;
 961	u32 size = 0;
 962	int packets;
 963
 964	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
 965					&size, budget);
 966
 967	if (packets) {
 968		u64_stats_update_begin(&lp->tx_stat_sync);
 969		u64_stats_add(&lp->tx_packets, packets);
 970		u64_stats_add(&lp->tx_bytes, size);
 971		u64_stats_update_end(&lp->tx_stat_sync);
 972
 973		/* Matches barrier in axienet_start_xmit */
 974		smp_mb();
 975
 976		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
 977			netif_wake_queue(ndev);
 978	}
 979
 980	if (packets < budget && napi_complete_done(napi, packets)) {
 981		/* Re-enable TX completion interrupts. This should
 982		 * cause an immediate interrupt if any TX packets are
 983		 * already pending.
 984		 */
 985		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 986	}
 987	return packets;
 988}
 989
 990/**
 991 * axienet_start_xmit - Starts the transmission.
 992 * @skb:	sk_buff pointer that contains data to be Txed.
 993 * @ndev:	Pointer to net_device structure.
 994 *
 995 * Return: NETDEV_TX_OK, on success
 996 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 997 *
 998 * This function is invoked from upper layers to initiate transmission. The
 999 * function uses the next available free BDs and populates their fields to
1000 * start the transmission. Additionally if checksum offloading is supported,
1001 * it populates AXI Stream Control fields with appropriate values.
1002 */
1003static netdev_tx_t
1004axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1005{
1006	u32 ii;
1007	u32 num_frag;
1008	u32 csum_start_off;
1009	u32 csum_index_off;
1010	skb_frag_t *frag;
1011	dma_addr_t tail_p, phys;
1012	u32 orig_tail_ptr, new_tail_ptr;
1013	struct axienet_local *lp = netdev_priv(ndev);
1014	struct axidma_bd *cur_p;
1015
1016	orig_tail_ptr = lp->tx_bd_tail;
1017	new_tail_ptr = orig_tail_ptr;
1018
1019	num_frag = skb_shinfo(skb)->nr_frags;
1020	cur_p = &lp->tx_bd_v[orig_tail_ptr];
1021
1022	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
1023		/* Should not happen as last start_xmit call should have
1024		 * checked for sufficient space and queue should only be
1025		 * woken when sufficient space is available.
1026		 */
1027		netif_stop_queue(ndev);
1028		if (net_ratelimit())
1029			netdev_warn(ndev, "TX ring unexpectedly full\n");
1030		return NETDEV_TX_BUSY;
1031	}
1032
1033	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1034		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1035			/* Tx Full Checksum Offload Enabled */
1036			cur_p->app0 |= 2;
1037		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
1038			csum_start_off = skb_transport_offset(skb);
1039			csum_index_off = csum_start_off + skb->csum_offset;
1040			/* Tx Partial Checksum Offload Enabled */
1041			cur_p->app0 |= 1;
1042			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1043		}
1044	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1045		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1046	}
1047
1048	phys = dma_map_single(lp->dev, skb->data,
1049			      skb_headlen(skb), DMA_TO_DEVICE);
1050	if (unlikely(dma_mapping_error(lp->dev, phys))) {
1051		if (net_ratelimit())
1052			netdev_err(ndev, "TX DMA mapping error\n");
1053		ndev->stats.tx_dropped++;
1054		dev_kfree_skb_any(skb);
1055		return NETDEV_TX_OK;
1056	}
1057	desc_set_phys_addr(lp, phys, cur_p);
1058	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
1059
1060	for (ii = 0; ii < num_frag; ii++) {
1061		if (++new_tail_ptr >= lp->tx_bd_num)
1062			new_tail_ptr = 0;
1063		cur_p = &lp->tx_bd_v[new_tail_ptr];
1064		frag = &skb_shinfo(skb)->frags[ii];
1065		phys = dma_map_single(lp->dev,
1066				      skb_frag_address(frag),
1067				      skb_frag_size(frag),
1068				      DMA_TO_DEVICE);
1069		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1070			if (net_ratelimit())
1071				netdev_err(ndev, "TX DMA mapping error\n");
1072			ndev->stats.tx_dropped++;
1073			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1074					      true, NULL, 0);
1075			dev_kfree_skb_any(skb);
1076			return NETDEV_TX_OK;
1077		}
1078		desc_set_phys_addr(lp, phys, cur_p);
1079		cur_p->cntrl = skb_frag_size(frag);
1080	}
1081
1082	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1083	cur_p->skb = skb;
1084
1085	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1086	if (++new_tail_ptr >= lp->tx_bd_num)
1087		new_tail_ptr = 0;
1088	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1089
1090	/* Start the transfer */
1091	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1092
1093	/* Stop queue if next transmit may not have space */
1094	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1095		netif_stop_queue(ndev);
1096
1097		/* Matches barrier in axienet_tx_poll */
1098		smp_mb();
1099
1100		/* Space might have just been freed - check again */
1101		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1102			netif_wake_queue(ndev);
1103	}
1104
1105	return NETDEV_TX_OK;
1106}
1107
1108/**
1109 * axienet_dma_rx_cb - DMA engine callback for RX channel.
1110 * @data:       Pointer to the skbuf_dma_descriptor structure.
1111 * @result:     error reporting through dmaengine_result.
1112 * This function is called by dmaengine driver for RX channel to notify
1113 * that the packet is received.
1114 */
1115static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1116{
1117	struct skbuf_dma_descriptor *skbuf_dma;
1118	size_t meta_len, meta_max_len, rx_len;
1119	struct axienet_local *lp = data;
1120	struct sk_buff *skb;
1121	u32 *app_metadata;
1122
1123	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1124	skb = skbuf_dma->skb;
1125	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1126						       &meta_max_len);
1127	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1128			 DMA_FROM_DEVICE);
1129	/* TODO: Derive app word index programmatically */
1130	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1131	skb_put(skb, rx_len);
1132	skb->protocol = eth_type_trans(skb, lp->ndev);
1133	skb->ip_summed = CHECKSUM_NONE;
1134
1135	__netif_rx(skb);
1136	u64_stats_update_begin(&lp->rx_stat_sync);
1137	u64_stats_add(&lp->rx_packets, 1);
1138	u64_stats_add(&lp->rx_bytes, rx_len);
1139	u64_stats_update_end(&lp->rx_stat_sync);
1140	axienet_rx_submit_desc(lp->ndev);
1141	dma_async_issue_pending(lp->rx_chan);
1142}
1143
1144/**
1145 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1146 * @napi:	Pointer to NAPI structure.
1147 * @budget:	Max number of RX packets to process.
1148 *
1149 * Return: Number of RX packets processed.
1150 */
1151static int axienet_rx_poll(struct napi_struct *napi, int budget)
1152{
1153	u32 length;
1154	u32 csumstatus;
1155	u32 size = 0;
1156	int packets = 0;
1157	dma_addr_t tail_p = 0;
1158	struct axidma_bd *cur_p;
1159	struct sk_buff *skb, *new_skb;
1160	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1161
1162	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1163
1164	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1165		dma_addr_t phys;
1166
1167		/* Ensure we see complete descriptor update */
1168		dma_rmb();
1169
1170		skb = cur_p->skb;
1171		cur_p->skb = NULL;
1172
1173		/* skb could be NULL if a previous pass already received the
1174		 * packet for this slot in the ring, but failed to refill it
1175		 * with a newly allocated buffer. In this case, don't try to
1176		 * receive it again.
1177		 */
1178		if (likely(skb)) {
1179			length = cur_p->app4 & 0x0000FFFF;
1180
1181			phys = desc_get_phys_addr(lp, cur_p);
1182			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1183					 DMA_FROM_DEVICE);
1184
1185			skb_put(skb, length);
1186			skb->protocol = eth_type_trans(skb, lp->ndev);
1187			/*skb_checksum_none_assert(skb);*/
1188			skb->ip_summed = CHECKSUM_NONE;
1189
1190			/* if we're doing Rx csum offload, set it up */
1191			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1192				csumstatus = (cur_p->app2 &
1193					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
1194				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1195				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1196					skb->ip_summed = CHECKSUM_UNNECESSARY;
1197				}
1198			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1199				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1200				skb->ip_summed = CHECKSUM_COMPLETE;
1201			}
1202
1203			napi_gro_receive(napi, skb);
1204
1205			size += length;
1206			packets++;
1207		}
1208
1209		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1210		if (!new_skb)
1211			break;
1212
1213		phys = dma_map_single(lp->dev, new_skb->data,
1214				      lp->max_frm_size,
1215				      DMA_FROM_DEVICE);
1216		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1217			if (net_ratelimit())
1218				netdev_err(lp->ndev, "RX DMA mapping error\n");
1219			dev_kfree_skb(new_skb);
1220			break;
1221		}
1222		desc_set_phys_addr(lp, phys, cur_p);
1223
1224		cur_p->cntrl = lp->max_frm_size;
1225		cur_p->status = 0;
1226		cur_p->skb = new_skb;
1227
1228		/* Only update tail_p to mark this slot as usable after it has
1229		 * been successfully refilled.
1230		 */
1231		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1232
1233		if (++lp->rx_bd_ci >= lp->rx_bd_num)
1234			lp->rx_bd_ci = 0;
1235		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1236	}
1237
1238	u64_stats_update_begin(&lp->rx_stat_sync);
1239	u64_stats_add(&lp->rx_packets, packets);
1240	u64_stats_add(&lp->rx_bytes, size);
1241	u64_stats_update_end(&lp->rx_stat_sync);
1242
1243	if (tail_p)
1244		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1245
1246	if (packets < budget && napi_complete_done(napi, packets)) {
1247		/* Re-enable RX completion interrupts. This should
1248		 * cause an immediate interrupt if any RX packets are
1249		 * already pending.
1250		 */
1251		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1252	}
1253	return packets;
1254}
1255
1256/**
1257 * axienet_tx_irq - Tx Done Isr.
1258 * @irq:	irq number
1259 * @_ndev:	net_device pointer
1260 *
1261 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1262 *
1263 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1264 * TX BD processing.
1265 */
1266static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1267{
1268	unsigned int status;
1269	struct net_device *ndev = _ndev;
1270	struct axienet_local *lp = netdev_priv(ndev);
1271
1272	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1273
1274	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1275		return IRQ_NONE;
1276
1277	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1278
1279	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1280		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1281		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1282			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1283			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1284		schedule_work(&lp->dma_err_task);
1285	} else {
1286		/* Disable further TX completion interrupts and schedule
1287		 * NAPI to handle the completions.
1288		 */
1289		u32 cr = lp->tx_dma_cr;
1290
1291		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1292		if (napi_schedule_prep(&lp->napi_tx)) {
1293			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1294			__napi_schedule(&lp->napi_tx);
1295		}
1296	}
1297
1298	return IRQ_HANDLED;
1299}
1300
1301/**
1302 * axienet_rx_irq - Rx Isr.
1303 * @irq:	irq number
1304 * @_ndev:	net_device pointer
1305 *
1306 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1307 *
1308 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1309 * processing.
1310 */
1311static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1312{
1313	unsigned int status;
1314	struct net_device *ndev = _ndev;
1315	struct axienet_local *lp = netdev_priv(ndev);
1316
1317	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1318
1319	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1320		return IRQ_NONE;
1321
1322	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1323
1324	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1325		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1326		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1327			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1328			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1329		schedule_work(&lp->dma_err_task);
1330	} else {
1331		/* Disable further RX completion interrupts and schedule
1332		 * NAPI receive.
1333		 */
1334		u32 cr = lp->rx_dma_cr;
1335
1336		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1337		if (napi_schedule_prep(&lp->napi_rx)) {
1338			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1339			__napi_schedule(&lp->napi_rx);
1340		}
1341	}
1342
1343	return IRQ_HANDLED;
1344}
1345
1346/**
1347 * axienet_eth_irq - Ethernet core Isr.
1348 * @irq:	irq number
1349 * @_ndev:	net_device pointer
1350 *
1351 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1352 *
1353 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1354 */
1355static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1356{
1357	struct net_device *ndev = _ndev;
1358	struct axienet_local *lp = netdev_priv(ndev);
1359	unsigned int pending;
1360
1361	pending = axienet_ior(lp, XAE_IP_OFFSET);
1362	if (!pending)
1363		return IRQ_NONE;
1364
1365	if (pending & XAE_INT_RXFIFOOVR_MASK)
1366		ndev->stats.rx_missed_errors++;
1367
1368	if (pending & XAE_INT_RXRJECT_MASK)
1369		ndev->stats.rx_dropped++;
1370
1371	axienet_iow(lp, XAE_IS_OFFSET, pending);
1372	return IRQ_HANDLED;
1373}
1374
1375static void axienet_dma_err_handler(struct work_struct *work);
1376
1377/**
1378 * axienet_rx_submit_desc - Submit an rx descriptor to dmaengine.
1379 * Allocate an skbuff, map it into the scatterlist, obtain a descriptor,
1380 * then add the callback information and submit the descriptor.
1381 *
1382 * @ndev:	net_device pointer
1383 *
1384 */
1385static void axienet_rx_submit_desc(struct net_device *ndev)
1386{
1387	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1388	struct axienet_local *lp = netdev_priv(ndev);
1389	struct skbuf_dma_descriptor *skbuf_dma;
1390	struct sk_buff *skb;
1391	dma_addr_t addr;
1392
1393	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1394	if (!skbuf_dma)
1395		return;
1396
1397	lp->rx_ring_head++;
1398	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1399	if (!skb)
1400		return;
1401
1402	sg_init_table(skbuf_dma->sgl, 1);
1403	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1404	if (unlikely(dma_mapping_error(lp->dev, addr))) {
1405		if (net_ratelimit())
1406			netdev_err(ndev, "DMA mapping error\n");
1407		goto rx_submit_err_free_skb;
1408	}
1409	sg_dma_address(skbuf_dma->sgl) = addr;
1410	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1411	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1412					      1, DMA_DEV_TO_MEM,
1413					      DMA_PREP_INTERRUPT);
1414	if (!dma_rx_desc)
1415		goto rx_submit_err_unmap_skb;
1416
1417	skbuf_dma->skb = skb;
1418	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1419	skbuf_dma->desc = dma_rx_desc;
1420	dma_rx_desc->callback_param = lp;
1421	dma_rx_desc->callback_result = axienet_dma_rx_cb;
1422	dmaengine_submit(dma_rx_desc);
1423
1424	return;
1425
1426rx_submit_err_unmap_skb:
1427	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1428rx_submit_err_free_skb:
1429	dev_kfree_skb(skb);
1430}
1431
1432/**
1433 * axienet_init_dmaengine - init the dmaengine code.
1434 * @ndev:       Pointer to net_device structure
1435 *
1436 * Return: 0, on success.
1437 *          non-zero error value on failure
1438 *
1439 * This is the dmaengine initialization code.
1440 */
1441static int axienet_init_dmaengine(struct net_device *ndev)
1442{
1443	struct axienet_local *lp = netdev_priv(ndev);
1444	struct skbuf_dma_descriptor *skbuf_dma;
1445	int i, ret;
1446
1447	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1448	if (IS_ERR(lp->tx_chan)) {
1449		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1450		return PTR_ERR(lp->tx_chan);
1451	}
1452
1453	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1454	if (IS_ERR(lp->rx_chan)) {
1455		ret = PTR_ERR(lp->rx_chan);
1456		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1457		goto err_dma_release_tx;
1458	}
1459
1460	lp->tx_ring_tail = 0;
1461	lp->tx_ring_head = 0;
1462	lp->rx_ring_tail = 0;
1463	lp->rx_ring_head = 0;
1464	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1465				  GFP_KERNEL);
1466	if (!lp->tx_skb_ring) {
1467		ret = -ENOMEM;
1468		goto err_dma_release_rx;
1469	}
1470	for (i = 0; i < TX_BD_NUM_MAX; i++) {
1471		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1472		if (!skbuf_dma) {
1473			ret = -ENOMEM;
1474			goto err_free_tx_skb_ring;
1475		}
1476		lp->tx_skb_ring[i] = skbuf_dma;
1477	}
1478
1479	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1480				  GFP_KERNEL);
1481	if (!lp->rx_skb_ring) {
1482		ret = -ENOMEM;
1483		goto err_free_tx_skb_ring;
1484	}
1485	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1486		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1487		if (!skbuf_dma) {
1488			ret = -ENOMEM;
1489			goto err_free_rx_skb_ring;
1490		}
1491		lp->rx_skb_ring[i] = skbuf_dma;
1492	}
1493	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1494	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1495		axienet_rx_submit_desc(ndev);
1496	dma_async_issue_pending(lp->rx_chan);
1497
1498	return 0;
1499
1500err_free_rx_skb_ring:
1501	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1502		kfree(lp->rx_skb_ring[i]);
1503	kfree(lp->rx_skb_ring);
1504err_free_tx_skb_ring:
1505	for (i = 0; i < TX_BD_NUM_MAX; i++)
1506		kfree(lp->tx_skb_ring[i]);
1507	kfree(lp->tx_skb_ring);
1508err_dma_release_rx:
1509	dma_release_channel(lp->rx_chan);
1510err_dma_release_tx:
1511	dma_release_channel(lp->tx_chan);
1512	return ret;
1513}
1514
1515/**
1516 * axienet_init_legacy_dma - init the dma legacy code.
1517 * @ndev:       Pointer to net_device structure
1518 *
1519 * Return: 0, on success.
1520 *          non-zero error value on failure
1521 *
1522 * This is the legacy DMA initialization code. It also registers the
1523 * interrupt service routines and enables the interrupt lines and ISR handling.
1524 *
1525 */
1526static int axienet_init_legacy_dma(struct net_device *ndev)
1527{
1528	int ret;
1529	struct axienet_local *lp = netdev_priv(ndev);
1530
1531	/* Enable worker thread for Axi DMA error handling */
1532	lp->stopping = false;
1533	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1534
1535	napi_enable(&lp->napi_rx);
1536	napi_enable(&lp->napi_tx);
1537
1538	/* Enable interrupts for Axi DMA Tx */
1539	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1540			  ndev->name, ndev);
1541	if (ret)
1542		goto err_tx_irq;
1543	/* Enable interrupts for Axi DMA Rx */
1544	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1545			  ndev->name, ndev);
1546	if (ret)
1547		goto err_rx_irq;
1548	/* Enable interrupts for Axi Ethernet core (if defined) */
1549	if (lp->eth_irq > 0) {
1550		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1551				  ndev->name, ndev);
1552		if (ret)
1553			goto err_eth_irq;
1554	}
1555
1556	return 0;
1557
1558err_eth_irq:
1559	free_irq(lp->rx_irq, ndev);
1560err_rx_irq:
1561	free_irq(lp->tx_irq, ndev);
1562err_tx_irq:
1563	napi_disable(&lp->napi_tx);
1564	napi_disable(&lp->napi_rx);
1565	cancel_work_sync(&lp->dma_err_task);
1566	dev_err(lp->dev, "request_irq() failed\n");
1567	return ret;
1568}
1569
1570/**
1571 * axienet_open - Driver open routine.
1572 * @ndev:	Pointer to net_device structure
1573 *
1574 * Return: 0, on success.
1575 *	    non-zero error value on failure
1576 *
1577 * This is the driver open routine. It calls phylink_start to start the
1578 * PHY device.
1579 * It also allocates interrupt service routines, enables the interrupt lines
1580 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1581 * descriptors are initialized.
1582 */
1583static int axienet_open(struct net_device *ndev)
1584{
1585	int ret;
1586	struct axienet_local *lp = netdev_priv(ndev);
1587
1588	/* When we do an Axi Ethernet reset, it resets the complete core
1589	 * including the MDIO. MDIO must be disabled before resetting.
1590	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1591	 */
1592	axienet_lock_mii(lp);
1593	ret = axienet_device_reset(ndev);
1594	axienet_unlock_mii(lp);
1595
1596	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1597	if (ret) {
1598		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1599		return ret;
1600	}
1601
1602	phylink_start(lp->phylink);
1603
1604	/* Start the statistics refresh work */
1605	schedule_delayed_work(&lp->stats_work, 0);
1606
1607	if (lp->use_dmaengine) {
1608		/* Enable interrupts for Axi Ethernet core (if defined) */
1609		if (lp->eth_irq > 0) {
1610			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1611					  ndev->name, ndev);
1612			if (ret)
1613				goto err_phy;
1614		}
1615
1616		ret = axienet_init_dmaengine(ndev);
1617		if (ret < 0)
1618			goto err_free_eth_irq;
1619	} else {
1620		ret = axienet_init_legacy_dma(ndev);
1621		if (ret)
1622			goto err_phy;
1623	}
1624
1625	return 0;
1626
1627err_free_eth_irq:
1628	if (lp->eth_irq > 0)
1629		free_irq(lp->eth_irq, ndev);
1630err_phy:
1631	cancel_delayed_work_sync(&lp->stats_work);
1632	phylink_stop(lp->phylink);
1633	phylink_disconnect_phy(lp->phylink);
1634	return ret;
1635}
1636
1637/**
1638 * axienet_stop - Driver stop routine.
1639 * @ndev:	Pointer to net_device structure
1640 *
1641 * Return: 0, on success.
1642 *
1643 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1644 * device. It also removes the interrupt handlers and disables the interrupts.
1645 * The Axi DMA Tx/Rx BDs are released.
1646 */
1647static int axienet_stop(struct net_device *ndev)
1648{
1649	struct axienet_local *lp = netdev_priv(ndev);
1650	int i;
1651
1652	if (!lp->use_dmaengine) {
1653		WRITE_ONCE(lp->stopping, true);
1654		flush_work(&lp->dma_err_task);
1655
1656		napi_disable(&lp->napi_tx);
1657		napi_disable(&lp->napi_rx);
1658	}
1659
1660	cancel_delayed_work_sync(&lp->stats_work);
1661
1662	phylink_stop(lp->phylink);
1663	phylink_disconnect_phy(lp->phylink);
1664
1665	axienet_setoptions(ndev, lp->options &
1666			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1667
1668	if (!lp->use_dmaengine) {
1669		axienet_dma_stop(lp);
1670		cancel_work_sync(&lp->dma_err_task);
1671		free_irq(lp->tx_irq, ndev);
1672		free_irq(lp->rx_irq, ndev);
1673		axienet_dma_bd_release(ndev);
1674	} else {
1675		dmaengine_terminate_sync(lp->tx_chan);
1676		dmaengine_synchronize(lp->tx_chan);
1677		dmaengine_terminate_sync(lp->rx_chan);
1678		dmaengine_synchronize(lp->rx_chan);
1679
1680		for (i = 0; i < TX_BD_NUM_MAX; i++)
1681			kfree(lp->tx_skb_ring[i]);
1682		kfree(lp->tx_skb_ring);
1683		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1684			kfree(lp->rx_skb_ring[i]);
1685		kfree(lp->rx_skb_ring);
1686
1687		dma_release_channel(lp->rx_chan);
1688		dma_release_channel(lp->tx_chan);
1689	}
1690
1691	axienet_iow(lp, XAE_IE_OFFSET, 0);
1692
1693	if (lp->eth_irq > 0)
1694		free_irq(lp->eth_irq, ndev);
1695	return 0;
1696}
1697
1698/**
1699 * axienet_change_mtu - Driver change mtu routine.
1700 * @ndev:	Pointer to net_device structure
1701 * @new_mtu:	New mtu value to be applied
1702 *
1703 * Return: 0 on success, -EBUSY if the device is running, or -EINVAL if the
1704 *	   new frame size does not fit in the configured Rx memory.
1705 *
1706 * This is the change MTU driver routine. It verifies that the new frame
1707 * size fits in the hardware Rx memory; the device must be down to change it.
1708 */
1709static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1710{
1711	struct axienet_local *lp = netdev_priv(ndev);
1712
1713	if (netif_running(ndev))
1714		return -EBUSY;
1715
1716	if ((new_mtu + VLAN_ETH_HLEN +
1717		XAE_TRL_SIZE) > lp->rxmem)
1718		return -EINVAL;
1719
1720	WRITE_ONCE(ndev->mtu, new_mtu);
1721
1722	return 0;
1723}
1724
1725#ifdef CONFIG_NET_POLL_CONTROLLER
1726/**
1727 * axienet_poll_controller - Axi Ethernet poll mechanism.
1728 * @ndev:	Pointer to net_device structure
1729 *
1730 * This implements the Rx/Tx ISR poll mechanism. The interrupts are disabled
1731 * before the ISRs are invoked and re-enabled once polling is done.
1732 */
1733static void axienet_poll_controller(struct net_device *ndev)
1734{
1735	struct axienet_local *lp = netdev_priv(ndev);
1736
1737	disable_irq(lp->tx_irq);
1738	disable_irq(lp->rx_irq);
1739	axienet_rx_irq(lp->tx_irq, ndev);
1740	axienet_tx_irq(lp->rx_irq, ndev);
1741	enable_irq(lp->tx_irq);
1742	enable_irq(lp->rx_irq);
1743}
1744#endif
1745
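/**
 * axienet_ioctl - Ethernet ioctl handler
 * @dev:	Pointer to net_device structure
 * @rq:		Pointer to the ioctl request structure
 * @cmd:	Ioctl command
 *
 * MII ioctls are passed through to phylink. The interface must be running,
 * otherwise -EINVAL is returned.
 *
 * Return: 0 on success, negative error value otherwise.
 */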
1746static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1747{
1748	struct axienet_local *lp = netdev_priv(dev);
1749
1750	if (!netif_running(dev))
1751		return -EINVAL;
1752
1753	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1754}
1755
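/**
 * axienet_get_stats64 - Fill in the 64-bit device statistics
 * @dev:	Pointer to net_device structure
 * @stats:	Pointer to rtnl_link_stats64 structure to be filled in
 *
 * Copies the generic netdev counters, then the packet/byte counters kept
 * under the Rx/Tx u64_stats syncs, and finally (when the hardware statistics
 * counters are present) the error counters read under hw_stats_seqcount so
 * that a consistent snapshot is reported.
 */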
1756static void
1757axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1758{
1759	struct axienet_local *lp = netdev_priv(dev);
1760	unsigned int start;
1761
1762	netdev_stats_to_stats64(stats, &dev->stats);
1763
1764	do {
1765		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1766		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1767		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1768	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1769
1770	do {
1771		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1772		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1773		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1774	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1775
1776	if (!(lp->features & XAE_FEATURE_STATS))
1777		return;
1778
1779	do {
1780		start = read_seqcount_begin(&lp->hw_stats_seqcount);
1781		stats->rx_length_errors =
1782			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1783		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1784		stats->rx_frame_errors =
1785			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1786		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1787				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1788				   stats->rx_length_errors +
1789				   stats->rx_crc_errors +
1790				   stats->rx_frame_errors;
1791		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1792
1793		stats->tx_aborted_errors =
1794			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1795		stats->tx_fifo_errors =
1796			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1797		stats->tx_window_errors =
1798			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1799		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1800				   stats->tx_aborted_errors +
1801				   stats->tx_fifo_errors +
1802				   stats->tx_window_errors;
1803	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1804}
1805
1806static const struct net_device_ops axienet_netdev_ops = {
1807	.ndo_open = axienet_open,
1808	.ndo_stop = axienet_stop,
1809	.ndo_start_xmit = axienet_start_xmit,
1810	.ndo_get_stats64 = axienet_get_stats64,
1811	.ndo_change_mtu	= axienet_change_mtu,
1812	.ndo_set_mac_address = netdev_set_mac_address,
1813	.ndo_validate_addr = eth_validate_addr,
1814	.ndo_eth_ioctl = axienet_ioctl,
1815	.ndo_set_rx_mode = axienet_set_multicast_list,
1816#ifdef CONFIG_NET_POLL_CONTROLLER
1817	.ndo_poll_controller = axienet_poll_controller,
1818#endif
1819};
1820
1821static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1822	.ndo_open = axienet_open,
1823	.ndo_stop = axienet_stop,
1824	.ndo_start_xmit = axienet_start_xmit_dmaengine,
1825	.ndo_get_stats64 = axienet_get_stats64,
1826	.ndo_change_mtu	= axienet_change_mtu,
1827	.ndo_set_mac_address = netdev_set_mac_address,
1828	.ndo_validate_addr = eth_validate_addr,
1829	.ndo_eth_ioctl = axienet_ioctl,
1830	.ndo_set_rx_mode = axienet_set_multicast_list,
1831};
1832
1833/**
1834 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1835 * @ndev:	Pointer to net_device structure
1836 * @ed:		Pointer to ethtool_drvinfo structure
1837 *
1838 * This implements ethtool command for getting the driver information.
1839 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1840 */
1841static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1842					 struct ethtool_drvinfo *ed)
1843{
1844	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1845	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
 
1846}
1847
1848/**
1849 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1850 *				   AxiEthernet core.
1851 * @ndev:	Pointer to net_device structure
1852 *
1853 * This implements ethtool command for getting the total register length
1854 * information.
1855 *
1856 * Return: the total regs length
1857 */
1858static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1859{
1860	return sizeof(u32) * AXIENET_REGS_N;
1861}
1862
1863/**
1864 * axienet_ethtools_get_regs - Dump the contents of all registers present
1865 *			       in AxiEthernet core.
1866 * @ndev:	Pointer to net_device structure
1867 * @regs:	Pointer to ethtool_regs structure
1868 * @ret:	Void pointer used to return the contents of the registers.
1869 *
1870 * This implements ethtool command for getting the Axi Ethernet register dump.
1871 * Issue "ethtool -d ethX" to execute this function.
1872 */
1873static void axienet_ethtools_get_regs(struct net_device *ndev,
1874				      struct ethtool_regs *regs, void *ret)
1875{
1876	u32 *data = (u32 *)ret;
1877	size_t len = sizeof(u32) * AXIENET_REGS_N;
1878	struct axienet_local *lp = netdev_priv(ndev);
1879
1880	regs->version = 0;
1881	regs->len = len;
1882
1883	memset(data, 0, len);
1884	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1885	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1886	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1887	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1888	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1889	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1890	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1891	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1892	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1893	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1894	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1895	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1896	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1897	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1898	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1899	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1900	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1901	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1902	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1903	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1904	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1905	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1906	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1907	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1908	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1909	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1910	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1911	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1912	if (!lp->use_dmaengine) {
1913		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1914		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1915		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1916		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1917		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1918		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1919		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1920		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1921	}
1922}
1923
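/**
 * axienet_ethtools_get_ringparam - Report DMA ring sizes
 * @ndev:	Pointer to net_device structure
 * @ering:	Pointer to ethtool_ringparam structure
 * @kernel_ering:	Kernel-only ring parameters (unused)
 * @extack:	netlink extended ack (unused)
 *
 * Reports the currently configured and the maximum supported number of
 * Tx and Rx buffer descriptors. Issue "ethtool -g ethX" to execute this.
 */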
1924static void
1925axienet_ethtools_get_ringparam(struct net_device *ndev,
1926			       struct ethtool_ringparam *ering,
1927			       struct kernel_ethtool_ringparam *kernel_ering,
1928			       struct netlink_ext_ack *extack)
1929{
1930	struct axienet_local *lp = netdev_priv(ndev);
1931
1932	ering->rx_max_pending = RX_BD_NUM_MAX;
1933	ering->rx_mini_max_pending = 0;
1934	ering->rx_jumbo_max_pending = 0;
1935	ering->tx_max_pending = TX_BD_NUM_MAX;
1936	ering->rx_pending = lp->rx_bd_num;
1937	ering->rx_mini_pending = 0;
1938	ering->rx_jumbo_pending = 0;
1939	ering->tx_pending = lp->tx_bd_num;
1940}
1941
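/**
 * axienet_ethtools_set_ringparam - Set DMA ring sizes
 * @ndev:	Pointer to net_device structure
 * @ering:	Pointer to ethtool_ringparam structure
 * @kernel_ering:	Kernel-only ring parameters (unused)
 * @extack:	netlink extended ack (unused)
 *
 * Validates and stores the requested Tx/Rx buffer descriptor counts. The
 * interface must be down; the new sizes take effect when it is next opened.
 *
 * Return: 0 on success, -EINVAL for out-of-range values, -EBUSY if running.
 */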
1942static int
1943axienet_ethtools_set_ringparam(struct net_device *ndev,
1944			       struct ethtool_ringparam *ering,
1945			       struct kernel_ethtool_ringparam *kernel_ering,
1946			       struct netlink_ext_ack *extack)
1947{
1948	struct axienet_local *lp = netdev_priv(ndev);
1949
1950	if (ering->rx_pending > RX_BD_NUM_MAX ||
1951	    ering->rx_mini_pending ||
1952	    ering->rx_jumbo_pending ||
1953	    ering->tx_pending < TX_BD_NUM_MIN ||
1954	    ering->tx_pending > TX_BD_NUM_MAX)
1955		return -EINVAL;
1956
1957	if (netif_running(ndev))
1958		return -EBUSY;
1959
1960	lp->rx_bd_num = ering->rx_pending;
1961	lp->tx_bd_num = ering->tx_pending;
1962	return 0;
1963}
1964
1965/**
1966 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1967 *				     Tx and Rx paths.
1968 * @ndev:	Pointer to net_device structure
1969 * @epauseparm:	Pointer to ethtool_pauseparam structure.
1970 *
1971 * This implements ethtool command for getting axi ethernet pause frame
1972 * setting. Issue "ethtool -a ethX" to execute this function.
1973 */
1974static void
1975axienet_ethtools_get_pauseparam(struct net_device *ndev,
1976				struct ethtool_pauseparam *epauseparm)
1977{
 
1978	struct axienet_local *lp = netdev_priv(ndev);
1979
1980	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1981}
1982
1983/**
1984 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1985 *				     settings.
1986 * @ndev:	Pointer to net_device structure
1987 * @epauseparm:	Pointer to ethtool_pauseparam structure
1988 *
1989 * This implements ethtool command for enabling flow control on Rx and Tx
1990 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1991 * function.
1992 *
1993 * Return: 0 on success, or a negative error code returned by phylink.
1994 */
1995static int
1996axienet_ethtools_set_pauseparam(struct net_device *ndev,
1997				struct ethtool_pauseparam *epauseparm)
1998{
 
1999	struct axienet_local *lp = netdev_priv(ndev);
2000
2001	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
2002}
2003
2004/**
2005 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2006 * @ndev:	Pointer to net_device structure
2007 * @ecoalesce:	Pointer to ethtool_coalesce structure
2008 * @kernel_coal: ethtool CQE mode setting structure
2009 * @extack:	extack for reporting error messages
2010 *
2011 * This implements ethtool command for getting the DMA interrupt coalescing
2012 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
2013 * execute this function.
2014 *
2015 * Return: 0 always
2016 */
2017static int
2018axienet_ethtools_get_coalesce(struct net_device *ndev,
2019			      struct ethtool_coalesce *ecoalesce,
2020			      struct kernel_ethtool_coalesce *kernel_coal,
2021			      struct netlink_ext_ack *extack)
2022{
 
2023	struct axienet_local *lp = netdev_priv(ndev);
2024
2025	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
2026	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
2027	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
2028	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
 
2029	return 0;
2030}
2031
2032/**
2033 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2034 * @ndev:	Pointer to net_device structure
2035 * @ecoalesce:	Pointer to ethtool_coalesce structure
2036 * @kernel_coal: ethtool CQE mode setting structure
2037 * @extack:	extack for reporting error messages
2038 *
2039 * This implements ethtool command for setting the DMA interrupt coalescing
2040 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
2041 * prompt to execute this function.
2042 *
2043 * Return: 0, on success, Non-zero error value on failure.
2044 */
2045static int
2046axienet_ethtools_set_coalesce(struct net_device *ndev,
2047			      struct ethtool_coalesce *ecoalesce,
2048			      struct kernel_ethtool_coalesce *kernel_coal,
2049			      struct netlink_ext_ack *extack)
2050{
2051	struct axienet_local *lp = netdev_priv(ndev);
2052
2053	if (netif_running(ndev)) {
2054		NL_SET_ERR_MSG(extack,
2055			       "Please stop netif before applying configuration");
2056		return -EBUSY;
2057	}
2058
2059	if (ecoalesce->rx_max_coalesced_frames > 255 ||
2060	    ecoalesce->tx_max_coalesced_frames > 255) {
2061		NL_SET_ERR_MSG(extack, "frames must be less than 256");
2062		return -EINVAL;
2063	}
2064
2065	if (ecoalesce->rx_max_coalesced_frames)
2066		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
2067	if (ecoalesce->rx_coalesce_usecs)
2068		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
2069	if (ecoalesce->tx_max_coalesced_frames)
2070		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
2071	if (ecoalesce->tx_coalesce_usecs)
2072		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
2073
2074	return 0;
2075}
2076
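/* Link ksettings and autonegotiation restart are delegated to phylink,
 * which manages the attached PHY/PCS for this MAC.
 */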
2077static int
2078axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2079				    struct ethtool_link_ksettings *cmd)
2080{
2081	struct axienet_local *lp = netdev_priv(ndev);
2082
2083	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2084}
2085
2086static int
2087axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2088				    const struct ethtool_link_ksettings *cmd)
2089{
2090	struct axienet_local *lp = netdev_priv(ndev);
2091
2092	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2093}
2094
2095static int axienet_ethtools_nway_reset(struct net_device *dev)
2096{
2097	struct axienet_local *lp = netdev_priv(dev);
2098
2099	return phylink_ethtool_nway_reset(lp->phylink);
2100}
2101
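/**
 * axienet_ethtools_get_ethtool_stats - Read the extra hardware counters
 * @dev:	Pointer to net_device structure
 * @stats:	Pointer to ethtool_stats structure (unused)
 * @data:	Array to be filled with the counter values
 *
 * Reads the statistics counters that have no corresponding field in the
 * standard ethtool statistics groups. The hw_stats_seqcount retry loop
 * ensures a consistent snapshot across the periodic statistics refresh.
 */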
2102static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2103					       struct ethtool_stats *stats,
2104					       u64 *data)
2105{
2106	struct axienet_local *lp = netdev_priv(dev);
2107	unsigned int start;
2108
2109	do {
2110		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2111		data[0] = axienet_stat(lp, STAT_RX_BYTES);
2112		data[1] = axienet_stat(lp, STAT_TX_BYTES);
2113		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2114		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2115		data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2116		data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2117		data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
2118		data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
2119		data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
2120	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2121}
2122
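/* Names for the counters reported by axienet_ethtools_get_ethtool_stats(),
 * in the same order as the values are filled in.
 */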
2123static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2124	"Received bytes",
2125	"Transmitted bytes",
2126	"RX Good VLAN Tagged Frames",
2127	"TX Good VLAN Tagged Frames",
2128	"TX Good PFC Frames",
2129	"RX Good PFC Frames",
2130	"User Defined Counter 0",
2131	"User Defined Counter 1",
2132	"User Defined Counter 2",
2133};
2134
2135static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2136{
2137	switch (stringset) {
2138	case ETH_SS_STATS:
2139		memcpy(data, axienet_ethtool_stats_strings,
2140		       sizeof(axienet_ethtool_stats_strings));
2141		break;
2142	}
2143}
2144
2145static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2146{
2147	struct axienet_local *lp = netdev_priv(dev);
2148
2149	switch (sset) {
2150	case ETH_SS_STATS:
2151		if (lp->features & XAE_FEATURE_STATS)
2152			return ARRAY_SIZE(axienet_ethtool_stats_strings);
2153		fallthrough;
2154	default:
2155		return -EOPNOTSUPP;
2156	}
2157}
2158
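/* Report the MAC pause frame counters via the standard ethtool pause
 * statistics. Only available when the optional hardware statistics
 * counters are implemented (XAE_FEATURE_STATS).
 */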
2159static void
2160axienet_ethtools_get_pause_stats(struct net_device *dev,
2161				 struct ethtool_pause_stats *pause_stats)
2162{
2163	struct axienet_local *lp = netdev_priv(dev);
2164	unsigned int start;
2165
2166	if (!(lp->features & XAE_FEATURE_STATS))
2167		return;
2168
2169	do {
2170		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2171		pause_stats->tx_pause_frames =
2172			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2173		pause_stats->rx_pause_frames =
2174			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2175	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2176}
2177
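/* Fill in the standard IEEE 802.3 MAC statistics group from the hardware
 * statistics counters, read under hw_stats_seqcount for consistency.
 */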
2178static void
2179axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2180				  struct ethtool_eth_mac_stats *mac_stats)
2181{
2182	struct axienet_local *lp = netdev_priv(dev);
2183	unsigned int start;
2184
2185	if (!(lp->features & XAE_FEATURE_STATS))
2186		return;
2187
2188	do {
2189		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2190		mac_stats->FramesTransmittedOK =
2191			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2192		mac_stats->SingleCollisionFrames =
2193			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2194		mac_stats->MultipleCollisionFrames =
2195			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2196		mac_stats->FramesReceivedOK =
2197			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2198		mac_stats->FrameCheckSequenceErrors =
2199			axienet_stat(lp, STAT_RX_FCS_ERRORS);
2200		mac_stats->AlignmentErrors =
2201			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2202		mac_stats->FramesWithDeferredXmissions =
2203			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2204		mac_stats->LateCollisions =
2205			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2206		mac_stats->FramesAbortedDueToXSColls =
2207			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2208		mac_stats->MulticastFramesXmittedOK =
2209			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2210		mac_stats->BroadcastFramesXmittedOK =
2211			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2212		mac_stats->FramesWithExcessiveDeferral =
2213			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2214		mac_stats->MulticastFramesReceivedOK =
2215			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2216		mac_stats->BroadcastFramesReceivedOK =
2217			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2218		mac_stats->InRangeLengthErrors =
2219			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2220	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2221}
2222
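/* Fill in the MAC control frame statistics group (control frames sent and
 * received, unsupported opcodes) from the hardware statistics counters.
 */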
2223static void
2224axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2225				   struct ethtool_eth_ctrl_stats *ctrl_stats)
2226{
2227	struct axienet_local *lp = netdev_priv(dev);
2228	unsigned int start;
2229
2230	if (!(lp->features & XAE_FEATURE_STATS))
2231		return;
2232
2233	do {
2234		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2235		ctrl_stats->MACControlFramesTransmitted =
2236			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2237		ctrl_stats->MACControlFramesReceived =
2238			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2239		ctrl_stats->UnsupportedOpcodesReceived =
2240			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2241	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2242}
2243
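/* Frame length buckets used for the RMON histogram counters below. The
 * last bucket covers frames larger than the standard maximum frame size.
 */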
2244static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2245	{   64,    64 },
2246	{   65,   127 },
2247	{  128,   255 },
2248	{  256,   511 },
2249	{  512,  1023 },
2250	{ 1024,  1518 },
2251	{ 1519, 16384 },
2252	{ },
2253};
2254
2255static void
2256axienet_ethtool_get_rmon_stats(struct net_device *dev,
2257			       struct ethtool_rmon_stats *rmon_stats,
2258			       const struct ethtool_rmon_hist_range **ranges)
2259{
2260	struct axienet_local *lp = netdev_priv(dev);
2261	unsigned int start;
2262
2263	if (!(lp->features & XAE_FEATURE_STATS))
2264		return;
2265
2266	do {
2267		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2268		rmon_stats->undersize_pkts =
2269			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2270		rmon_stats->oversize_pkts =
2271			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2272		rmon_stats->fragments =
2273			axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2274
2275		rmon_stats->hist[0] =
2276			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2277		rmon_stats->hist[1] =
2278			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2279		rmon_stats->hist[2] =
2280			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2281		rmon_stats->hist[3] =
2282			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2283		rmon_stats->hist[4] =
2284			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2285		rmon_stats->hist[5] =
2286			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2287		rmon_stats->hist[6] =
2288			rmon_stats->oversize_pkts;
2289
2290		rmon_stats->hist_tx[0] =
2291			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2292		rmon_stats->hist_tx[1] =
2293			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2294		rmon_stats->hist_tx[2] =
2295			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2296		rmon_stats->hist_tx[3] =
2297			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2298		rmon_stats->hist_tx[4] =
2299			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2300		rmon_stats->hist_tx[5] =
2301			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2302		rmon_stats->hist_tx[6] =
2303			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2304	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2305
2306	*ranges = axienet_rmon_ranges;
2307}
2308
2309static const struct ethtool_ops axienet_ethtool_ops = {
2310	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
2311				     ETHTOOL_COALESCE_USECS,
2312	.get_drvinfo    = axienet_ethtools_get_drvinfo,
2313	.get_regs_len   = axienet_ethtools_get_regs_len,
2314	.get_regs       = axienet_ethtools_get_regs,
2315	.get_link       = ethtool_op_get_link,
2316	.get_ringparam	= axienet_ethtools_get_ringparam,
2317	.set_ringparam	= axienet_ethtools_set_ringparam,
2318	.get_pauseparam = axienet_ethtools_get_pauseparam,
2319	.set_pauseparam = axienet_ethtools_set_pauseparam,
2320	.get_coalesce   = axienet_ethtools_get_coalesce,
2321	.set_coalesce   = axienet_ethtools_set_coalesce,
2322	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
2323	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
2324	.nway_reset	= axienet_ethtools_nway_reset,
2325	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2326	.get_strings    = axienet_ethtools_get_strings,
2327	.get_sset_count = axienet_ethtools_get_sset_count,
2328	.get_pause_stats = axienet_ethtools_get_pause_stats,
2329	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2330	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2331	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
2332};
2333
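/* phylink PCS callbacks: the internal SGMII/1000BASE-X PCS/PMA is managed
 * as a Clause 22 device through the pcs_phy MDIO device.
 */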
2334static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2335{
2336	return container_of(pcs, struct axienet_local, pcs);
2337}
2338
2339static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2340				  struct phylink_link_state *state)
2341{
2342	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2343
2344	phylink_mii_c22_pcs_get_state(pcs_phy, state);
2345}
2346
2347static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2348{
2349	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2350
2351	phylink_mii_c22_pcs_an_restart(pcs_phy);
2352}
2353
2354static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2355			      phy_interface_t interface,
2356			      const unsigned long *advertising,
2357			      bool permit_pause_to_mac)
2358{
2359	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2360	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2361	struct axienet_local *lp = netdev_priv(ndev);
2362	int ret;
2363
2364	if (lp->switch_x_sgmii) {
2365		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2366				    interface == PHY_INTERFACE_MODE_SGMII ?
2367					XLNX_MII_STD_SELECT_SGMII : 0);
2368		if (ret < 0) {
2369			netdev_warn(ndev,
2370				    "Failed to switch PHY interface: %d\n",
2371				    ret);
2372			return ret;
2373		}
2374	}
2375
2376	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2377					 neg_mode);
2378	if (ret < 0)
2379		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2380
2381	return ret;
2382}
2383
2384static const struct phylink_pcs_ops axienet_pcs_ops = {
2385	.pcs_get_state = axienet_pcs_get_state,
2386	.pcs_config = axienet_pcs_config,
2387	.pcs_an_restart = axienet_pcs_an_restart,
2388};
2389
2390static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2391						  phy_interface_t interface)
2392{
2393	struct net_device *ndev = to_net_dev(config->dev);
2394	struct axienet_local *lp = netdev_priv(ndev);
2395
2396	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2397	    interface ==  PHY_INTERFACE_MODE_SGMII)
2398		return &lp->pcs;
2399
2400	return NULL;
2401}
2402
2403static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2404			       const struct phylink_link_state *state)
2405{
2406	/* nothing meaningful to do */
2407}
2408
2409static void axienet_mac_link_down(struct phylink_config *config,
2410				  unsigned int mode,
2411				  phy_interface_t interface)
2412{
2413	/* nothing meaningful to do */
2414}
2415
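/**
 * axienet_mac_link_up - Program the MAC for a newly established link
 * @config:	phylink configuration
 * @phy:	Attached PHY device (may be NULL)
 * @mode:	Link autonegotiation mode
 * @interface:	Negotiated PHY interface mode
 * @speed:	Negotiated link speed
 * @duplex:	Negotiated duplex
 * @tx_pause:	Whether Tx pause was negotiated
 * @rx_pause:	Whether Rx pause was negotiated
 *
 * Writes the negotiated speed into the EMMC register and enables or
 * disables Tx/Rx flow control in the FCC register accordingly.
 */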
2416static void axienet_mac_link_up(struct phylink_config *config,
2417				struct phy_device *phy,
2418				unsigned int mode, phy_interface_t interface,
2419				int speed, int duplex,
2420				bool tx_pause, bool rx_pause)
2421{
2422	struct net_device *ndev = to_net_dev(config->dev);
2423	struct axienet_local *lp = netdev_priv(ndev);
2424	u32 emmc_reg, fcc_reg;
2425
2426	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2427	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2428
2429	switch (speed) {
2430	case SPEED_1000:
2431		emmc_reg |= XAE_EMMC_LINKSPD_1000;
2432		break;
2433	case SPEED_100:
2434		emmc_reg |= XAE_EMMC_LINKSPD_100;
2435		break;
2436	case SPEED_10:
2437		emmc_reg |= XAE_EMMC_LINKSPD_10;
2438		break;
2439	default:
2440		dev_err(&ndev->dev,
2441			"Speeds other than 10, 100 or 1000 Mbps are not supported\n");
2442		break;
2443	}
2444
2445	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2446
2447	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2448	if (tx_pause)
2449		fcc_reg |= XAE_FCC_FCTX_MASK;
2450	else
2451		fcc_reg &= ~XAE_FCC_FCTX_MASK;
2452	if (rx_pause)
2453		fcc_reg |= XAE_FCC_FCRX_MASK;
2454	else
2455		fcc_reg &= ~XAE_FCC_FCRX_MASK;
2456	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2457}
2458
2459static const struct phylink_mac_ops axienet_phylink_ops = {
2460	.mac_select_pcs = axienet_mac_select_pcs,
2461	.mac_config = axienet_mac_config,
2462	.mac_link_down = axienet_mac_link_down,
2463	.mac_link_up = axienet_mac_link_up,
2464};
2465
2466/**
2467 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2468 * @work:	pointer to work_struct
2469 *
2470 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2471 * Tx/Rx BDs.
2472 */
2473static void axienet_dma_err_handler(struct work_struct *work)
2474{
2475	u32 i;
2476	u32 axienet_status;
2477	struct axidma_bd *cur_p;
2478	struct axienet_local *lp = container_of(work, struct axienet_local,
2479						dma_err_task);
2480	struct net_device *ndev = lp->ndev;
2481
2482	/* Don't bother if we are going to stop anyway */
2483	if (READ_ONCE(lp->stopping))
2484		return;
2485
2486	napi_disable(&lp->napi_tx);
2487	napi_disable(&lp->napi_rx);
2488
2489	axienet_setoptions(ndev, lp->options &
2490			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2491
2492	axienet_dma_stop(lp);
 
2493
2494	for (i = 0; i < lp->tx_bd_num; i++) {
2495		cur_p = &lp->tx_bd_v[i];
2496		if (cur_p->cntrl) {
2497			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2498
2499			dma_unmap_single(lp->dev, addr,
2500					 (cur_p->cntrl &
2501					  XAXIDMA_BD_CTRL_LENGTH_MASK),
2502					 DMA_TO_DEVICE);
2503		}
2504		if (cur_p->skb)
2505			dev_kfree_skb_irq(cur_p->skb);
2506		cur_p->phys = 0;
2507		cur_p->phys_msb = 0;
2508		cur_p->cntrl = 0;
2509		cur_p->status = 0;
2510		cur_p->app0 = 0;
2511		cur_p->app1 = 0;
2512		cur_p->app2 = 0;
2513		cur_p->app3 = 0;
2514		cur_p->app4 = 0;
2515		cur_p->skb = NULL;
2516	}
2517
2518	for (i = 0; i < lp->rx_bd_num; i++) {
2519		cur_p = &lp->rx_bd_v[i];
2520		cur_p->status = 0;
2521		cur_p->app0 = 0;
2522		cur_p->app1 = 0;
2523		cur_p->app2 = 0;
2524		cur_p->app3 = 0;
2525		cur_p->app4 = 0;
2526	}
2527
2528	lp->tx_bd_ci = 0;
2529	lp->tx_bd_tail = 0;
2530	lp->rx_bd_ci = 0;
2531
2532	axienet_dma_start(lp);
2533
2534	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2535	axienet_status &= ~XAE_RCW1_RX_MASK;
2536	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2537
2538	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2539	if (axienet_status & XAE_INT_RXRJECT_MASK)
2540		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2541	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2542		    XAE_INT_RECV_ERROR_MASK : 0);
2543	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2544
2545	/* Sync default options with HW but leave receiver and
2546	 * transmitter disabled.
2547	 */
2548	axienet_setoptions(ndev, lp->options &
2549			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2550	axienet_set_mac_address(ndev, NULL);
2551	axienet_set_multicast_list(ndev);
2552	napi_enable(&lp->napi_rx);
2553	napi_enable(&lp->napi_tx);
2554	axienet_setoptions(ndev, lp->options);
2555}
2556
2557/**
2558 * axienet_probe - Axi Ethernet probe function.
2559 * @pdev:	Pointer to platform device structure.
 
2560 *
2561 * Return: 0, on success
2562 *	    Non-zero error value on failure.
2563 *
2564 * This is the probe routine for the Axi Ethernet driver. It is called before
2565 * any other driver routines are invoked. It allocates and sets up the Ethernet
2566 * device, parses the device tree to populate the fields of axienet_local, and
2567 * registers the Ethernet device.
2568 */
2569static int axienet_probe(struct platform_device *pdev)
2570{
2571	int ret;
 
2572	struct device_node *np;
2573	struct axienet_local *lp;
2574	struct net_device *ndev;
2575	struct resource *ethres;
2576	u8 mac_addr[ETH_ALEN];
2577	int addr_width = 32;
2578	u32 value;
2579
2580	ndev = alloc_etherdev(sizeof(*lp));
2581	if (!ndev)
2582		return -ENOMEM;
2583
2584	platform_set_drvdata(pdev, ndev);
 
2585
2586	SET_NETDEV_DEV(ndev, &pdev->dev);
 
2587	ndev->features = NETIF_F_SG;
 
2588	ndev->ethtool_ops = &axienet_ethtool_ops;
2589
2590	/* MTU range: 64 - 9000 */
2591	ndev->min_mtu = 64;
2592	ndev->max_mtu = XAE_JUMBO_MTU;
2593
2594	lp = netdev_priv(ndev);
2595	lp->ndev = ndev;
2596	lp->dev = &pdev->dev;
2597	lp->options = XAE_OPTION_DEFAULTS;
2598	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2599	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2600
2601	u64_stats_init(&lp->rx_stat_sync);
2602	u64_stats_init(&lp->tx_stat_sync);
2603
2604	mutex_init(&lp->stats_lock);
2605	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2606	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2607
2608	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2609	if (!lp->axi_clk) {
2610		/* For backward compatibility, if named AXI clock is not present,
2611		 * treat the first clock specified as the AXI clock.
2612		 */
2613		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2614	}
2615	if (IS_ERR(lp->axi_clk)) {
2616		ret = PTR_ERR(lp->axi_clk);
2617		goto free_netdev;
2618	}
2619	ret = clk_prepare_enable(lp->axi_clk);
2620	if (ret) {
2621		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2622		goto free_netdev;
2623	}
2624
2625	lp->misc_clks[0].id = "axis_clk";
2626	lp->misc_clks[1].id = "ref_clk";
2627	lp->misc_clks[2].id = "mgt_clk";
2628
2629	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2630	if (ret)
2631		goto cleanup_clk;
2632
2633	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2634	if (ret)
2635		goto cleanup_clk;
2636
2637	/* Map device registers */
2638	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2639	if (IS_ERR(lp->regs)) {
2640		ret = PTR_ERR(lp->regs);
2641		goto cleanup_clk;
2642	}
2643	lp->regs_start = ethres->start;
2644
2645	/* Setup checksum offload, but default to off if not specified */
2646	lp->features = 0;
2647
2648	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2649		lp->features |= XAE_FEATURE_STATS;
2650
2651	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2652	if (!ret) {
2653		switch (value) {
2654		case 1:
2655			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2656			/* Can checksum any contiguous range */
2657			ndev->features |= NETIF_F_HW_CSUM;
2658			break;
2659		case 2:
2660			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2661			/* Can checksum TCP/UDP over IPv4. */
2662			ndev->features |= NETIF_F_IP_CSUM;
2663			break;
2664		}
2665	}
2666	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2667	if (!ret) {
2668		switch (value) {
2669		case 1:
2670			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2671			ndev->features |= NETIF_F_RXCSUM;
2672			break;
2673		case 2:
2674			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2675			ndev->features |= NETIF_F_RXCSUM;
2676			break;
2677		}
2678	}
2679	/* To support jumbo frames, the Axi Ethernet hardware must be built with
2680	 * a sufficiently large Rx/Tx memory. Read the Rx memory size provisioned
2681	 * in the hardware from the device tree; it is used later to decide
2682	 * whether a requested MTU (and hence the jumbo option) can be
2683	 * accepted.
2684	 */
2685	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2686
2687	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2688						   "xlnx,switch-x-sgmii");
2689
2690	/* Start with the proprietary, and broken phy_type */
2691	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2692	if (!ret) {
2693		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2694		switch (value) {
2695		case XAE_PHY_TYPE_MII:
2696			lp->phy_mode = PHY_INTERFACE_MODE_MII;
2697			break;
2698		case XAE_PHY_TYPE_GMII:
2699			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2700			break;
2701		case XAE_PHY_TYPE_RGMII_2_0:
2702			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2703			break;
2704		case XAE_PHY_TYPE_SGMII:
2705			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2706			break;
2707		case XAE_PHY_TYPE_1000BASE_X:
2708			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2709			break;
2710		default:
2711			ret = -EINVAL;
2712			goto cleanup_clk;
2713		}
2714	} else {
2715		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2716		if (ret)
2717			goto cleanup_clk;
2718	}
2719	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2720	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2721		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2722		ret = -EINVAL;
2723		goto cleanup_clk;
2724	}
2725
2726	if (!of_property_present(pdev->dev.of_node, "dmas")) {
2727		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2728		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2729
2730		if (np) {
2731			struct resource dmares;
2732
2733			ret = of_address_to_resource(np, 0, &dmares);
2734			if (ret) {
2735				dev_err(&pdev->dev,
2736					"unable to get DMA resource\n");
2737				of_node_put(np);
2738				goto cleanup_clk;
2739			}
2740			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2741							     &dmares);
2742			lp->rx_irq = irq_of_parse_and_map(np, 1);
2743			lp->tx_irq = irq_of_parse_and_map(np, 0);
2744			of_node_put(np);
2745			lp->eth_irq = platform_get_irq_optional(pdev, 0);
2746		} else {
2747			/* Check for these resources directly on the Ethernet node. */
2748			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2749			lp->rx_irq = platform_get_irq(pdev, 1);
2750			lp->tx_irq = platform_get_irq(pdev, 0);
2751			lp->eth_irq = platform_get_irq_optional(pdev, 2);
2752		}
2753		if (IS_ERR(lp->dma_regs)) {
2754			dev_err(&pdev->dev, "could not map DMA regs\n");
2755			ret = PTR_ERR(lp->dma_regs);
2756			goto cleanup_clk;
2757		}
2758		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2759			dev_err(&pdev->dev, "could not determine irqs\n");
2760			ret = -ENOMEM;
2761			goto cleanup_clk;
2762		}
2763
2764		/* Reset core now that clocks are enabled, prior to accessing MDIO */
2765		ret = __axienet_device_reset(lp);
2766		if (ret)
2767			goto cleanup_clk;
2768
2769		/* Autodetect the need for 64-bit DMA pointers.
2770		 * When the IP is configured for a bus width bigger than 32 bits,
2771		 * writing the MSB registers is mandatory, even if they are all 0.
2772		 * We can detect this case by writing all 1's to one such register
2773		 * and see if that sticks: when the IP is configured for 32 bits
2774		 * only, those registers are RES0.
2775		 * Those MSB registers were introduced in IP v7.1, which we check first.
2776		 */
2777		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2778			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2779
2780			iowrite32(0x0, desc);
2781			if (ioread32(desc) == 0) {	/* sanity check */
2782				iowrite32(0xffffffff, desc);
2783				if (ioread32(desc) > 0) {
2784					lp->features |= XAE_FEATURE_DMA_64BIT;
2785					addr_width = 64;
2786					dev_info(&pdev->dev,
2787						 "autodetected 64-bit DMA range\n");
2788				}
2789				iowrite32(0x0, desc);
2790			}
2791		}
2792		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2793			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2794			ret = -EINVAL;
2795			goto cleanup_clk;
2796		}
2797
2798		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2799		if (ret) {
2800			dev_err(&pdev->dev, "No suitable DMA available\n");
2801			goto cleanup_clk;
2802		}
2803		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2804		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2805	} else {
2806		struct xilinx_vdma_config cfg;
2807		struct dma_chan *tx_chan;
2808
2809		lp->eth_irq = platform_get_irq_optional(pdev, 0);
2810		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2811			ret = lp->eth_irq;
2812			goto cleanup_clk;
2813		}
2814		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2815		if (IS_ERR(tx_chan)) {
2816			ret = PTR_ERR(tx_chan);
2817			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2818			goto cleanup_clk;
2819		}
2820
2821		cfg.reset = 1;
2822		/* The API is named for VDMA, but it also supports resetting AXI DMA channels */
2823		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2824		if (ret < 0) {
2825			dev_err(&pdev->dev, "Reset channel failed\n");
2826			dma_release_channel(tx_chan);
2827			goto cleanup_clk;
2828		}
2829
2830		dma_release_channel(tx_chan);
2831		lp->use_dmaengine = 1;
2832	}
2833
2834	if (lp->use_dmaengine)
2835		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2836	else
2837		ndev->netdev_ops = &axienet_netdev_ops;
2838	/* Check for Ethernet core IRQ (optional) */
2839	if (lp->eth_irq <= 0)
2840		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2841
2842	/* Retrieve the MAC address */
2843	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2844	if (!ret) {
2845		axienet_set_mac_address(ndev, mac_addr);
2846	} else {
2847		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2848			 ret);
2849		axienet_set_mac_address(ndev, NULL);
2850	}
 
2851
2852	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2853	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2854	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2855	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2856
2857	ret = axienet_mdio_setup(lp);
 
2858	if (ret)
2859		dev_warn(&pdev->dev,
2860			 "error registering MDIO bus: %d\n", ret);
2861
2862	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2863	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2864		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2865		if (!np) {
2866			/* Deprecated: Always use "pcs-handle" for pcs_phy.
2867			 * Falling back to "phy-handle" here is only for
2868			 * backward compatibility with old device trees.
2869			 */
2870			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2871		}
2872		if (!np) {
2873			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2874			ret = -EINVAL;
2875			goto cleanup_mdio;
2876		}
2877		lp->pcs_phy = of_mdio_find_device(np);
2878		if (!lp->pcs_phy) {
2879			ret = -EPROBE_DEFER;
2880			of_node_put(np);
2881			goto cleanup_mdio;
2882		}
2883		of_node_put(np);
2884		lp->pcs.ops = &axienet_pcs_ops;
2885		lp->pcs.neg_mode = true;
2886		lp->pcs.poll = true;
2887	}
2888
2889	lp->phylink_config.dev = &ndev->dev;
2890	lp->phylink_config.type = PHYLINK_NETDEV;
2891	lp->phylink_config.mac_managed_pm = true;
2892	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2893		MAC_10FD | MAC_100FD | MAC_1000FD;
2894
2895	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2896	if (lp->switch_x_sgmii) {
2897		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2898			  lp->phylink_config.supported_interfaces);
2899		__set_bit(PHY_INTERFACE_MODE_SGMII,
2900			  lp->phylink_config.supported_interfaces);
2901	}
2902
2903	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2904				     lp->phy_mode,
2905				     &axienet_phylink_ops);
2906	if (IS_ERR(lp->phylink)) {
2907		ret = PTR_ERR(lp->phylink);
2908		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2909		goto cleanup_mdio;
2910	}
2911
2912	ret = register_netdev(lp->ndev);
2913	if (ret) {
2914		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2915		goto cleanup_phylink;
2916	}
2917
2918	return 0;
2919
2920cleanup_phylink:
2921	phylink_destroy(lp->phylink);
2922
2923cleanup_mdio:
2924	if (lp->pcs_phy)
2925		put_device(&lp->pcs_phy->dev);
2926	if (lp->mii_bus)
2927		axienet_mdio_teardown(lp);
2928cleanup_clk:
2929	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2930	clk_disable_unprepare(lp->axi_clk);
2931
2932free_netdev:
2933	free_netdev(ndev);
2934
2935	return ret;
2936}
2937
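/**
 * axienet_remove - Axi Ethernet remove function
 * @pdev:	Pointer to platform device structure
 *
 * Undoes axienet_probe(): unregisters the network device, destroys the
 * phylink instance, releases the PCS MDIO device, tears down the MDIO bus,
 * disables the clocks and frees the net_device.
 */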
2938static void axienet_remove(struct platform_device *pdev)
2939{
2940	struct net_device *ndev = platform_get_drvdata(pdev);
2941	struct axienet_local *lp = netdev_priv(ndev);
2942
2943	unregister_netdev(ndev);
2944
2945	if (lp->phylink)
2946		phylink_destroy(lp->phylink);
2947
2948	if (lp->pcs_phy)
2949		put_device(&lp->pcs_phy->dev);
2950
2951	axienet_mdio_teardown(lp);
 
2952
2953	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2954	clk_disable_unprepare(lp->axi_clk);
2955
2956	free_netdev(ndev);
2957}
2958
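/* Detach and close the interface on shutdown so that DMA is quiesced
 * before the system powers off or reboots.
 */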
2959static void axienet_shutdown(struct platform_device *pdev)
2960{
2961	struct net_device *ndev = platform_get_drvdata(pdev);
2962
2963	rtnl_lock();
2964	netif_device_detach(ndev);
2965
2966	if (netif_running(ndev))
2967		dev_close(ndev);
2968
2969	rtnl_unlock();
2970}
2971
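/* System suspend/resume: the interface is closed via axienet_stop() on
 * suspend and reopened via axienet_open() on resume; the net device is
 * detached first so the stack does not use it while the hardware is down.
 */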
2972static int axienet_suspend(struct device *dev)
2973{
2974	struct net_device *ndev = dev_get_drvdata(dev);
2975
2976	if (!netif_running(ndev))
2977		return 0;
2978
2979	netif_device_detach(ndev);
2980
2981	rtnl_lock();
2982	axienet_stop(ndev);
2983	rtnl_unlock();
2984
2985	return 0;
2986}
2987
2988static int axienet_resume(struct device *dev)
2989{
2990	struct net_device *ndev = dev_get_drvdata(dev);
2991
2992	if (!netif_running(ndev))
2993		return 0;
2994
2995	rtnl_lock();
2996	axienet_open(ndev);
2997	rtnl_unlock();
2998
2999	netif_device_attach(ndev);
3000
3001	return 0;
3002}
3003
3004static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
3005				axienet_suspend, axienet_resume);
3006
3007static struct platform_driver axienet_driver = {
3008	.probe = axienet_probe,
3009	.remove = axienet_remove,
3010	.shutdown = axienet_shutdown,
3011	.driver = {
 
3012		 .name = "xilinx_axienet",
3013		 .pm = &axienet_pm_ops,
3014		 .of_match_table = axienet_of_match,
3015	},
3016};
3017
3018module_platform_driver(axienet_driver);
3019
3020MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3021MODULE_AUTHOR("Xilinx");
3022MODULE_LICENSE("GPL");
v3.15
 
   1/*
   2 * Xilinx Axi Ethernet device driver
   3 *
   4 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
   5 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
   6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
   7 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
   8 * Copyright (c) 2010 - 2011 PetaLogix
 
   9 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  10 *
  11 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
  12 * and Spartan6.
  13 *
  14 * TODO:
  15 *  - Add Axi Fifo support.
  16 *  - Factor out Axi DMA code into separate driver.
  17 *  - Test and fix basic multicast filtering.
  18 *  - Add support for extended multicast filtering.
  19 *  - Test basic VLAN support.
  20 *  - Add support for extended VLAN support.
  21 */
  22
 
  23#include <linux/delay.h>
  24#include <linux/etherdevice.h>
  25#include <linux/module.h>
  26#include <linux/netdevice.h>
 
  27#include <linux/of_mdio.h>
  28#include <linux/of_platform.h>
  29#include <linux/of_irq.h>
  30#include <linux/of_address.h>
 
  31#include <linux/skbuff.h>
  32#include <linux/spinlock.h>
  33#include <linux/phy.h>
  34#include <linux/mii.h>
  35#include <linux/ethtool.h>
 
 
 
 
 
  36
  37#include "xilinx_axienet.h"
  38
  39/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
  40#define TX_BD_NUM		64
  41#define RX_BD_NUM		128
 
 
 
 
 
 
  42
  43/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
  44#define DRIVER_NAME		"xaxienet"
  45#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
  46#define DRIVER_VERSION		"1.00a"
  47
  48#define AXIENET_REGS_N		32
 
 
  49
  50/* Match table for of_platform binding */
  51static struct of_device_id axienet_of_match[] = {
  52	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
  53	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
  54	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
  55	{},
  56};
  57
  58MODULE_DEVICE_TABLE(of, axienet_of_match);
  59
  60/* Option table for setting up Axi Ethernet hardware options */
  61static struct axienet_option axienet_options[] = {
  62	/* Turn on jumbo packet support for both Rx and Tx */
  63	{
  64		.opt = XAE_OPTION_JUMBO,
  65		.reg = XAE_TC_OFFSET,
  66		.m_or = XAE_TC_JUM_MASK,
  67	}, {
  68		.opt = XAE_OPTION_JUMBO,
  69		.reg = XAE_RCW1_OFFSET,
  70		.m_or = XAE_RCW1_JUM_MASK,
  71	}, { /* Turn on VLAN packet support for both Rx and Tx */
  72		.opt = XAE_OPTION_VLAN,
  73		.reg = XAE_TC_OFFSET,
  74		.m_or = XAE_TC_VLAN_MASK,
  75	}, {
  76		.opt = XAE_OPTION_VLAN,
  77		.reg = XAE_RCW1_OFFSET,
  78		.m_or = XAE_RCW1_VLAN_MASK,
  79	}, { /* Turn on FCS stripping on receive packets */
  80		.opt = XAE_OPTION_FCS_STRIP,
  81		.reg = XAE_RCW1_OFFSET,
  82		.m_or = XAE_RCW1_FCS_MASK,
  83	}, { /* Turn on FCS insertion on transmit packets */
  84		.opt = XAE_OPTION_FCS_INSERT,
  85		.reg = XAE_TC_OFFSET,
  86		.m_or = XAE_TC_FCS_MASK,
  87	}, { /* Turn off length/type field checking on receive packets */
  88		.opt = XAE_OPTION_LENTYPE_ERR,
  89		.reg = XAE_RCW1_OFFSET,
  90		.m_or = XAE_RCW1_LT_DIS_MASK,
  91	}, { /* Turn on Rx flow control */
  92		.opt = XAE_OPTION_FLOW_CONTROL,
  93		.reg = XAE_FCC_OFFSET,
  94		.m_or = XAE_FCC_FCRX_MASK,
  95	}, { /* Turn on Tx flow control */
  96		.opt = XAE_OPTION_FLOW_CONTROL,
  97		.reg = XAE_FCC_OFFSET,
  98		.m_or = XAE_FCC_FCTX_MASK,
  99	}, { /* Turn on promiscuous frame filtering */
 100		.opt = XAE_OPTION_PROMISC,
 101		.reg = XAE_FMI_OFFSET,
 102		.m_or = XAE_FMI_PM_MASK,
 103	}, { /* Enable transmitter */
 104		.opt = XAE_OPTION_TXEN,
 105		.reg = XAE_TC_OFFSET,
 106		.m_or = XAE_TC_TX_MASK,
 107	}, { /* Enable receiver */
 108		.opt = XAE_OPTION_RXEN,
 109		.reg = XAE_RCW1_OFFSET,
 110		.m_or = XAE_RCW1_RX_MASK,
 111	},
 112	{}
 113};
 114
 
 
 
 
 
 
 
 
 
 
 115/**
 116 * axienet_dma_in32 - Memory mapped Axi DMA register read
 117 * @lp:		Pointer to axienet local structure
 118 * @reg:	Address offset from the base address of the Axi DMA core
 119 *
 120 * returns: The contents of the Axi DMA register
 121 *
 122 * This function returns the contents of the corresponding Axi DMA register.
 123 */
 124static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 125{
 126	return in_be32(lp->dma_regs + reg);
 127}
 128
 129/**
 130 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 131 * @lp:		Pointer to axienet local structure
 132 * @reg:	Address offset from the base address of the Axi DMA core
 133 * @value:	Value to be written into the Axi DMA register
 134 *
 135 * This function writes the desired value into the corresponding Axi DMA
 136 * register.
 137 */
 138static inline void axienet_dma_out32(struct axienet_local *lp,
 139				     off_t reg, u32 value)
 140{
 141	out_be32((lp->dma_regs + reg), value);
 
 
 
 
 
 142}
 143
 144/**
 145 * axienet_dma_bd_release - Release buffer descriptor rings
 146 * @ndev:	Pointer to the net_device structure
 147 *
 148 * This function is used to release the descriptors allocated in
 149 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 150 * driver stop api is called.
 151 */
 152static void axienet_dma_bd_release(struct net_device *ndev)
 153{
 154	int i;
 155	struct axienet_local *lp = netdev_priv(ndev);
 156
 157	for (i = 0; i < RX_BD_NUM; i++) {
 158		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
 159				 lp->max_frm_size, DMA_FROM_DEVICE);
 160		dev_kfree_skb((struct sk_buff *)
 161			      (lp->rx_bd_v[i].sw_id_offset));
 162	}
 163
 164	if (lp->rx_bd_v) {
 165		dma_free_coherent(ndev->dev.parent,
 166				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
 167				  lp->rx_bd_v,
 168				  lp->rx_bd_p);
 169	}
 170	if (lp->tx_bd_v) {
 171		dma_free_coherent(ndev->dev.parent,
 172				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 173				  lp->tx_bd_v,
 174				  lp->tx_bd_p);
 
 
 
 
 
 
 
 
 
 
 
 175	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 176}
 177
 178/**
 179 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 180 * @ndev:	Pointer to the net_device structure
 181 *
 182 * returns: 0, on success
 183 *	    -ENOMEM, on failure
 184 *
 185 * This function is called to initialize the Rx and Tx DMA descriptor
 186 * rings. This initializes the descriptors with required default values
 187 * and is called when Axi Ethernet driver reset is called.
 188 */
 189static int axienet_dma_bd_init(struct net_device *ndev)
 190{
 191	u32 cr;
 192	int i;
 193	struct sk_buff *skb;
 194	struct axienet_local *lp = netdev_priv(ndev);
 195
 196	/* Reset the indexes which are used for accessing the BDs */
 197	lp->tx_bd_ci = 0;
 198	lp->tx_bd_tail = 0;
 199	lp->rx_bd_ci = 0;
 200
 201	/*
 202	 * Allocate the Tx and Rx buffer descriptors.
 203	 */
 204	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
 205					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 206					  &lp->tx_bd_p, GFP_KERNEL);
 207	if (!lp->tx_bd_v)
 208		goto out;
 209
 210	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
 211					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
 212					  &lp->rx_bd_p, GFP_KERNEL);
 213	if (!lp->rx_bd_v)
 214		goto out;
 215
 216	for (i = 0; i < TX_BD_NUM; i++) {
 217		lp->tx_bd_v[i].next = lp->tx_bd_p +
 218				      sizeof(*lp->tx_bd_v) *
 219				      ((i + 1) % TX_BD_NUM);
 220	}
 221
 222	for (i = 0; i < RX_BD_NUM; i++) {
 223		lp->rx_bd_v[i].next = lp->rx_bd_p +
 224				      sizeof(*lp->rx_bd_v) *
 225				      ((i + 1) % RX_BD_NUM);
 
 
 
 
 
 
 
 
 226
 227		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 228		if (!skb)
 229			goto out;
 230
 231		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
 232		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
 233						     skb->data,
 234						     lp->max_frm_size,
 235						     DMA_FROM_DEVICE);
 
 
 
 
 236		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 237	}
 238
 239	/* Start updating the Rx channel control register */
 240	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 241	/* Update the interrupt coalesce count */
 242	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
 243	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
 244	/* Update the delay timer count */
 245	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
 246	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
 247	/* Enable coalesce, delay timer and error interrupts */
 248	cr |= XAXIDMA_IRQ_ALL_MASK;
 249	/* Write to the Rx channel control register */
 250	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 251
 252	/* Start updating the Tx channel control register */
 253	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 254	/* Update the interrupt coalesce count */
 255	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
 256	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
 257	/* Update the delay timer count */
 258	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
 259	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
 260	/* Enable coalesce, delay timer and error interrupts */
 261	cr |= XAXIDMA_IRQ_ALL_MASK;
 262	/* Write to the Tx channel control register */
 263	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 264
 265	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 266	 * halted state. This will make the Rx side ready for reception.*/
 267	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 268	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 269	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
 270			  cr | XAXIDMA_CR_RUNSTOP_MASK);
 271	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
 272			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
 273
 274	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 275	 * Tx channel is now ready to run. But only after we write to the
 276	 * tail pointer register that the Tx channel will start transmitting */
 277	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 278	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 279	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
 280			  cr | XAXIDMA_CR_RUNSTOP_MASK);
 281
 282	return 0;
 283out:
 284	axienet_dma_bd_release(ndev);
 285	return -ENOMEM;
 286}
 287
 288/**
 289 * axienet_set_mac_address - Write the MAC address
 290 * @ndev:	Pointer to the net_device structure
 291 * @address:	6 byte Address to be written as MAC address
 292 *
 293 * This function is called to initialize the MAC address of the Axi Ethernet
 294 * core. It writes to the UAW0 and UAW1 registers of the core.
 295 */
 296static void axienet_set_mac_address(struct net_device *ndev, void *address)
 
 297{
 298	struct axienet_local *lp = netdev_priv(ndev);
 299
 300	if (address)
 301		memcpy(ndev->dev_addr, address, ETH_ALEN);
 302	if (!is_valid_ether_addr(ndev->dev_addr))
 303		eth_random_addr(ndev->dev_addr);
 304
 305	/* Set up unicast MAC address filter set its mac address */
 306	axienet_iow(lp, XAE_UAW0_OFFSET,
 307		    (ndev->dev_addr[0]) |
 308		    (ndev->dev_addr[1] << 8) |
 309		    (ndev->dev_addr[2] << 16) |
 310		    (ndev->dev_addr[3] << 24));
 311	axienet_iow(lp, XAE_UAW1_OFFSET,
 312		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
 313		      ~XAE_UAW1_UNICASTADDR_MASK) |
 314		     (ndev->dev_addr[4] |
 315		     (ndev->dev_addr[5] << 8))));
 316}
 317
 318/**
 319 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 320 * @ndev:	Pointer to the net_device structure
 321 * @p:		6 byte Address to be written as MAC address
 322 *
 323 * returns: 0 for all conditions. Presently, there is no failure case.
 324 *
 325 * This function is called to initialize the MAC address of the Axi Ethernet
 326 * core. It calls the core specific axienet_set_mac_address. This is the
 327 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 328 */
 329static int netdev_set_mac_address(struct net_device *ndev, void *p)
 330{
 331	struct sockaddr *addr = p;
 
 332	axienet_set_mac_address(ndev, addr->sa_data);
 333	return 0;
 334}
 335
 336/**
 337 * axienet_set_multicast_list - Prepare the multicast table
 338 * @ndev:	Pointer to the net_device structure
 339 *
 340 * This function is called to initialize the multicast table during
 341 * initialization. The Axi Ethernet basic multicast support has a four-entry
 342 * multicast table which is initialized here. Additionally this function
 343 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
 344 * means whenever the multicast table entries need to be updated this
 345 * function gets called.
 346 */
 347static void axienet_set_multicast_list(struct net_device *ndev)
 348{
 349	int i;
 350	u32 reg, af0reg, af1reg;
 351	struct axienet_local *lp = netdev_priv(ndev);
 352
 353	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
 354	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
 355		/* We must make the kernel realize we had to move into
 356		 * promiscuous mode. If it was a promiscuous mode request
 357		 * the flag is already set. If not we set it. */
 358		ndev->flags |= IFF_PROMISC;
 359		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 360		reg |= XAE_FMI_PM_MASK;
 361		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 362		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
 363	} else if (!netdev_mc_empty(ndev)) {
 364		struct netdev_hw_addr *ha;
 365
 366		i = 0;
 367		netdev_for_each_mc_addr(ha, ndev) {
 368			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
 369				break;
 370
 371			af0reg = (ha->addr[0]);
 372			af0reg |= (ha->addr[1] << 8);
 373			af0reg |= (ha->addr[2] << 16);
 374			af0reg |= (ha->addr[3] << 24);
 375
 376			af1reg = (ha->addr[4]);
 377			af1reg |= (ha->addr[5] << 8);
 378
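    			/* Select multicast CAM entry 'i' via the low byte of the filter
    			 * mask index register before programming AF0/AF1 below. */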
 379			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 380			reg |= i;
 381
 382			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 383			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
 384			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
 385			i++;
 386		}
 387	} else {
 388		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 389		reg &= ~XAE_FMI_PM_MASK;
 390
 391		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 392
 393		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
 394			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 395			reg |= i;
 396
 397			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 398			axienet_iow(lp, XAE_AF0_OFFSET, 0);
 399			axienet_iow(lp, XAE_AF1_OFFSET, 0);
 400		}
 401
 402		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
 403	}
 404}
 405
 406/**
 407 * axienet_setoptions - Set an Axi Ethernet option
 408 * @ndev:	Pointer to the net_device structure
 409 * @options:	Option to be enabled/disabled
 410 *
 411 * The Axi Ethernet core has multiple features which can be selectively turned
 412 * on or off. The typical options could be jumbo frame option, basic VLAN
 413 * option, promiscuous mode option etc. This function is used to set or clear
 414 * these options in the Axi Ethernet hardware. This is done through the
 415 * axienet_option structure.
 416 */
 417static void axienet_setoptions(struct net_device *ndev, u32 options)
 418{
 419	int reg;
 420	struct axienet_local *lp = netdev_priv(ndev);
 421	struct axienet_option *tp = &axienet_options[0];
 422
 423	while (tp->opt) {
 424		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
 425		if (options & tp->opt)
 426			reg |= tp->m_or;
 427		axienet_iow(lp, tp->reg, reg);
 428		tp++;
 429	}
 430
 431	lp->options |= options;
 432}
 433
 434static void __axienet_device_reset(struct axienet_local *lp,
 435				   struct device *dev, off_t offset)
 436{
 437	u32 timeout;
 438	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 439	 * process of Axi DMA takes a while to complete as all pending
 440	 * commands/transfers will be flushed or completed during this
 441	 * reset process. */
 442	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
 443	timeout = DELAY_OF_ONE_MILLISEC;
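    	/* Poll until the DMA engine clears the reset bit, waiting at most
    	 * roughly one millisecond (1 us per iteration). */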
 444	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
 445		udelay(1);
 446		if (--timeout == 0) {
 447			dev_err(dev, "axienet_device_reset DMA "
 448				"reset timeout!\n");
 449			break;
 450		}
 451	}
 452}
 453
 454/**
 455 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 456 * @ndev:	Pointer to the net_device structure
 457 *
 458 * This function is called to reset and initialize the Axi Ethernet core. This
 459 * is typically called during initialization. It does a reset of the Axi DMA
 460 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 461 * areconnected to Axi Ethernet reset lines, this in turn resets the Axi
 462 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 463 * core.
 464 */
 465static void axienet_device_reset(struct net_device *ndev)
 466{
 467	u32 axienet_status;
 468	struct axienet_local *lp = netdev_priv(ndev);
 469
 470	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
 471	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
 472
 473	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 474	lp->options &= (~XAE_OPTION_JUMBO);
 475
 476	if ((ndev->mtu > XAE_MTU) &&
 477	    (ndev->mtu <= XAE_JUMBO_MTU) &&
 478	    (lp->jumbo_support)) {
 479		lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
 480				   XAE_TRL_SIZE;
 481		lp->options |= XAE_OPTION_JUMBO;
 482	}
 483
 484	if (axienet_dma_bd_init(ndev)) {
 485		dev_err(&ndev->dev, "axienet_device_reset descriptor "
 486			"allocation failed\n");
 487	}
 488
 489	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
 490	axienet_status &= ~XAE_RCW1_RX_MASK;
 491	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
 492
 493	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
 494	if (axienet_status & XAE_INT_RXRJECT_MASK)
 495		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
 496
 497	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 498
 499	/* Sync default options with HW but leave receiver and
 500	 * transmitter disabled. */
 501	axienet_setoptions(ndev, lp->options &
 502			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 503	axienet_set_mac_address(ndev, NULL);
 504	axienet_set_multicast_list(ndev);
 505	axienet_setoptions(ndev, lp->options);
 506
 507	ndev->trans_start = jiffies;
 508}
 509
 510/**
 511 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 512 * @ndev:	Pointer to the net_device structure
 513 *
 514 * This function is called to change the speed and duplex setting after
 515 * auto negotiation is done by the PHY. This is the function that gets
 516 * registered with the PHY interface through the "of_phy_connect" call.
 517 */
 518static void axienet_adjust_link(struct net_device *ndev)
 519{
 520	u32 emmc_reg;
 521	u32 link_state;
 522	u32 setspeed = 1;
 523	struct axienet_local *lp = netdev_priv(ndev);
 524	struct phy_device *phy = lp->phy_dev;
 525
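    	/* Pack speed, duplex and link into a single word so that a change in
    	 * any of them is caught by one compare against the cached value. */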
 526	link_state = phy->speed | (phy->duplex << 1) | phy->link;
 527	if (lp->last_link != link_state) {
 528		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
 529			if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
 530				setspeed = 0;
 531		} else {
 532			if ((phy->speed == SPEED_1000) &&
 533			    (lp->phy_type == XAE_PHY_TYPE_MII))
 534				setspeed = 0;
 535		}
 536
 537		if (setspeed == 1) {
 538			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
 539			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
 540
 541			switch (phy->speed) {
 542			case SPEED_1000:
 543				emmc_reg |= XAE_EMMC_LINKSPD_1000;
 544				break;
 545			case SPEED_100:
 546				emmc_reg |= XAE_EMMC_LINKSPD_100;
 547				break;
 548			case SPEED_10:
 549				emmc_reg |= XAE_EMMC_LINKSPD_10;
 550				break;
 551			default:
 552				dev_err(&ndev->dev, "Speeds other than 10, 100 "
 553					"or 1000 Mbps are not supported\n");
 554				break;
 555			}
 556
 557			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
 558			lp->last_link = link_state;
 559			phy_print_status(phy);
 560		} else {
 561			dev_err(&ndev->dev, "Error setting Axi Ethernet "
 562				"MAC speed\n");
 563		}
 564	}
 565}
 566
 567/**
 568 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 569 * Axi DMA Tx channel.
 570 * @ndev:	Pointer to the net_device structure
 571 *
 572 * This function is invoked from the Axi DMA Tx isr to notify the completion
 573 * of transmit operation. It clears fields in the corresponding Tx BDs and
 574 * unmaps the corresponding buffer so that CPU can regain ownership of the
 575 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 576 * required.
 577 */
 578static void axienet_start_xmit_done(struct net_device *ndev)
 579{
 580	u32 size = 0;
 581	u32 packets = 0;
 582	struct axienet_local *lp = netdev_priv(ndev);
 583	struct axidma_bd *cur_p;
 584	unsigned int status = 0;
 585
 586	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
 587	status = cur_p->status;
 588	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
 589		dma_unmap_single(ndev->dev.parent, cur_p->phys,
 590				(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 591				DMA_TO_DEVICE);
 592		if (cur_p->app4)
 593			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
 594		/*cur_p->phys = 0;*/
 595		cur_p->app0 = 0;
 596		cur_p->app1 = 0;
 597		cur_p->app2 = 0;
 598		cur_p->app4 = 0;
 599		cur_p->status = 0;
 600
 601		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 602		packets++;
 603
 604		++lp->tx_bd_ci;
 605		lp->tx_bd_ci %= TX_BD_NUM;
 606		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
 607		status = cur_p->status;
 608	}
 609
 610	ndev->stats.tx_packets += packets;
 611	ndev->stats.tx_bytes += size;
 612	netif_wake_queue(ndev);
 613}
 614
 615/**
 616 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 617 * @lp:		Pointer to the axienet_local structure
 618 * @num_frag:	The number of BDs to check for
 619 *
 620 * returns: 0, on success
 621 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 622 *
 623 * This function is invoked before BDs are allocated and transmission starts.
 624 * This function returns 0 if a BD or group of BDs can be allocated for
 625 * transmission. If the BD or any of the BDs are not free the function
 626 * returns a busy status. This is invoked from axienet_start_xmit.
 627 */
 628static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 629					    int num_frag)
 630{
 631	struct axidma_bd *cur_p;
 632	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
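    	/* If the BD num_frag entries beyond the tail still has status bits set,
    	 * it has not been reclaimed yet and the ring has no room. */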
 633	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
 634		return NETDEV_TX_BUSY;
 635	return 0;
 636}
 637
 638/**
 639 * axienet_start_xmit - Starts the transmission.
 640 * @skb:	sk_buff pointer that contains data to be Txed.
 641 * @ndev:	Pointer to net_device structure.
 642 *
 643 * returns: NETDEV_TX_OK, on success
 644 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 645 *
 646 * This function is invoked from upper layers to initiate transmission. The
 647 * function uses the next available free BDs and populates their fields to
 648 * start the transmission. Additionally if checksum offloading is supported,
 649 * it populates AXI Stream Control fields with appropriate values.
 650 */
 651static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 652{
 653	u32 ii;
 654	u32 num_frag;
 655	u32 csum_start_off;
 656	u32 csum_index_off;
 657	skb_frag_t *frag;
 658	dma_addr_t tail_p;
 659	struct axienet_local *lp = netdev_priv(ndev);
 660	struct axidma_bd *cur_p;
 661
 662	num_frag = skb_shinfo(skb)->nr_frags;
 663	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 664
 665	if (axienet_check_tx_bd_space(lp, num_frag)) {
 666		if (!netif_queue_stopped(ndev))
 667			netif_stop_queue(ndev);
 668		return NETDEV_TX_BUSY;
 669	}
 670
 671	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 672		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 673			/* Tx Full Checksum Offload Enabled */
 674			cur_p->app0 |= 2;
 675		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
 676			csum_start_off = skb_transport_offset(skb);
 677			csum_index_off = csum_start_off + skb->csum_offset;
 678			/* Tx Partial Checksum Offload Enabled */
 679			cur_p->app0 |= 1;
 680			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
 681		}
 682	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 683		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
 684	}
 685
 686	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 687	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
 688				     skb_headlen(skb), DMA_TO_DEVICE);
 689
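    	/* Map each skb fragment into its own BD; the EOF flag is set on the
    	 * last BD after this loop. */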
 690	for (ii = 0; ii < num_frag; ii++) {
 691		++lp->tx_bd_tail;
 692		lp->tx_bd_tail %= TX_BD_NUM;
 693		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 694		frag = &skb_shinfo(skb)->frags[ii];
 695		cur_p->phys = dma_map_single(ndev->dev.parent,
 696					     skb_frag_address(frag),
 697					     skb_frag_size(frag),
 698					     DMA_TO_DEVICE);
 699		cur_p->cntrl = skb_frag_size(frag);
 700	}
 701
 702	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
 703	cur_p->app4 = (unsigned long)skb;
 704
 705	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 706	/* Start the transfer */
 707	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
 708	++lp->tx_bd_tail;
 709	lp->tx_bd_tail %= TX_BD_NUM;
 710
 711	return NETDEV_TX_OK;
 712}
 713
 714/**
 715 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 716 *		  BD processing.
 717 * @ndev:	Pointer to net_device structure.
 718 *
 719 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 720 * does minimal processing and invokes "netif_rx" to complete further
 721 * processing.
 722 */
 723static void axienet_recv(struct net_device *ndev)
 724{
 725	u32 length;
 726	u32 csumstatus;
 727	u32 size = 0;
 728	u32 packets = 0;
 729	dma_addr_t tail_p;
 730	struct axienet_local *lp = netdev_priv(ndev);
 731	struct sk_buff *skb, *new_skb;
 732	struct axidma_bd *cur_p;
 733
 734	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
 735	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 736
 737	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
 738		skb = (struct sk_buff *) (cur_p->sw_id_offset);
 739		length = cur_p->app4 & 0x0000FFFF;
 740
 741		dma_unmap_single(ndev->dev.parent, cur_p->phys,
 742				 lp->max_frm_size,
 743				 DMA_FROM_DEVICE);
 744
 745		skb_put(skb, length);
 746		skb->protocol = eth_type_trans(skb, ndev);
 747		/*skb_checksum_none_assert(skb);*/
 748		skb->ip_summed = CHECKSUM_NONE;
 749
 750		/* if we're doing Rx csum offload, set it up */
 751		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
 752			csumstatus = (cur_p->app2 &
 753				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
 754			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
 755			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
 756				skb->ip_summed = CHECKSUM_UNNECESSARY;
 757			}
 758		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
 759			   skb->protocol == htons(ETH_P_IP) &&
 760			   skb->len > 64) {
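    			/* For partial Rx csum offload the checksum value supplied by
    			 * the hardware in app3 is used as the frame checksum. */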
 761			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
 762			skb->ip_summed = CHECKSUM_COMPLETE;
 763		}
 764
 765		netif_rx(skb);
 766
 767		size += length;
 768		packets++;
 769
 770		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 771		if (!new_skb)
 772			return;
 773
 774		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
 775					     lp->max_frm_size,
 776					     DMA_FROM_DEVICE);
 777		cur_p->cntrl = lp->max_frm_size;
 778		cur_p->status = 0;
 779		cur_p->sw_id_offset = (u32) new_skb;
 780
 781		++lp->rx_bd_ci;
 782		lp->rx_bd_ci %= RX_BD_NUM;
 783		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 784	}
 785
 786	ndev->stats.rx_packets += packets;
 787	ndev->stats.rx_bytes += size;
 788
 789	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
 790}
 791
 792/**
 793 * axienet_tx_irq - Tx Done Isr.
 794 * @irq:	irq number
 795 * @_ndev:	net_device pointer
 796 *
 797 * returns: IRQ_HANDLED for all cases.
 798 *
 799 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 800 * to complete the BD processing.
 801 */
 802static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
 803{
 804	u32 cr;
 805	unsigned int status;
 806	struct net_device *ndev = _ndev;
 807	struct axienet_local *lp = netdev_priv(ndev);
 808
 809	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 810	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
 811		axienet_start_xmit_done(lp->ndev);
 812		goto out;
 813	}
 814	if (!(status & XAXIDMA_IRQ_ALL_MASK))
 815		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
 816	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 817		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
 818		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
 819			(lp->tx_bd_v[lp->tx_bd_ci]).phys);
 820
 821		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 822		/* Disable coalesce, delay timer and error interrupts */
 823		cr &= (~XAXIDMA_IRQ_ALL_MASK);
 824		/* Write to the Tx channel control register */
 825		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 826
 827		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 828		/* Disable coalesce, delay timer and error interrupts */
 829		cr &= (~XAXIDMA_IRQ_ALL_MASK);
 830		/* Write to the Rx channel control register */
 831		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 832
 833		tasklet_schedule(&lp->dma_err_tasklet);
 834	}
 835out:
 836	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
 837	return IRQ_HANDLED;
 838}
 839
 840/**
 841 * axienet_rx_irq - Rx Isr.
 842 * @irq:	irq number
 843 * @_ndev:	net_device pointer
 844 *
 845 * returns: IRQ_HANDLED for all cases.
 846 *
 847 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 848 * processing.
 849 */
 850static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
 851{
 852	u32 cr;
 853	unsigned int status;
 854	struct net_device *ndev = _ndev;
 855	struct axienet_local *lp = netdev_priv(ndev);
 856
 857	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 858	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
 859		axienet_recv(lp->ndev);
 860		goto out;
 861	}
 862	if (!(status & XAXIDMA_IRQ_ALL_MASK))
 863		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
 864	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 865		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
 866		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
 867			(lp->rx_bd_v[lp->rx_bd_ci]).phys);
 868
 869		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 870		/* Disable coalesce, delay timer and error interrupts */
 871		cr &= (~XAXIDMA_IRQ_ALL_MASK);
 872		/* Finally write to the Tx channel control register */
 873		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 874
 875		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 876		/* Disable coalesce, delay timer and error interrupts */
 877		cr &= (~XAXIDMA_IRQ_ALL_MASK);
 878		/* write to the Rx channel control register */
 879		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 880
 881		tasklet_schedule(&lp->dma_err_tasklet);
 882	}
 883out:
 884	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
 885	return IRQ_HANDLED;
 886}
 887
 888static void axienet_dma_err_handler(unsigned long data);
 889
 890/**
 891 * axienet_open - Driver open routine.
 892 * @ndev:	Pointer to net_device structure
 893 *
 894 * returns: 0, on success.
 895 *	    -ENODEV, if the PHY cannot be connected
 896 *	    non-zero error value on failure
 897 *
 898 * This is the driver open routine. It calls phy_start to start the PHY device.
 899 * It also registers the interrupt service routines and enables the interrupt
 900 * lines. The Axi Ethernet core is reset through the Axi DMA core. Buffer
 901 * descriptors are initialized.
 902 */
 903static int axienet_open(struct net_device *ndev)
 904{
 905	int ret, mdio_mcreg;
 906	struct axienet_local *lp = netdev_priv(ndev);
 907
 908	dev_dbg(&ndev->dev, "axienet_open()\n");
 909
 910	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
 911	ret = axienet_mdio_wait_until_ready(lp);
 912	if (ret < 0)
 913		return ret;
 914	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
 915	 * When we do an Axi Ethernet reset, it resets the complete core
 916	 * including the MDIO. If MDIO is not disabled when the reset
 917	 * process is started, MDIO will be broken afterwards. */
 918	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
 919		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
 920	axienet_device_reset(ndev);
 921	/* Enable the MDIO */
 922	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
 923	ret = axienet_mdio_wait_until_ready(lp);
 924	if (ret < 0)
 925		return ret;
 926
 927	if (lp->phy_node) {
 928		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
 929					     axienet_adjust_link, 0,
 930					     PHY_INTERFACE_MODE_GMII);
 931		if (!lp->phy_dev) {
 932			dev_err(lp->dev, "of_phy_connect() failed\n");
 933			return -ENODEV;
 934		}
 935		phy_start(lp->phy_dev);
 936	}
 937
 938	/* Enable tasklets for Axi DMA error handling */
 939	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
 940		     (unsigned long) lp);
 941
 942	/* Enable interrupts for Axi DMA Tx */
 943	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
 944	if (ret)
 945		goto err_tx_irq;
 946	/* Enable interrupts for Axi DMA Rx */
 947	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
 948	if (ret)
 949		goto err_rx_irq;
 950
 951	return 0;
 952
 953err_rx_irq:
 954	free_irq(lp->tx_irq, ndev);
 955err_tx_irq:
 956	if (lp->phy_dev)
 957		phy_disconnect(lp->phy_dev);
 958	lp->phy_dev = NULL;
 959	tasklet_kill(&lp->dma_err_tasklet);
 960	dev_err(lp->dev, "request_irq() failed\n");
 961	return ret;
 962}
 963
 964/**
 965 * axienet_stop - Driver stop routine.
 966 * @ndev:	Pointer to net_device structure
 967 *
 968 * returns: 0, on success.
 969 *
 970 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 971 * device. It also removes the interrupt handlers and disables the interrupts.
 972 * The Axi DMA Tx/Rx BDs are released.
 973 */
 974static int axienet_stop(struct net_device *ndev)
 975{
 976	u32 cr;
 977	struct axienet_local *lp = netdev_priv(ndev);
 978
 979	dev_dbg(&ndev->dev, "axienet_close()\n");
 980
 981	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 982	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
 983			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
 984	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 985	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
 986			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
 987	axienet_setoptions(ndev, lp->options &
 988			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 989
 990	tasklet_kill(&lp->dma_err_tasklet);
 991
 992	free_irq(lp->tx_irq, ndev);
 993	free_irq(lp->rx_irq, ndev);
 994
 995	if (lp->phy_dev)
 996		phy_disconnect(lp->phy_dev);
 997	lp->phy_dev = NULL;
 998
 999	axienet_dma_bd_release(ndev);
1000	return 0;
1001}
1002
1003/**
1004 * axienet_change_mtu - Driver change mtu routine.
1005 * @ndev:	Pointer to net_device structure
1006 * @new_mtu:	New mtu value to be applied
1007 *
1008 * returns: 0 on success, or a negative error value (-EBUSY or -EINVAL).
1009 *
1010 * This is the change mtu driver routine. It checks if the Axi Ethernet
1011 * hardware supports jumbo frames before changing the mtu. This can be
1012 * called only when the device is not up.
1013 */
1014static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1015{
1016	struct axienet_local *lp = netdev_priv(ndev);
1017
1018	if (netif_running(ndev))
1019		return -EBUSY;
1020	if (lp->jumbo_support) {
1021		if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
1022			return -EINVAL;
1023		ndev->mtu = new_mtu;
1024	} else {
1025		if ((new_mtu > XAE_MTU) || (new_mtu < 64))
1026			return -EINVAL;
1027		ndev->mtu = new_mtu;
1028	}
1029
1030	return 0;
1031}
1032
1033#ifdef CONFIG_NET_POLL_CONTROLLER
1034/**
1035 * axienet_poll_controller - Axi Ethernet poll mechanism.
1036 * @ndev:	Pointer to net_device structure
1037 *
1038 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1039 * to polling the ISRs and are enabled back after the polling is done.
1040 */
1041static void axienet_poll_controller(struct net_device *ndev)
1042{
1043	struct axienet_local *lp = netdev_priv(ndev);
1044	disable_irq(lp->tx_irq);
1045	disable_irq(lp->rx_irq);
1046	axienet_rx_irq(lp->rx_irq, ndev);
1047	axienet_tx_irq(lp->tx_irq, ndev);
1048	enable_irq(lp->tx_irq);
1049	enable_irq(lp->rx_irq);
1050}
1051#endif
1052
1053static const struct net_device_ops axienet_netdev_ops = {
1054	.ndo_open = axienet_open,
1055	.ndo_stop = axienet_stop,
1056	.ndo_start_xmit = axienet_start_xmit,
1057	.ndo_change_mtu	= axienet_change_mtu,
1058	.ndo_set_mac_address = netdev_set_mac_address,
1059	.ndo_validate_addr = eth_validate_addr,
1060	.ndo_set_rx_mode = axienet_set_multicast_list,
1061#ifdef CONFIG_NET_POLL_CONTROLLER
1062	.ndo_poll_controller = axienet_poll_controller,
1063#endif
1064};
1065
1066/**
1067 * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
1068 * @ndev:	Pointer to net_device structure
1069 * @ecmd:	Pointer to ethtool_cmd structure
1070 *
1071 * This implements ethtool command for getting PHY settings. If PHY could
1072 * not be found, the function returns -ENODEV. This function calls the
1073 * relevant PHY ethtool API to get the PHY settings.
1074 * Issue "ethtool ethX" under linux prompt to execute this function.
1075 */
1076static int axienet_ethtools_get_settings(struct net_device *ndev,
1077					 struct ethtool_cmd *ecmd)
1078{
1079	struct axienet_local *lp = netdev_priv(ndev);
1080	struct phy_device *phydev = lp->phy_dev;
1081	if (!phydev)
1082		return -ENODEV;
1083	return phy_ethtool_gset(phydev, ecmd);
1084}
1085
1086/**
1087 * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
1088 * @ndev:	Pointer to net_device structure
1089 * @ecmd:	Pointer to ethtool_cmd structure
1090 *
1091 * This implements ethtool command for setting various PHY settings. If PHY
1092 * could not be found, the function returns -ENODEV. This function calls the
1093 * relevant PHY ethtool API to set the PHY.
1094 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
1095 * function.
1096 */
1097static int axienet_ethtools_set_settings(struct net_device *ndev,
1098					 struct ethtool_cmd *ecmd)
1099{
1100	struct axienet_local *lp = netdev_priv(ndev);
1101	struct phy_device *phydev = lp->phy_dev;
1102	if (!phydev)
1103		return -ENODEV;
1104	return phy_ethtool_sset(phydev, ecmd);
1105}
1106
1107/**
1108 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1109 * @ndev:	Pointer to net_device structure
1110 * @ed:		Pointer to ethtool_drvinfo structure
1111 *
1112 * This implements ethtool command for getting the driver information.
1113 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1114 */
1115static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1116					 struct ethtool_drvinfo *ed)
1117{
1118	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1119	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1120	ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
1121}
1122
1123/**
1124 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1125 *				   AxiEthernet core.
1126 * @ndev:	Pointer to net_device structure
1127 *
1128 * This implements ethtool command for getting the total register length
1129 * information.
1130 */
1131static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1132{
1133	return sizeof(u32) * AXIENET_REGS_N;
1134}
1135
1136/**
1137 * axienet_ethtools_get_regs - Dump the contents of all registers present
1138 *			       in AxiEthernet core.
1139 * @ndev:	Pointer to net_device structure
1140 * @regs:	Pointer to ethtool_regs structure
1141 * @ret:	Void pointer used to return the contents of the registers.
1142 *
1143 * This implements ethtool command for getting the Axi Ethernet register dump.
1144 * Issue "ethtool -d ethX" to execute this function.
1145 */
1146static void axienet_ethtools_get_regs(struct net_device *ndev,
1147				      struct ethtool_regs *regs, void *ret)
1148{
1149	u32 *data = (u32 *) ret;
1150	size_t len = sizeof(u32) * AXIENET_REGS_N;
1151	struct axienet_local *lp = netdev_priv(ndev);
1152
1153	regs->version = 0;
1154	regs->len = len;
1155
1156	memset(data, 0, len);
1157	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1158	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1159	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1160	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1161	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1162	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1163	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1164	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1165	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1166	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1167	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1168	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1169	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1170	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1171	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1172	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1173	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1174	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1175	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1176	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1177	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1178	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1179	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1180	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
1181	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
1182	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
1183	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
1184	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1185	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1186	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1187	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1188	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1189}
1190
1191/**
1192 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1193 *				     Tx and Rx paths.
1194 * @ndev:	Pointer to net_device structure
1195 * @epauseparm:	Pointer to ethtool_pauseparam structure.
1196 *
1197 * This implements ethtool command for getting the Axi Ethernet pause frame
1198 * settings. Issue "ethtool -a ethX" to execute this function.
1199 */
1200static void
1201axienet_ethtools_get_pauseparam(struct net_device *ndev,
1202				struct ethtool_pauseparam *epauseparm)
1203{
1204	u32 regval;
1205	struct axienet_local *lp = netdev_priv(ndev);
1206	epauseparm->autoneg  = 0;
1207	regval = axienet_ior(lp, XAE_FCC_OFFSET);
1208	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
1209	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
1210}
1211
1212/**
1213 * axienet_ethtools_set_pauseparam - Set device pause parameter (flow control)
1214 *				     settings.
1215 * @ndev:	Pointer to net_device structure
1216 * @epauseparm:	Pointer to ethtool_pauseparam structure
1217 *
1218 * This implements ethtool command for enabling flow control on Rx and Tx
1219 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1220 * function.
1221 */
1222static int
1223axienet_ethtools_set_pauseparam(struct net_device *ndev,
1224				struct ethtool_pauseparam *epauseparm)
1225{
1226	u32 regval = 0;
1227	struct axienet_local *lp = netdev_priv(ndev);
1228
1229	if (netif_running(ndev)) {
1230		printk(KERN_ERR	"%s: Please stop netif before applying "
1231		       "configuration\n", ndev->name);
1232		return -EFAULT;
1233	}
1234
1235	regval = axienet_ior(lp, XAE_FCC_OFFSET);
1236	if (epauseparm->tx_pause)
1237		regval |= XAE_FCC_FCTX_MASK;
1238	else
1239		regval &= ~XAE_FCC_FCTX_MASK;
1240	if (epauseparm->rx_pause)
1241		regval |= XAE_FCC_FCRX_MASK;
1242	else
1243		regval &= ~XAE_FCC_FCRX_MASK;
1244	axienet_iow(lp, XAE_FCC_OFFSET, regval);
1245
1246	return 0;
1247}
1248
1249/**
1250 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
1251 * @ndev:	Pointer to net_device structure
1252 * @ecoalesce:	Pointer to ethtool_coalesce structure
1253 *
1254 * This implements ethtool command for getting the DMA interrupt coalescing
1255 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
1256 * execute this function.
1257 */
1258static int axienet_ethtools_get_coalesce(struct net_device *ndev,
1259					 struct ethtool_coalesce *ecoalesce)
1260{
1261	u32 regval = 0;
1262	struct axienet_local *lp = netdev_priv(ndev);
1263	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1264	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
1265					     >> XAXIDMA_COALESCE_SHIFT;
1266	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1267	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
1268					     >> XAXIDMA_COALESCE_SHIFT;
1269	return 0;
1270}
1271
1272/**
1273 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
1274 * @ndev:	Pointer to net_device structure
1275 * @ecoalesce:	Pointer to ethtool_coalesce structure
1276 *
1277 * This implements ethtool command for setting the DMA interrupt coalescing
1278 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
1279 * prompt to execute this function.
1280 */
1281static int axienet_ethtools_set_coalesce(struct net_device *ndev,
1282					 struct ethtool_coalesce *ecoalesce)
1283{
1284	struct axienet_local *lp = netdev_priv(ndev);
1285
1286	if (netif_running(ndev)) {
1287		printk(KERN_ERR	"%s: Please stop netif before applying "
1288		       "configuration\n", ndev->name);
1289		return -EFAULT;
1290	}
1291
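    	/* Only rx/tx max_coalesced_frames are supported; reject a request
    	 * that sets any other coalescing parameter. */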
1292	if ((ecoalesce->rx_coalesce_usecs) ||
1293	    (ecoalesce->rx_coalesce_usecs_irq) ||
1294	    (ecoalesce->rx_max_coalesced_frames_irq) ||
1295	    (ecoalesce->tx_coalesce_usecs) ||
1296	    (ecoalesce->tx_coalesce_usecs_irq) ||
1297	    (ecoalesce->tx_max_coalesced_frames_irq) ||
1298	    (ecoalesce->stats_block_coalesce_usecs) ||
1299	    (ecoalesce->use_adaptive_rx_coalesce) ||
1300	    (ecoalesce->use_adaptive_tx_coalesce) ||
1301	    (ecoalesce->pkt_rate_low) ||
1302	    (ecoalesce->rx_coalesce_usecs_low) ||
1303	    (ecoalesce->rx_max_coalesced_frames_low) ||
1304	    (ecoalesce->tx_coalesce_usecs_low) ||
1305	    (ecoalesce->tx_max_coalesced_frames_low) ||
1306	    (ecoalesce->pkt_rate_high) ||
1307	    (ecoalesce->rx_coalesce_usecs_high) ||
1308	    (ecoalesce->rx_max_coalesced_frames_high) ||
1309	    (ecoalesce->tx_coalesce_usecs_high) ||
1310	    (ecoalesce->tx_max_coalesced_frames_high) ||
1311	    (ecoalesce->rate_sample_interval))
1312		return -EOPNOTSUPP;
1313	if (ecoalesce->rx_max_coalesced_frames)
1314		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
1315	if (ecoalesce->tx_max_coalesced_frames)
1316		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
1317
1318	return 0;
1319}
1320
1321static struct ethtool_ops axienet_ethtool_ops = {
1322	.get_settings   = axienet_ethtools_get_settings,
1323	.set_settings   = axienet_ethtools_set_settings,
1324	.get_drvinfo    = axienet_ethtools_get_drvinfo,
1325	.get_regs_len   = axienet_ethtools_get_regs_len,
1326	.get_regs       = axienet_ethtools_get_regs,
1327	.get_link       = ethtool_op_get_link,
1328	.get_pauseparam = axienet_ethtools_get_pauseparam,
1329	.set_pauseparam = axienet_ethtools_set_pauseparam,
1330	.get_coalesce   = axienet_ethtools_get_coalesce,
1331	.set_coalesce   = axienet_ethtools_set_coalesce,
1332};
1333
1334/**
1335 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
1336 * @data:	Pointer to the axienet_local structure, passed in as unsigned long
1337 *
1338 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
1339 * Tx/Rx BDs.
1340 */
1341static void axienet_dma_err_handler(unsigned long data)
1342{
1343	u32 axienet_status;
1344	u32 cr, i;
1345	int mdio_mcreg;
1346	struct axienet_local *lp = (struct axienet_local *) data;
1347	struct net_device *ndev = lp->ndev;
1348	struct axidma_bd *cur_p;
1349
1350	axienet_setoptions(ndev, lp->options &
1351			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1352	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1353	axienet_mdio_wait_until_ready(lp);
1354	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
1355	 * When we do an Axi Ethernet reset, it resets the complete core
1356	 * including the MDIO. So if MDIO is not disabled when the reset
1357	 * process is started, MDIO will be broken afterwards. */
1358	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
1359		    ~XAE_MDIO_MC_MDIOEN_MASK));
1360
1361	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
1362	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
1363
1364	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
1365	axienet_mdio_wait_until_ready(lp);
1366
1367	for (i = 0; i < TX_BD_NUM; i++) {
1368		cur_p = &lp->tx_bd_v[i];
1369		if (cur_p->phys)
1370			dma_unmap_single(ndev->dev.parent, cur_p->phys,
1371					 (cur_p->cntrl &
1372					  XAXIDMA_BD_CTRL_LENGTH_MASK),
1373					 DMA_TO_DEVICE);
1374		if (cur_p->app4)
1375			dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
1376		cur_p->phys = 0;
1377		cur_p->cntrl = 0;
1378		cur_p->status = 0;
1379		cur_p->app0 = 0;
1380		cur_p->app1 = 0;
1381		cur_p->app2 = 0;
1382		cur_p->app3 = 0;
1383		cur_p->app4 = 0;
1384		cur_p->sw_id_offset = 0;
1385	}
1386
1387	for (i = 0; i < RX_BD_NUM; i++) {
1388		cur_p = &lp->rx_bd_v[i];
1389		cur_p->status = 0;
1390		cur_p->app0 = 0;
1391		cur_p->app1 = 0;
1392		cur_p->app2 = 0;
1393		cur_p->app3 = 0;
1394		cur_p->app4 = 0;
1395	}
1396
1397	lp->tx_bd_ci = 0;
1398	lp->tx_bd_tail = 0;
1399	lp->rx_bd_ci = 0;
1400
1401	/* Start updating the Rx channel control register */
1402	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1403	/* Update the interrupt coalesce count */
1404	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
1405	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1406	/* Update the delay timer count */
1407	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
1408	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1409	/* Enable coalesce, delay timer and error interrupts */
1410	cr |= XAXIDMA_IRQ_ALL_MASK;
1411	/* Finally write to the Rx channel control register */
1412	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1413
1414	/* Start updating the Tx channel control register */
1415	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1416	/* Update the interrupt coalesce count */
1417	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
1418	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1419	/* Update the delay timer count */
1420	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
1421	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1422	/* Enable coalesce, delay timer and error interrupts */
1423	cr |= XAXIDMA_IRQ_ALL_MASK;
1424	/* Finally write to the Tx channel control register */
1425	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1426
1427	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
1428	 * halted state. This will make the Rx side ready for reception. */
1429	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
1430	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1431	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
1432			  cr | XAXIDMA_CR_RUNSTOP_MASK);
1433	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
1434			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
1435
1436	/* Write to the RS (Run-stop) bit in the Tx channel control register.
1437	 * Tx channel is now ready to run. But only after we write to the
1438	 * tail pointer register that the Tx channel will start transmitting */
1439	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
1440	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1441	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
1442			  cr | XAXIDMA_CR_RUNSTOP_MASK);
1443
1444	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
1445	axienet_status &= ~XAE_RCW1_RX_MASK;
1446	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
1447
1448	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
1449	if (axienet_status & XAE_INT_RXRJECT_MASK)
1450		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
1451	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
1452
1453	/* Sync default options with HW but leave receiver and
1454	 * transmitter disabled. */
1455	axienet_setoptions(ndev, lp->options &
1456			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1457	axienet_set_mac_address(ndev, NULL);
1458	axienet_set_multicast_list(ndev);
1459	axienet_setoptions(ndev, lp->options);
1460}
1461
1462/**
1463 * axienet_of_probe - Axi Ethernet probe function.
1464 * @op:		Pointer to platform device structure.
1466 *
1467 * returns: 0, on success
1468 *	    Non-zero error value on failure.
1469 *
1470 * This is the probe routine for Axi Ethernet driver. This is called before
1471 * any other driver routines are invoked. It allocates and sets up the Ethernet
1472 * device, parses the device tree to populate the fields of
1473 * axienet_local, and registers the Ethernet device.
1474 */
1475static int axienet_of_probe(struct platform_device *op)
1476{
1477	__be32 *p;
1478	int size, ret = 0;
1479	struct device_node *np;
1480	struct axienet_local *lp;
1481	struct net_device *ndev;
1482	const void *addr;
1483
1484	ndev = alloc_etherdev(sizeof(*lp));
1485	if (!ndev)
1486		return -ENOMEM;
1487
1488	ether_setup(ndev);
1489	platform_set_drvdata(op, ndev);
1490
1491	SET_NETDEV_DEV(ndev, &op->dev);
1492	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
1493	ndev->features = NETIF_F_SG;
1494	ndev->netdev_ops = &axienet_netdev_ops;
1495	ndev->ethtool_ops = &axienet_ethtool_ops;
1496
1497	lp = netdev_priv(ndev);
1498	lp->ndev = ndev;
1499	lp->dev = &op->dev;
1500	lp->options = XAE_OPTION_DEFAULTS;
1501	/* Map device registers */
1502	lp->regs = of_iomap(op->dev.of_node, 0);
1503	if (!lp->regs) {
1504		dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
1505		goto nodev;
1506	}
1507	/* Setup checksum offload, but default to off if not specified */
1508	lp->features = 0;
1509
1510	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
1511	if (p) {
1512		switch (be32_to_cpup(p)) {
1513		case 1:
1514			lp->csum_offload_on_tx_path =
1515				XAE_FEATURE_PARTIAL_TX_CSUM;
1516			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
1517			/* Can checksum TCP/UDP over IPv4. */
1518			ndev->features |= NETIF_F_IP_CSUM;
1519			break;
1520		case 2:
1521			lp->csum_offload_on_tx_path =
1522				XAE_FEATURE_FULL_TX_CSUM;
1523			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
1524			/* Can checksum TCP/UDP over IPv4. */
1525			ndev->features |= NETIF_F_IP_CSUM;
1526			break;
1527		default:
1528			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
1529		}
1530	}
1531	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
1532	if (p) {
1533		switch (be32_to_cpup(p)) {
1534		case 1:
1535			lp->csum_offload_on_rx_path =
1536				XAE_FEATURE_PARTIAL_RX_CSUM;
1537			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
1538			break;
1539		case 2:
1540			lp->csum_offload_on_rx_path =
1541				XAE_FEATURE_FULL_RX_CSUM;
1542			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
1543			break;
1544		default:
1545			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
1546		}
1547	}
1548	/* For supporting jumbo frames, the Axi Ethernet hardware must have
1549	 * a larger Rx/Tx Memory. Typically, the size must be more than or
1550	 * equal to 16384 bytes, so that we can enable jumbo option and start
1551	 * supporting jumbo frames. Here we check for memory allocated for
1552	 * Rx/Tx in the hardware from the device-tree and accordingly set
1553	 * flags. */
1554	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
1555	if (p) {
1556		if ((be32_to_cpup(p)) >= 0x4000)
1557			lp->jumbo_support = 1;
1558	}
1559	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
1560				       NULL);
1561	if (p)
1562		lp->temac_type = be32_to_cpup(p);
1563	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
1564	if (p)
1565		lp->phy_type = be32_to_cpup(p);
1566
1567	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
1568	np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
1569	if (!np) {
1570		dev_err(&op->dev, "could not find DMA node\n");
1571		goto err_iounmap;
1572	}
1573	lp->dma_regs = of_iomap(np, 0);
1574	if (lp->dma_regs) {
1575		dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
1576	} else {
1577		dev_err(&op->dev, "unable to map DMA registers\n");
1578		of_node_put(np);
1579	}
1580	lp->rx_irq = irq_of_parse_and_map(np, 1);
1581	lp->tx_irq = irq_of_parse_and_map(np, 0);
1582	of_node_put(np);
1583	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
1584		dev_err(&op->dev, "could not determine irqs\n");
1585		ret = -ENOMEM;
1586		goto err_iounmap_2;
1587	}
1588
1589	/* Retrieve the MAC address */
1590	addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
1591	if ((!addr) || (size != 6)) {
1592		dev_err(&op->dev, "could not find MAC address\n");
1593		ret = -ENODEV;
1594		goto err_iounmap_2;
1595	}
1596	axienet_set_mac_address(ndev, (void *) addr);
1597
1598	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
1599	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
1600
1601	lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
1602	ret = axienet_mdio_setup(lp, op->dev.of_node);
1603	if (ret)
1604		dev_warn(&op->dev, "error registering MDIO bus\n");
1605
1606	ret = register_netdev(lp->ndev);
1607	if (ret) {
1608		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
1609		goto err_iounmap_2;
1610	}
1611
1612	return 0;
1613
1614err_iounmap_2:
1615	if (lp->dma_regs)
1616		iounmap(lp->dma_regs);
1617err_iounmap:
1618	iounmap(lp->regs);
1619nodev:
1620	free_netdev(ndev);
1621	ndev = NULL;
1622	return ret;
1623}
1624
1625static int axienet_of_remove(struct platform_device *op)
1626{
1627	struct net_device *ndev = platform_get_drvdata(op);
1628	struct axienet_local *lp = netdev_priv(ndev);
1629
1630	axienet_mdio_teardown(lp);
1631	unregister_netdev(ndev);
1632
1633	if (lp->phy_node)
1634		of_node_put(lp->phy_node);
1635	lp->phy_node = NULL;
1636
1637	iounmap(lp->regs);
1638	if (lp->dma_regs)
1639		iounmap(lp->dma_regs);
1640	free_netdev(ndev);
1641
1642	return 0;
1643}
1644
1645static struct platform_driver axienet_of_driver = {
1646	.probe = axienet_of_probe,
1647	.remove = axienet_of_remove,
1648	.driver = {
1649		 .owner = THIS_MODULE,
1650		 .name = "xilinx_axienet",
1651		 .of_match_table = axienet_of_match,
1652	},
1653};
1654
1655module_platform_driver(axienet_of_driver);
1656
1657MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
1658MODULE_AUTHOR("Xilinx");
1659MODULE_LICENSE("GPL");