   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Xilinx Axi Ethernet device driver
   4 *
   5 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
   6 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
   7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
   8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
   9 * Copyright (c) 2010 - 2011 PetaLogix
  10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
  11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  12 *
  13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
  14 * and Spartan6.
  15 *
  16 * TODO:
  17 *  - Add Axi Fifo support.
  18 *  - Factor out Axi DMA code into separate driver.
  19 *  - Test and fix basic multicast filtering.
  20 *  - Add support for extended multicast filtering.
  21 *  - Test basic VLAN support.
  22 *  - Add support for extended VLAN support.
  23 */
  24
  25#include <linux/clk.h>
  26#include <linux/delay.h>
  27#include <linux/etherdevice.h>
  28#include <linux/module.h>
  29#include <linux/netdevice.h>
  30#include <linux/of.h>
  31#include <linux/of_mdio.h>
  32#include <linux/of_net.h>
  33#include <linux/of_irq.h>
  34#include <linux/of_address.h>
  35#include <linux/platform_device.h>
  36#include <linux/skbuff.h>
  37#include <linux/math64.h>
  38#include <linux/phy.h>
  39#include <linux/mii.h>
  40#include <linux/ethtool.h>
  41#include <linux/dmaengine.h>
  42#include <linux/dma-mapping.h>
  43#include <linux/dma/xilinx_dma.h>
  44#include <linux/circ_buf.h>
  45#include <net/netdev_queues.h>
  46
  47#include "xilinx_axienet.h"
  48
  49/* Descriptors defines for Tx and Rx DMA */
  50#define TX_BD_NUM_DEFAULT		128
  51#define RX_BD_NUM_DEFAULT		1024
  52#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
  53#define TX_BD_NUM_MAX			4096
  54#define RX_BD_NUM_MAX			4096
  55#define DMA_NUM_APP_WORDS		5
  56#define LEN_APP				4
  57#define RX_BUF_NUM_DEFAULT		128
  58
  59/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
  60#define DRIVER_NAME		"xaxienet"
  61#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
  62#define DRIVER_VERSION		"1.00a"
  63
  64#define AXIENET_REGS_N		40
  65
  66static void axienet_rx_submit_desc(struct net_device *ndev);
  67
  68/* Match table for of_platform binding */
  69static const struct of_device_id axienet_of_match[] = {
  70	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
  71	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
  72	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
  73	{},
  74};
  75
  76MODULE_DEVICE_TABLE(of, axienet_of_match);
  77
  78/* Option table for setting up Axi Ethernet hardware options */
  79static struct axienet_option axienet_options[] = {
  80	/* Turn on jumbo packet support for both Rx and Tx */
  81	{
  82		.opt = XAE_OPTION_JUMBO,
  83		.reg = XAE_TC_OFFSET,
  84		.m_or = XAE_TC_JUM_MASK,
  85	}, {
  86		.opt = XAE_OPTION_JUMBO,
  87		.reg = XAE_RCW1_OFFSET,
  88		.m_or = XAE_RCW1_JUM_MASK,
  89	}, { /* Turn on VLAN packet support for both Rx and Tx */
  90		.opt = XAE_OPTION_VLAN,
  91		.reg = XAE_TC_OFFSET,
  92		.m_or = XAE_TC_VLAN_MASK,
  93	}, {
  94		.opt = XAE_OPTION_VLAN,
  95		.reg = XAE_RCW1_OFFSET,
  96		.m_or = XAE_RCW1_VLAN_MASK,
  97	}, { /* Turn on FCS stripping on receive packets */
  98		.opt = XAE_OPTION_FCS_STRIP,
  99		.reg = XAE_RCW1_OFFSET,
 100		.m_or = XAE_RCW1_FCS_MASK,
 101	}, { /* Turn on FCS insertion on transmit packets */
 102		.opt = XAE_OPTION_FCS_INSERT,
 103		.reg = XAE_TC_OFFSET,
 104		.m_or = XAE_TC_FCS_MASK,
 105	}, { /* Turn off length/type field checking on receive packets */
 106		.opt = XAE_OPTION_LENTYPE_ERR,
 107		.reg = XAE_RCW1_OFFSET,
 108		.m_or = XAE_RCW1_LT_DIS_MASK,
 109	}, { /* Turn on Rx flow control */
 110		.opt = XAE_OPTION_FLOW_CONTROL,
 111		.reg = XAE_FCC_OFFSET,
 112		.m_or = XAE_FCC_FCRX_MASK,
 113	}, { /* Turn on Tx flow control */
 114		.opt = XAE_OPTION_FLOW_CONTROL,
 115		.reg = XAE_FCC_OFFSET,
 116		.m_or = XAE_FCC_FCTX_MASK,
 117	}, { /* Turn on promiscuous frame filtering */
 118		.opt = XAE_OPTION_PROMISC,
 119		.reg = XAE_FMI_OFFSET,
 120		.m_or = XAE_FMI_PM_MASK,
 121	}, { /* Enable transmitter */
 122		.opt = XAE_OPTION_TXEN,
 123		.reg = XAE_TC_OFFSET,
 124		.m_or = XAE_TC_TX_MASK,
 125	}, { /* Enable receiver */
 126		.opt = XAE_OPTION_RXEN,
 127		.reg = XAE_RCW1_OFFSET,
 128		.m_or = XAE_RCW1_RX_MASK,
 129	},
 130	{}
 131};
 132
 133static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
 134{
 135	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
 136}
 137
 138static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
 139{
 140	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
 141}
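/*
 * Illustrative note (not part of the driver): both ring sizes used above are
 * powers of two, so masking a free-running index is equivalent to a modulo
 * and never needs an explicit wrap, e.g.:
 *
 *	unsigned int head = 4100;
 *	unsigned int slot = head & (TX_BD_NUM_MAX - 1);	// 4100 & 4095 == 4
 *
 * This is why lp->tx_ring_head/lp->tx_ring_tail (and the RX equivalents) in
 * the dmaengine path below are simply incremented and passed here unmasked.
 */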
 142
 143/**
 144 * axienet_dma_in32 - Memory mapped Axi DMA register read
 145 * @lp:		Pointer to axienet local structure
 146 * @reg:	Address offset from the base address of the Axi DMA core
 147 *
 148 * Return: The contents of the Axi DMA register
 149 *
 150 * This function returns the contents of the corresponding Axi DMA register.
 151 */
 152static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 153{
 154	return ioread32(lp->dma_regs + reg);
 155}
 156
 157static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
 158			       struct axidma_bd *desc)
 159{
 160	desc->phys = lower_32_bits(addr);
 161	if (lp->features & XAE_FEATURE_DMA_64BIT)
 162		desc->phys_msb = upper_32_bits(addr);
 163}
 164
 165static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
 166				     struct axidma_bd *desc)
 167{
 168	dma_addr_t ret = desc->phys;
 169
 170	if (lp->features & XAE_FEATURE_DMA_64BIT)
 171		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
 172
 173	return ret;
 174}
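/*
 * Worked example (illustrative): with XAE_FEATURE_DMA_64BIT set and
 * addr == 0x0000000123456780ULL, desc_set_phys_addr() stores
 *
 *	desc->phys     = 0x23456780;	// lower_32_bits(addr)
 *	desc->phys_msb = 0x00000001;	// upper_32_bits(addr)
 *
 * and desc_get_phys_addr() reassembles the same value. Shifting phys_msb by
 * 16 twice instead of by 32 once keeps the expression well defined when
 * dma_addr_t is only 32 bits wide.
 */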
 175
 176/**
 177 * axienet_dma_bd_release - Release buffer descriptor rings
 178 * @ndev:	Pointer to the net_device structure
 179 *
 180 * This function is used to release the descriptors allocated in
  181 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi
  182 * Ethernet driver's stop routine is called.
 183 */
 184static void axienet_dma_bd_release(struct net_device *ndev)
 185{
 186	int i;
 187	struct axienet_local *lp = netdev_priv(ndev);
 188
 189	/* If we end up here, tx_bd_v must have been DMA allocated. */
 190	dma_free_coherent(lp->dev,
 191			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 192			  lp->tx_bd_v,
 193			  lp->tx_bd_p);
 194
 195	if (!lp->rx_bd_v)
 196		return;
 197
 198	for (i = 0; i < lp->rx_bd_num; i++) {
 199		dma_addr_t phys;
 200
 201		/* A NULL skb means this descriptor has not been initialised
 202		 * at all.
 203		 */
 204		if (!lp->rx_bd_v[i].skb)
 205			break;
 206
 207		dev_kfree_skb(lp->rx_bd_v[i].skb);
 208
 209		/* For each descriptor, we programmed cntrl with the (non-zero)
 210		 * descriptor size, after it had been successfully allocated.
 211		 * So a non-zero value in there means we need to unmap it.
 212		 */
 213		if (lp->rx_bd_v[i].cntrl) {
 214			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
 215			dma_unmap_single(lp->dev, phys,
 216					 lp->max_frm_size, DMA_FROM_DEVICE);
 217		}
 218	}
 219
 220	dma_free_coherent(lp->dev,
 221			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 222			  lp->rx_bd_v,
 223			  lp->rx_bd_p);
 224}
 225
 226/**
 227 * axienet_usec_to_timer - Calculate IRQ delay timer value
 228 * @lp:		Pointer to the axienet_local structure
 229 * @coalesce_usec: Microseconds to convert into timer value
 230 */
 231static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
 232{
 233	u32 result;
 234	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
 235
 236	if (lp->axi_clk)
 237		clk_rate = clk_get_rate(lp->axi_clk);
 238
 239	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
 240	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
 241					 (u64)125000000);
 242	if (result > 255)
 243		result = 255;
 244
 245	return result;
 246}
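/*
 * Worked example (illustrative): with a 100 MHz AXI clock and
 * coalesce_usec == 40,
 *
 *	result = DIV64_U64_ROUND_CLOSEST(40 * 100000000ULL, 125000000ULL) = 32,
 *
 * i.e. one delay-timer tick spans 125 SG clock periods. Any result that
 * would overflow the 8-bit timer field is clamped to 255.
 */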
 247
 248/**
 249 * axienet_dma_start - Set up DMA registers and start DMA operation
 250 * @lp:		Pointer to the axienet_local structure
 251 */
 252static void axienet_dma_start(struct axienet_local *lp)
 253{
 254	/* Start updating the Rx channel control register */
 255	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
 256			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 257	/* Only set interrupt delay timer if not generating an interrupt on
 258	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
 259	 */
 260	if (lp->coalesce_count_rx > 1)
 261		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
 262					<< XAXIDMA_DELAY_SHIFT) |
 263				 XAXIDMA_IRQ_DELAY_MASK;
 264	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 265
 266	/* Start updating the Tx channel control register */
 267	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
 268			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
 269	/* Only set interrupt delay timer if not generating an interrupt on
 270	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
 271	 */
 272	if (lp->coalesce_count_tx > 1)
 273		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
 274					<< XAXIDMA_DELAY_SHIFT) |
 275				 XAXIDMA_IRQ_DELAY_MASK;
 276	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 277
 278	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 279	 * halted state. This will make the Rx side ready for reception.
 280	 */
 281	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 282	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 283	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 284	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
 285			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
 286
 287	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 288	 * Tx channel is now ready to run. But only after we write to the
 289	 * tail pointer register that the Tx channel will start transmitting.
 290	 */
 291	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 292	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 293	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 294}
 295
 296/**
 297 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 298 * @ndev:	Pointer to the net_device structure
 299 *
  300 * Return: 0 on success; -ENOMEM on failure
 301 *
 302 * This function is called to initialize the Rx and Tx DMA descriptor
  303 * rings. It initializes the descriptors with the required default values
  304 * and is invoked from the Axi Ethernet driver reset path.
 305 */
 306static int axienet_dma_bd_init(struct net_device *ndev)
 307{
 308	int i;
 309	struct sk_buff *skb;
 310	struct axienet_local *lp = netdev_priv(ndev);
 311
 312	/* Reset the indexes which are used for accessing the BDs */
 313	lp->tx_bd_ci = 0;
 314	lp->tx_bd_tail = 0;
 315	lp->rx_bd_ci = 0;
 316
 317	/* Allocate the Tx and Rx buffer descriptors. */
 318	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
 319					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 320					 &lp->tx_bd_p, GFP_KERNEL);
 321	if (!lp->tx_bd_v)
 322		return -ENOMEM;
 323
 324	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
 325					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 326					 &lp->rx_bd_p, GFP_KERNEL);
 327	if (!lp->rx_bd_v)
 328		goto out;
 329
 330	for (i = 0; i < lp->tx_bd_num; i++) {
 331		dma_addr_t addr = lp->tx_bd_p +
 332				  sizeof(*lp->tx_bd_v) *
 333				  ((i + 1) % lp->tx_bd_num);
 334
 335		lp->tx_bd_v[i].next = lower_32_bits(addr);
 336		if (lp->features & XAE_FEATURE_DMA_64BIT)
 337			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
 338	}
 339
 340	for (i = 0; i < lp->rx_bd_num; i++) {
 341		dma_addr_t addr;
 342
 343		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
 344			((i + 1) % lp->rx_bd_num);
 345		lp->rx_bd_v[i].next = lower_32_bits(addr);
 346		if (lp->features & XAE_FEATURE_DMA_64BIT)
 347			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
 348
 349		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 350		if (!skb)
 351			goto out;
 352
 353		lp->rx_bd_v[i].skb = skb;
 354		addr = dma_map_single(lp->dev, skb->data,
 355				      lp->max_frm_size, DMA_FROM_DEVICE);
 356		if (dma_mapping_error(lp->dev, addr)) {
 357			netdev_err(ndev, "DMA mapping error\n");
 358			goto out;
 359		}
 360		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
 361
 362		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 363	}
 364
 365	axienet_dma_start(lp);
 366
 367	return 0;
 368out:
 369	axienet_dma_bd_release(ndev);
 370	return -ENOMEM;
 371}
 372
 373/**
 374 * axienet_set_mac_address - Write the MAC address
 375 * @ndev:	Pointer to the net_device structure
 376 * @address:	6 byte Address to be written as MAC address
 377 *
 378 * This function is called to initialize the MAC address of the Axi Ethernet
 379 * core. It writes to the UAW0 and UAW1 registers of the core.
 380 */
 381static void axienet_set_mac_address(struct net_device *ndev,
 382				    const void *address)
 383{
 384	struct axienet_local *lp = netdev_priv(ndev);
 385
 386	if (address)
 387		eth_hw_addr_set(ndev, address);
 388	if (!is_valid_ether_addr(ndev->dev_addr))
 389		eth_hw_addr_random(ndev);
 390
  391	/* Set up the unicast MAC address filter with this MAC address */
 392	axienet_iow(lp, XAE_UAW0_OFFSET,
 393		    (ndev->dev_addr[0]) |
 394		    (ndev->dev_addr[1] << 8) |
 395		    (ndev->dev_addr[2] << 16) |
 396		    (ndev->dev_addr[3] << 24));
 397	axienet_iow(lp, XAE_UAW1_OFFSET,
 398		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
 399		      ~XAE_UAW1_UNICASTADDR_MASK) |
 400		     (ndev->dev_addr[4] |
 401		     (ndev->dev_addr[5] << 8))));
 402}
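/*
 * Worked example (illustrative): for the locally administered address
 * 02:0a:35:01:02:03 the writes above become
 *
 *	UAW0 = 0x01350a02;					// bytes 0-3, byte 0 in bits 7:0
 *	UAW1 = (UAW1 & ~XAE_UAW1_UNICASTADDR_MASK) | 0x0302;	// bytes 4-5
 *
 * so the station address occupies all of UAW0 and the low 16 bits of UAW1.
 */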
 403
 404/**
 405 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 406 * @ndev:	Pointer to the net_device structure
 407 * @p:		6 byte Address to be written as MAC address
 408 *
 409 * Return: 0 for all conditions. Presently, there is no failure case.
 410 *
 411 * This function is called to initialize the MAC address of the Axi Ethernet
 412 * core. It calls the core specific axienet_set_mac_address. This is the
 413 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 414 */
 415static int netdev_set_mac_address(struct net_device *ndev, void *p)
 416{
 417	struct sockaddr *addr = p;
 418
 419	axienet_set_mac_address(ndev, addr->sa_data);
 420	return 0;
 421}
 422
 423/**
 424 * axienet_set_multicast_list - Prepare the multicast table
 425 * @ndev:	Pointer to the net_device structure
 426 *
 427 * This function is called to initialize the multicast table during
 428 * initialization. The Axi Ethernet basic multicast support has a four-entry
 429 * multicast table which is initialized here. Additionally this function
 430 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 431 * means whenever the multicast table entries need to be updated this
 432 * function gets called.
 433 */
 434static void axienet_set_multicast_list(struct net_device *ndev)
 435{
 436	int i = 0;
 437	u32 reg, af0reg, af1reg;
 438	struct axienet_local *lp = netdev_priv(ndev);
 439
 440	reg = axienet_ior(lp, XAE_FMI_OFFSET);
 441	reg &= ~XAE_FMI_PM_MASK;
 442	if (ndev->flags & IFF_PROMISC)
 443		reg |= XAE_FMI_PM_MASK;
 444	else
 445		reg &= ~XAE_FMI_PM_MASK;
 446	axienet_iow(lp, XAE_FMI_OFFSET, reg);
 447
 448	if (ndev->flags & IFF_ALLMULTI ||
 449	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
 450		reg &= 0xFFFFFF00;
 451		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 452		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
 453		axienet_iow(lp, XAE_AF1_OFFSET, 0);
 454		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
 455		axienet_iow(lp, XAE_AM1_OFFSET, 0);
 456		axienet_iow(lp, XAE_FFE_OFFSET, 1);
 457		i = 1;
 458	} else if (!netdev_mc_empty(ndev)) {
 459		struct netdev_hw_addr *ha;
 460
 461		netdev_for_each_mc_addr(ha, ndev) {
 462			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
 463				break;
 464
 465			af0reg = (ha->addr[0]);
 466			af0reg |= (ha->addr[1] << 8);
 467			af0reg |= (ha->addr[2] << 16);
 468			af0reg |= (ha->addr[3] << 24);
 469
 470			af1reg = (ha->addr[4]);
 471			af1reg |= (ha->addr[5] << 8);
 472
 473			reg &= 0xFFFFFF00;
 474			reg |= i;
 475
 476			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 477			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
 478			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
 479			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
 480			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
 481			axienet_iow(lp, XAE_FFE_OFFSET, 1);
 482			i++;
 483		}
 484	}
 485
 486	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
 487		reg &= 0xFFFFFF00;
 488		reg |= i;
 489		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 490		axienet_iow(lp, XAE_FFE_OFFSET, 0);
 491	}
 492}
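/*
 * Worked example (illustrative): joining only the group 01:00:5e:00:00:01
 * takes the !netdev_mc_empty() branch and programs CAM entry 0 as
 *
 *	af0reg = 0x005e0001;			// address bytes 0-3
 *	af1reg = 0x00000100;			// address bytes 4-5
 *	FMI    = (FMI & 0xFFFFFF00) | 0;	// entry index in the low byte
 *	FFE    = 1;				// enable this entry
 *
 * with the AM0/AM1 mask registers set for an exact match. The trailing loop
 * then writes FFE = 0 for the unused entries 1-3 of the four-entry table.
 */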
 493
 494/**
 495 * axienet_setoptions - Set an Axi Ethernet option
 496 * @ndev:	Pointer to the net_device structure
 497 * @options:	Option to be enabled/disabled
 498 *
 499 * The Axi Ethernet core has multiple features which can be selectively turned
 500 * on or off. The typical options could be jumbo frame option, basic VLAN
 501 * option, promiscuous mode option etc. This function is used to set or clear
 502 * these options in the Axi Ethernet hardware. This is done through
  503 * the axienet_option structure.
 504 */
 505static void axienet_setoptions(struct net_device *ndev, u32 options)
 506{
 507	int reg;
 508	struct axienet_local *lp = netdev_priv(ndev);
 509	struct axienet_option *tp = &axienet_options[0];
 510
 511	while (tp->opt) {
 512		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
 513		if (options & tp->opt)
 514			reg |= tp->m_or;
 515		axienet_iow(lp, tp->reg, reg);
 516		tp++;
 517	}
 518
 519	lp->options |= options;
 520}
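/*
 * Usage sketch (illustrative): because XAE_OPTION_JUMBO appears twice in
 * axienet_options[], a single call such as
 *
 *	axienet_setoptions(ndev, lp->options | XAE_OPTION_JUMBO);
 *
 * read-modify-writes both XAE_TC_OFFSET and XAE_RCW1_OFFSET, setting the
 * jumbo bit on the transmit and receive sides in one pass over the table.
 */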
 521
 522static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
 523{
 524	u32 counter;
 525
 526	if (lp->reset_in_progress)
 527		return lp->hw_stat_base[stat];
 528
 529	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
 530	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
 531}
 532
 533static void axienet_stats_update(struct axienet_local *lp, bool reset)
 534{
 535	enum temac_stat stat;
 536
 537	write_seqcount_begin(&lp->hw_stats_seqcount);
 538	lp->reset_in_progress = reset;
 539	for (stat = 0; stat < STAT_COUNT; stat++) {
 540		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
 541
 542		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
 543		lp->hw_last_counter[stat] = counter;
 544	}
 545	write_seqcount_end(&lp->hw_stats_seqcount);
 546}
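/*
 * Worked example (illustrative): the hardware statistics counters are 32 bits
 * wide and wrap, but the u32 subtraction above still yields the correct
 * delta. If hw_last_counter[stat] == 0xFFFFFFF0 and the register is re-read
 * as 0x00000010, then (0x10 - 0xFFFFFFF0) == 0x20, so hw_stat_base[stat]
 * advances by 32 even though the raw counter value went "backwards".
 */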
 547
 548static void axienet_refresh_stats(struct work_struct *work)
 549{
 550	struct axienet_local *lp = container_of(work, struct axienet_local,
 551						stats_work.work);
 552
 553	mutex_lock(&lp->stats_lock);
 554	axienet_stats_update(lp, false);
 555	mutex_unlock(&lp->stats_lock);
 556
 557	/* Just less than 2^32 bytes at 2.5 GBit/s */
 558	schedule_delayed_work(&lp->stats_work, 13 * HZ);
 559}
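/*
 * Arithmetic behind the 13 s refresh period (illustrative): at 2.5 Gbit/s the
 * byte counters advance by at most 2.5e9 / 8 = 312.5 MB/s, so a 32-bit
 * counter (~4.29e9 bytes) could wrap after roughly 13.7 s. Refreshing every
 * 13 s therefore keeps each counter from wrapping more than once between
 * samples.
 */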
 560
 561static int __axienet_device_reset(struct axienet_local *lp)
 562{
 563	u32 value;
 564	int ret;
 565
 566	/* Save statistics counters in case they will be reset */
 567	mutex_lock(&lp->stats_lock);
 568	if (lp->features & XAE_FEATURE_STATS)
 569		axienet_stats_update(lp, true);
 570
 571	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 572	 * process of Axi DMA takes a while to complete as all pending
 573	 * commands/transfers will be flushed or completed during this
 574	 * reset process.
 575	 * Note that even though both TX and RX have their own reset register,
 576	 * they both reset the entire DMA core, so only one needs to be used.
 577	 */
 578	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
 579	ret = read_poll_timeout(axienet_dma_in32, value,
 580				!(value & XAXIDMA_CR_RESET_MASK),
 581				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 582				XAXIDMA_TX_CR_OFFSET);
 583	if (ret) {
 584		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
 585		goto out;
 586	}
 587
 588	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
 589	ret = read_poll_timeout(axienet_ior, value,
 590				value & XAE_INT_PHYRSTCMPLT_MASK,
 591				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
 592				XAE_IS_OFFSET);
 593	if (ret) {
 594		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
 595		goto out;
 596	}
 597
 598	/* Update statistics counters with new values */
 599	if (lp->features & XAE_FEATURE_STATS) {
 600		enum temac_stat stat;
 601
 602		write_seqcount_begin(&lp->hw_stats_seqcount);
 603		lp->reset_in_progress = false;
 604		for (stat = 0; stat < STAT_COUNT; stat++) {
 605			u32 counter =
 606				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
 607
 608			lp->hw_stat_base[stat] +=
 609				lp->hw_last_counter[stat] - counter;
 610			lp->hw_last_counter[stat] = counter;
 611		}
 612		write_seqcount_end(&lp->hw_stats_seqcount);
 613	}
 614
 615out:
 616	mutex_unlock(&lp->stats_lock);
 617	return ret;
 618}
 619
 620/**
 621 * axienet_dma_stop - Stop DMA operation
 622 * @lp:		Pointer to the axienet_local structure
 623 */
 624static void axienet_dma_stop(struct axienet_local *lp)
 625{
 626	int count;
 627	u32 cr, sr;
 628
 629	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 630	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 631	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 632	synchronize_irq(lp->rx_irq);
 633
 634	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 635	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 636	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 637	synchronize_irq(lp->tx_irq);
 638
 639	/* Give DMAs a chance to halt gracefully */
 640	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 641	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 642		msleep(20);
 643		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 644	}
 645
 646	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 647	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
 648		msleep(20);
 649		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 650	}
 651
 652	/* Do a reset to ensure DMA is really stopped */
 653	axienet_lock_mii(lp);
 654	__axienet_device_reset(lp);
 655	axienet_unlock_mii(lp);
 656}
 657
 658/**
 659 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 660 * @ndev:	Pointer to the net_device structure
 661 *
 662 * This function is called to reset and initialize the Axi Ethernet core. This
 663 * is typically called during initialization. It does a reset of the Axi DMA
 664 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 665 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 666 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 667 * core.
 668 * Returns 0 on success or a negative error number otherwise.
 669 */
 670static int axienet_device_reset(struct net_device *ndev)
 671{
 672	u32 axienet_status;
 673	struct axienet_local *lp = netdev_priv(ndev);
 674	int ret;
 675
 676	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 677	lp->options |= XAE_OPTION_VLAN;
 678	lp->options &= (~XAE_OPTION_JUMBO);
 679
 680	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
 681		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
 682					XAE_TRL_SIZE;
 683
 684		if (lp->max_frm_size <= lp->rxmem)
 685			lp->options |= XAE_OPTION_JUMBO;
 686	}
 687
 688	if (!lp->use_dmaengine) {
 689		ret = __axienet_device_reset(lp);
 690		if (ret)
 691			return ret;
 692
 693		ret = axienet_dma_bd_init(ndev);
 694		if (ret) {
 695			netdev_err(ndev, "%s: descriptor allocation failed\n",
 696				   __func__);
 697			return ret;
 698		}
 699	}
 700
 701	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
 702	axienet_status &= ~XAE_RCW1_RX_MASK;
 703	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
 704
 705	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
 706	if (axienet_status & XAE_INT_RXRJECT_MASK)
 707		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
 708	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
 709		    XAE_INT_RECV_ERROR_MASK : 0);
 710
 711	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 712
 713	/* Sync default options with HW but leave receiver and
 714	 * transmitter disabled.
 715	 */
 716	axienet_setoptions(ndev, lp->options &
 717			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 718	axienet_set_mac_address(ndev, NULL);
 719	axienet_set_multicast_list(ndev);
 720	axienet_setoptions(ndev, lp->options);
 721
 722	netif_trans_update(ndev);
 723
 724	return 0;
 725}
 726
 727/**
 728 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 729 * @lp:		Pointer to the axienet_local structure
 730 * @first_bd:	Index of first descriptor to clean up
 731 * @nr_bds:	Max number of descriptors to clean up
 732 * @force:	Whether to clean descriptors even if not complete
 733 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 734 *		in all cleaned-up descriptors. Ignored if NULL.
 735 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 736 *
 737 * Would either be called after a successful transmit operation, or after
 738 * there was an error when setting up the chain.
 739 * Returns the number of packets handled.
 740 */
 741static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
 742				 int nr_bds, bool force, u32 *sizep, int budget)
 743{
 744	struct axidma_bd *cur_p;
 745	unsigned int status;
 746	int i, packets = 0;
 747	dma_addr_t phys;
 748
 749	for (i = 0; i < nr_bds; i++) {
 750		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
 751		status = cur_p->status;
 752
 753		/* If force is not specified, clean up only descriptors
 754		 * that have been completed by the MAC.
 755		 */
 756		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 757			break;
 758
 759		/* Ensure we see complete descriptor update */
 760		dma_rmb();
 761		phys = desc_get_phys_addr(lp, cur_p);
 762		dma_unmap_single(lp->dev, phys,
 763				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 764				 DMA_TO_DEVICE);
 765
 766		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
 767			napi_consume_skb(cur_p->skb, budget);
 768			packets++;
 769		}
 770
 771		cur_p->app0 = 0;
 772		cur_p->app1 = 0;
 773		cur_p->app2 = 0;
 774		cur_p->app4 = 0;
 775		cur_p->skb = NULL;
 776		/* ensure our transmit path and device don't prematurely see status cleared */
 777		wmb();
 778		cur_p->cntrl = 0;
 779		cur_p->status = 0;
 780
 781		if (sizep)
 782			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 783	}
 784
 785	if (!force) {
 786		lp->tx_bd_ci += i;
 787		if (lp->tx_bd_ci >= lp->tx_bd_num)
 788			lp->tx_bd_ci %= lp->tx_bd_num;
 789	}
 790
 791	return packets;
 792}
 793
 794/**
 795 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 796 * @lp:		Pointer to the axienet_local structure
 797 * @num_frag:	The number of BDs to check for
 798 *
 799 * Return: 0, on success
 800 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 801 *
 802 * This function is invoked before BDs are allocated and transmission starts.
 803 * This function returns 0 if a BD or group of BDs can be allocated for
 804 * transmission. If the BD or any of the BDs are not free the function
 805 * returns a busy status.
 806 */
 807static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 808					    int num_frag)
 809{
 810	struct axidma_bd *cur_p;
 811
 812	/* Ensure we see all descriptor updates from device or TX polling */
 813	rmb();
 814	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
 815			     lp->tx_bd_num];
 816	if (cur_p->cntrl)
 817		return NETDEV_TX_BUSY;
 818	return 0;
 819}
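/*
 * Worked example (illustrative): with lp->tx_bd_num == 128, tx_bd_tail == 125
 * and a two-fragment skb (the caller passes num_frag == 3, one BD per
 * fragment plus one for the linear head), the check inspects BD
 * (125 + 3) % 128 == 0. A non-zero cntrl word there means
 * axienet_free_tx_chain() has not cleaned it yet, so NETDEV_TX_BUSY is
 * returned and the caller backs off.
 */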
 820
 821/**
 822 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 823 * @data:       Pointer to the axienet_local structure.
 824 * @result:     error reporting through dmaengine_result.
 825 * This function is called by dmaengine driver for TX channel to notify
 826 * that the transmit is done.
 827 */
 828static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
 829{
 830	struct skbuf_dma_descriptor *skbuf_dma;
 831	struct axienet_local *lp = data;
 832	struct netdev_queue *txq;
 833	int len;
 834
 835	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
 836	len = skbuf_dma->skb->len;
 837	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
 838	u64_stats_update_begin(&lp->tx_stat_sync);
 839	u64_stats_add(&lp->tx_bytes, len);
 840	u64_stats_add(&lp->tx_packets, 1);
 841	u64_stats_update_end(&lp->tx_stat_sync);
 842	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
 843	dev_consume_skb_any(skbuf_dma->skb);
 844	netif_txq_completed_wake(txq, 1, len,
 845				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 846				 2 * MAX_SKB_FRAGS);
 847}
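/*
 * Illustrative note: the dmaengine TX path keeps a TX_BD_NUM_MAX-entry ring
 * with free-running head/tail indices, so
 *
 *	CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX)
 *
 * evaluates to (tail - head - 1) & (TX_BD_NUM_MAX - 1), i.e. the number of
 * free slots with one slot always left unused. The wake threshold of
 * 2 * MAX_SKB_FRAGS above mirrors the restart threshold used by
 * netif_txq_maybe_stop() in axienet_start_xmit_dmaengine().
 */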
 848
 849/**
 850 * axienet_start_xmit_dmaengine - Starts the transmission.
 851 * @skb:        sk_buff pointer that contains data to be Txed.
 852 * @ndev:       Pointer to net_device structure.
 853 *
  854 * Return: NETDEV_TX_OK on success, or for any error other than a lack of
  855 *         space (the skb is dropped in that case). NETDEV_TX_BUSY when no
  856 *         free element is available in the TX skb ring buffer.
 857 *
  858 * This function is invoked to initiate transmission. It maps the skb
  859 * into a scatterlist, registers the DMA completion callback and submits
  860 * the DMA transaction.
 861 * Additionally if checksum offloading is supported,
 862 * it populates AXI Stream Control fields with appropriate values.
 863 */
 864static netdev_tx_t
 865axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
 866{
 867	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
 868	struct axienet_local *lp = netdev_priv(ndev);
 869	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
 870	struct skbuf_dma_descriptor *skbuf_dma;
 871	struct dma_device *dma_dev;
 872	struct netdev_queue *txq;
 873	u32 csum_start_off;
 874	u32 csum_index_off;
 875	int sg_len;
 876	int ret;
 877
 878	dma_dev = lp->tx_chan->device;
 879	sg_len = skb_shinfo(skb)->nr_frags + 1;
 880	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
 881		netif_stop_queue(ndev);
 882		if (net_ratelimit())
 883			netdev_warn(ndev, "TX ring unexpectedly full\n");
 884		return NETDEV_TX_BUSY;
 885	}
 886
 887	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
 888	if (!skbuf_dma)
 889		goto xmit_error_drop_skb;
 890
 891	lp->tx_ring_head++;
 892	sg_init_table(skbuf_dma->sgl, sg_len);
 893	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
 894	if (ret < 0)
 895		goto xmit_error_drop_skb;
 896
 897	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
 898	if (!ret)
 899		goto xmit_error_drop_skb;
 900
 901	/* Fill up app fields for checksum */
 902	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 903		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 904			/* Tx Full Checksum Offload Enabled */
 905			app_metadata[0] |= 2;
 906		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
 907			csum_start_off = skb_transport_offset(skb);
 908			csum_index_off = csum_start_off + skb->csum_offset;
 909			/* Tx Partial Checksum Offload Enabled */
 910			app_metadata[0] |= 1;
 911			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
 912		}
 913	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 914		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
 915	}
 916
 917	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
 918			sg_len, DMA_MEM_TO_DEV,
 919			DMA_PREP_INTERRUPT, (void *)app_metadata);
 920	if (!dma_tx_desc)
 921		goto xmit_error_unmap_sg;
 922
 923	skbuf_dma->skb = skb;
 924	skbuf_dma->sg_len = sg_len;
 925	dma_tx_desc->callback_param = lp;
 926	dma_tx_desc->callback_result = axienet_dma_tx_cb;
 927	txq = skb_get_tx_queue(lp->ndev, skb);
 928	netdev_tx_sent_queue(txq, skb->len);
 929	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 930			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
 931
 932	dmaengine_submit(dma_tx_desc);
 933	dma_async_issue_pending(lp->tx_chan);
 934	return NETDEV_TX_OK;
 935
 936xmit_error_unmap_sg:
 937	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
 938xmit_error_drop_skb:
 939	dev_kfree_skb_any(skb);
 940	return NETDEV_TX_OK;
 941}
 942
 943/**
 944 * axienet_tx_poll - Invoked once a transmit is completed by the
 945 * Axi DMA Tx channel.
 946 * @napi:	Pointer to NAPI structure.
 947 * @budget:	Max number of TX packets to process.
 948 *
 949 * Return: Number of TX packets processed.
 950 *
 951 * This function is invoked from the NAPI processing to notify the completion
 952 * of transmit operation. It clears fields in the corresponding Tx BDs and
 953 * unmaps the corresponding buffer so that CPU can regain ownership of the
 954 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 955 * required.
 956 */
 957static int axienet_tx_poll(struct napi_struct *napi, int budget)
 958{
 959	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
 960	struct net_device *ndev = lp->ndev;
 961	u32 size = 0;
 962	int packets;
 963
 964	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
 965					&size, budget);
 966
 967	if (packets) {
 968		u64_stats_update_begin(&lp->tx_stat_sync);
 969		u64_stats_add(&lp->tx_packets, packets);
 970		u64_stats_add(&lp->tx_bytes, size);
 971		u64_stats_update_end(&lp->tx_stat_sync);
 972
 973		/* Matches barrier in axienet_start_xmit */
 974		smp_mb();
 975
 976		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
 977			netif_wake_queue(ndev);
 978	}
 979
 980	if (packets < budget && napi_complete_done(napi, packets)) {
 981		/* Re-enable TX completion interrupts. This should
 982		 * cause an immediate interrupt if any TX packets are
 983		 * already pending.
 984		 */
 985		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 986	}
 987	return packets;
 988}
 989
 990/**
 991 * axienet_start_xmit - Starts the transmission.
 992 * @skb:	sk_buff pointer that contains data to be Txed.
 993 * @ndev:	Pointer to net_device structure.
 994 *
 995 * Return: NETDEV_TX_OK, on success
 996 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 997 *
 998 * This function is invoked from upper layers to initiate transmission. The
 999 * function uses the next available free BDs and populates their fields to
1000 * start the transmission. Additionally if checksum offloading is supported,
1001 * it populates AXI Stream Control fields with appropriate values.
1002 */
1003static netdev_tx_t
1004axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1005{
1006	u32 ii;
1007	u32 num_frag;
1008	u32 csum_start_off;
1009	u32 csum_index_off;
1010	skb_frag_t *frag;
1011	dma_addr_t tail_p, phys;
1012	u32 orig_tail_ptr, new_tail_ptr;
1013	struct axienet_local *lp = netdev_priv(ndev);
1014	struct axidma_bd *cur_p;
1015
1016	orig_tail_ptr = lp->tx_bd_tail;
1017	new_tail_ptr = orig_tail_ptr;
1018
1019	num_frag = skb_shinfo(skb)->nr_frags;
1020	cur_p = &lp->tx_bd_v[orig_tail_ptr];
1021
1022	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
1023		/* Should not happen as last start_xmit call should have
1024		 * checked for sufficient space and queue should only be
1025		 * woken when sufficient space is available.
1026		 */
1027		netif_stop_queue(ndev);
1028		if (net_ratelimit())
1029			netdev_warn(ndev, "TX ring unexpectedly full\n");
1030		return NETDEV_TX_BUSY;
1031	}
1032
1033	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1034		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1035			/* Tx Full Checksum Offload Enabled */
1036			cur_p->app0 |= 2;
1037		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
1038			csum_start_off = skb_transport_offset(skb);
1039			csum_index_off = csum_start_off + skb->csum_offset;
1040			/* Tx Partial Checksum Offload Enabled */
1041			cur_p->app0 |= 1;
1042			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1043		}
1044	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1045		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1046	}
1047
1048	phys = dma_map_single(lp->dev, skb->data,
1049			      skb_headlen(skb), DMA_TO_DEVICE);
1050	if (unlikely(dma_mapping_error(lp->dev, phys))) {
1051		if (net_ratelimit())
1052			netdev_err(ndev, "TX DMA mapping error\n");
1053		ndev->stats.tx_dropped++;
1054		dev_kfree_skb_any(skb);
1055		return NETDEV_TX_OK;
1056	}
1057	desc_set_phys_addr(lp, phys, cur_p);
1058	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
1059
1060	for (ii = 0; ii < num_frag; ii++) {
1061		if (++new_tail_ptr >= lp->tx_bd_num)
1062			new_tail_ptr = 0;
1063		cur_p = &lp->tx_bd_v[new_tail_ptr];
1064		frag = &skb_shinfo(skb)->frags[ii];
1065		phys = dma_map_single(lp->dev,
1066				      skb_frag_address(frag),
1067				      skb_frag_size(frag),
1068				      DMA_TO_DEVICE);
1069		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1070			if (net_ratelimit())
1071				netdev_err(ndev, "TX DMA mapping error\n");
1072			ndev->stats.tx_dropped++;
1073			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1074					      true, NULL, 0);
1075			dev_kfree_skb_any(skb);
1076			return NETDEV_TX_OK;
1077		}
1078		desc_set_phys_addr(lp, phys, cur_p);
1079		cur_p->cntrl = skb_frag_size(frag);
1080	}
1081
1082	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1083	cur_p->skb = skb;
1084
1085	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1086	if (++new_tail_ptr >= lp->tx_bd_num)
1087		new_tail_ptr = 0;
1088	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1089
1090	/* Start the transfer */
1091	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1092
1093	/* Stop queue if next transmit may not have space */
1094	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1095		netif_stop_queue(ndev);
1096
1097		/* Matches barrier in axienet_tx_poll */
1098		smp_mb();
1099
1100		/* Space might have just been freed - check again */
1101		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1102			netif_wake_queue(ndev);
1103	}
1104
1105	return NETDEV_TX_OK;
1106}
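/*
 * Illustrative descriptor layout (not part of the driver): an skb with a
 * linear head and two fragments consumes three BDs in this path:
 *
 *	BD[n].cntrl   = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 *	BD[n+1].cntrl = skb_frag_size(frag0);
 *	BD[n+2].cntrl = skb_frag_size(frag1) | XAXIDMA_BD_CTRL_TXEOF_MASK;
 *
 * Only the last BD carries the skb pointer, so axienet_free_tx_chain() frees
 * the skb exactly once, after the whole chain has completed.
 */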
1107
1108/**
1109 * axienet_dma_rx_cb - DMA engine callback for RX channel.
1110 * @data:       Pointer to the skbuf_dma_descriptor structure.
1111 * @result:     error reporting through dmaengine_result.
1112 * This function is called by dmaengine driver for RX channel to notify
1113 * that the packet is received.
1114 */
1115static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1116{
1117	struct skbuf_dma_descriptor *skbuf_dma;
1118	size_t meta_len, meta_max_len, rx_len;
1119	struct axienet_local *lp = data;
1120	struct sk_buff *skb;
1121	u32 *app_metadata;
1122
1123	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1124	skb = skbuf_dma->skb;
1125	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1126						       &meta_max_len);
1127	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1128			 DMA_FROM_DEVICE);
1129	/* TODO: Derive app word index programmatically */
1130	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1131	skb_put(skb, rx_len);
1132	skb->protocol = eth_type_trans(skb, lp->ndev);
1133	skb->ip_summed = CHECKSUM_NONE;
1134
1135	__netif_rx(skb);
1136	u64_stats_update_begin(&lp->rx_stat_sync);
1137	u64_stats_add(&lp->rx_packets, 1);
1138	u64_stats_add(&lp->rx_bytes, rx_len);
1139	u64_stats_update_end(&lp->rx_stat_sync);
1140	axienet_rx_submit_desc(lp->ndev);
1141	dma_async_issue_pending(lp->rx_chan);
1142}
1143
1144/**
1145 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1146 * @napi:	Pointer to NAPI structure.
1147 * @budget:	Max number of RX packets to process.
1148 *
1149 * Return: Number of RX packets processed.
1150 */
1151static int axienet_rx_poll(struct napi_struct *napi, int budget)
1152{
1153	u32 length;
1154	u32 csumstatus;
1155	u32 size = 0;
1156	int packets = 0;
1157	dma_addr_t tail_p = 0;
1158	struct axidma_bd *cur_p;
1159	struct sk_buff *skb, *new_skb;
1160	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1161
1162	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1163
1164	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1165		dma_addr_t phys;
1166
1167		/* Ensure we see complete descriptor update */
1168		dma_rmb();
1169
1170		skb = cur_p->skb;
1171		cur_p->skb = NULL;
1172
1173		/* skb could be NULL if a previous pass already received the
1174		 * packet for this slot in the ring, but failed to refill it
1175		 * with a newly allocated buffer. In this case, don't try to
1176		 * receive it again.
1177		 */
1178		if (likely(skb)) {
1179			length = cur_p->app4 & 0x0000FFFF;
1180
1181			phys = desc_get_phys_addr(lp, cur_p);
1182			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1183					 DMA_FROM_DEVICE);
1184
1185			skb_put(skb, length);
1186			skb->protocol = eth_type_trans(skb, lp->ndev);
1187			/*skb_checksum_none_assert(skb);*/
1188			skb->ip_summed = CHECKSUM_NONE;
1189
1190			/* if we're doing Rx csum offload, set it up */
1191			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1192				csumstatus = (cur_p->app2 &
1193					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
1194				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1195				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1196					skb->ip_summed = CHECKSUM_UNNECESSARY;
1197				}
1198			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1199				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1200				skb->ip_summed = CHECKSUM_COMPLETE;
1201			}
1202
1203			napi_gro_receive(napi, skb);
1204
1205			size += length;
1206			packets++;
1207		}
1208
1209		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1210		if (!new_skb)
1211			break;
1212
1213		phys = dma_map_single(lp->dev, new_skb->data,
1214				      lp->max_frm_size,
1215				      DMA_FROM_DEVICE);
1216		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1217			if (net_ratelimit())
1218				netdev_err(lp->ndev, "RX DMA mapping error\n");
1219			dev_kfree_skb(new_skb);
1220			break;
1221		}
1222		desc_set_phys_addr(lp, phys, cur_p);
1223
1224		cur_p->cntrl = lp->max_frm_size;
1225		cur_p->status = 0;
1226		cur_p->skb = new_skb;
1227
1228		/* Only update tail_p to mark this slot as usable after it has
1229		 * been successfully refilled.
1230		 */
1231		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1232
1233		if (++lp->rx_bd_ci >= lp->rx_bd_num)
1234			lp->rx_bd_ci = 0;
1235		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1236	}
1237
1238	u64_stats_update_begin(&lp->rx_stat_sync);
1239	u64_stats_add(&lp->rx_packets, packets);
1240	u64_stats_add(&lp->rx_bytes, size);
1241	u64_stats_update_end(&lp->rx_stat_sync);
1242
1243	if (tail_p)
1244		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1245
1246	if (packets < budget && napi_complete_done(napi, packets)) {
1247		/* Re-enable RX completion interrupts. This should
1248		 * cause an immediate interrupt if any RX packets are
1249		 * already pending.
1250		 */
1251		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1252	}
1253	return packets;
1254}
1255
1256/**
1257 * axienet_tx_irq - Tx Done Isr.
1258 * @irq:	irq number
1259 * @_ndev:	net_device pointer
1260 *
1261 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1262 *
1263 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1264 * TX BD processing.
1265 */
1266static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1267{
1268	unsigned int status;
1269	struct net_device *ndev = _ndev;
1270	struct axienet_local *lp = netdev_priv(ndev);
1271
1272	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1273
1274	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1275		return IRQ_NONE;
1276
1277	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1278
1279	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1280		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1281		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1282			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1283			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1284		schedule_work(&lp->dma_err_task);
1285	} else {
1286		/* Disable further TX completion interrupts and schedule
1287		 * NAPI to handle the completions.
1288		 */
1289		u32 cr = lp->tx_dma_cr;
1290
1291		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1292		if (napi_schedule_prep(&lp->napi_tx)) {
1293			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1294			__napi_schedule(&lp->napi_tx);
1295		}
1296	}
1297
1298	return IRQ_HANDLED;
1299}
1300
1301/**
1302 * axienet_rx_irq - Rx Isr.
1303 * @irq:	irq number
1304 * @_ndev:	net_device pointer
1305 *
1306 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1307 *
1308 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1309 * processing.
1310 */
1311static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1312{
1313	unsigned int status;
1314	struct net_device *ndev = _ndev;
1315	struct axienet_local *lp = netdev_priv(ndev);
1316
1317	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1318
1319	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1320		return IRQ_NONE;
1321
1322	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1323
1324	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1325		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1326		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1327			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1328			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1329		schedule_work(&lp->dma_err_task);
1330	} else {
1331		/* Disable further RX completion interrupts and schedule
1332		 * NAPI receive.
1333		 */
1334		u32 cr = lp->rx_dma_cr;
1335
1336		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1337		if (napi_schedule_prep(&lp->napi_rx)) {
1338			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1339			__napi_schedule(&lp->napi_rx);
1340		}
1341	}
1342
1343	return IRQ_HANDLED;
1344}
1345
1346/**
1347 * axienet_eth_irq - Ethernet core Isr.
1348 * @irq:	irq number
1349 * @_ndev:	net_device pointer
1350 *
1351 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1352 *
1353 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1354 */
1355static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1356{
1357	struct net_device *ndev = _ndev;
1358	struct axienet_local *lp = netdev_priv(ndev);
1359	unsigned int pending;
1360
1361	pending = axienet_ior(lp, XAE_IP_OFFSET);
1362	if (!pending)
1363		return IRQ_NONE;
1364
1365	if (pending & XAE_INT_RXFIFOOVR_MASK)
1366		ndev->stats.rx_missed_errors++;
1367
1368	if (pending & XAE_INT_RXRJECT_MASK)
1369		ndev->stats.rx_dropped++;
1370
1371	axienet_iow(lp, XAE_IS_OFFSET, pending);
1372	return IRQ_HANDLED;
1373}
1374
1375static void axienet_dma_err_handler(struct work_struct *work);
1376
1377/**
 1378 * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
 1379 * Allocates an skbuff, maps it into the scatterlist, obtains a descriptor,
 1380 * then adds the callback information and submits the descriptor.
1381 *
1382 * @ndev:	net_device pointer
1383 *
1384 */
1385static void axienet_rx_submit_desc(struct net_device *ndev)
1386{
1387	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1388	struct axienet_local *lp = netdev_priv(ndev);
1389	struct skbuf_dma_descriptor *skbuf_dma;
1390	struct sk_buff *skb;
1391	dma_addr_t addr;
1392
1393	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1394	if (!skbuf_dma)
1395		return;
1396
1397	lp->rx_ring_head++;
1398	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1399	if (!skb)
1400		return;
1401
1402	sg_init_table(skbuf_dma->sgl, 1);
1403	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1404	if (unlikely(dma_mapping_error(lp->dev, addr))) {
1405		if (net_ratelimit())
1406			netdev_err(ndev, "DMA mapping error\n");
1407		goto rx_submit_err_free_skb;
1408	}
1409	sg_dma_address(skbuf_dma->sgl) = addr;
1410	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1411	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1412					      1, DMA_DEV_TO_MEM,
1413					      DMA_PREP_INTERRUPT);
1414	if (!dma_rx_desc)
1415		goto rx_submit_err_unmap_skb;
1416
1417	skbuf_dma->skb = skb;
1418	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1419	skbuf_dma->desc = dma_rx_desc;
1420	dma_rx_desc->callback_param = lp;
1421	dma_rx_desc->callback_result = axienet_dma_rx_cb;
1422	dmaengine_submit(dma_rx_desc);
1423
1424	return;
1425
1426rx_submit_err_unmap_skb:
1427	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1428rx_submit_err_free_skb:
1429	dev_kfree_skb(skb);
1430}
1431
1432/**
1433 * axienet_init_dmaengine - init the dmaengine code.
1434 * @ndev:       Pointer to net_device structure
1435 *
1436 * Return: 0, on success.
1437 *          non-zero error value on failure
1438 *
1439 * This is the dmaengine initialization code.
1440 */
1441static int axienet_init_dmaengine(struct net_device *ndev)
1442{
1443	struct axienet_local *lp = netdev_priv(ndev);
1444	struct skbuf_dma_descriptor *skbuf_dma;
1445	int i, ret;
1446
1447	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1448	if (IS_ERR(lp->tx_chan)) {
1449		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1450		return PTR_ERR(lp->tx_chan);
1451	}
1452
1453	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1454	if (IS_ERR(lp->rx_chan)) {
1455		ret = PTR_ERR(lp->rx_chan);
1456		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1457		goto err_dma_release_tx;
1458	}
1459
1460	lp->tx_ring_tail = 0;
1461	lp->tx_ring_head = 0;
1462	lp->rx_ring_tail = 0;
1463	lp->rx_ring_head = 0;
1464	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1465				  GFP_KERNEL);
1466	if (!lp->tx_skb_ring) {
1467		ret = -ENOMEM;
1468		goto err_dma_release_rx;
1469	}
1470	for (i = 0; i < TX_BD_NUM_MAX; i++) {
1471		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1472		if (!skbuf_dma) {
1473			ret = -ENOMEM;
1474			goto err_free_tx_skb_ring;
1475		}
1476		lp->tx_skb_ring[i] = skbuf_dma;
1477	}
1478
1479	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1480				  GFP_KERNEL);
1481	if (!lp->rx_skb_ring) {
1482		ret = -ENOMEM;
1483		goto err_free_tx_skb_ring;
1484	}
1485	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1486		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1487		if (!skbuf_dma) {
1488			ret = -ENOMEM;
1489			goto err_free_rx_skb_ring;
1490		}
1491		lp->rx_skb_ring[i] = skbuf_dma;
1492	}
1493	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1494	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1495		axienet_rx_submit_desc(ndev);
1496	dma_async_issue_pending(lp->rx_chan);
1497
1498	return 0;
1499
1500err_free_rx_skb_ring:
1501	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1502		kfree(lp->rx_skb_ring[i]);
1503	kfree(lp->rx_skb_ring);
1504err_free_tx_skb_ring:
1505	for (i = 0; i < TX_BD_NUM_MAX; i++)
1506		kfree(lp->tx_skb_ring[i]);
1507	kfree(lp->tx_skb_ring);
1508err_dma_release_rx:
1509	dma_release_channel(lp->rx_chan);
1510err_dma_release_tx:
1511	dma_release_channel(lp->tx_chan);
1512	return ret;
1513}
1514
1515/**
1516 * axienet_init_legacy_dma - init the dma legacy code.
1517 * @ndev:       Pointer to net_device structure
1518 *
1519 * Return: 0, on success.
1520 *          non-zero error value on failure
1521 *
 1522 * This is the legacy DMA initialization code. It also requests the
 1523 * interrupt lines and installs the interrupt service routines.
1524 *
1525 */
1526static int axienet_init_legacy_dma(struct net_device *ndev)
1527{
1528	int ret;
1529	struct axienet_local *lp = netdev_priv(ndev);
1530
1531	/* Enable worker thread for Axi DMA error handling */
1532	lp->stopping = false;
1533	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1534
1535	napi_enable(&lp->napi_rx);
1536	napi_enable(&lp->napi_tx);
1537
1538	/* Enable interrupts for Axi DMA Tx */
1539	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1540			  ndev->name, ndev);
1541	if (ret)
1542		goto err_tx_irq;
1543	/* Enable interrupts for Axi DMA Rx */
1544	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1545			  ndev->name, ndev);
1546	if (ret)
1547		goto err_rx_irq;
1548	/* Enable interrupts for Axi Ethernet core (if defined) */
1549	if (lp->eth_irq > 0) {
1550		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1551				  ndev->name, ndev);
1552		if (ret)
1553			goto err_eth_irq;
1554	}
1555
1556	return 0;
1557
1558err_eth_irq:
1559	free_irq(lp->rx_irq, ndev);
1560err_rx_irq:
1561	free_irq(lp->tx_irq, ndev);
1562err_tx_irq:
1563	napi_disable(&lp->napi_tx);
1564	napi_disable(&lp->napi_rx);
1565	cancel_work_sync(&lp->dma_err_task);
1566	dev_err(lp->dev, "request_irq() failed\n");
1567	return ret;
1568}
1569
1570/**
1571 * axienet_open - Driver open routine.
1572 * @ndev:	Pointer to net_device structure
1573 *
1574 * Return: 0, on success.
1575 *	    non-zero error value on failure
1576 *
1577 * This is the driver open routine. It calls phylink_start to start the
1578 * PHY device.
1579 * It also allocates interrupt service routines, enables the interrupt lines
1580 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1581 * descriptors are initialized.
1582 */
1583static int axienet_open(struct net_device *ndev)
1584{
1585	int ret;
1586	struct axienet_local *lp = netdev_priv(ndev);
1587
1588	/* When we do an Axi Ethernet reset, it resets the complete core
1589	 * including the MDIO. MDIO must be disabled before resetting.
1590	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1591	 */
1592	axienet_lock_mii(lp);
1593	ret = axienet_device_reset(ndev);
1594	axienet_unlock_mii(lp);
1595
1596	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1597	if (ret) {
1598		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1599		return ret;
1600	}
1601
1602	phylink_start(lp->phylink);
1603
1604	/* Start the statistics refresh work */
1605	schedule_delayed_work(&lp->stats_work, 0);
1606
1607	if (lp->use_dmaengine) {
1608		/* Enable interrupts for Axi Ethernet core (if defined) */
1609		if (lp->eth_irq > 0) {
1610			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1611					  ndev->name, ndev);
1612			if (ret)
1613				goto err_phy;
1614		}
1615
1616		ret = axienet_init_dmaengine(ndev);
1617		if (ret < 0)
1618			goto err_free_eth_irq;
1619	} else {
1620		ret = axienet_init_legacy_dma(ndev);
1621		if (ret)
1622			goto err_phy;
1623	}
1624
1625	return 0;
1626
1627err_free_eth_irq:
1628	if (lp->eth_irq > 0)
1629		free_irq(lp->eth_irq, ndev);
1630err_phy:
1631	cancel_delayed_work_sync(&lp->stats_work);
1632	phylink_stop(lp->phylink);
1633	phylink_disconnect_phy(lp->phylink);
1634	return ret;
1635}
1636
1637/**
1638 * axienet_stop - Driver stop routine.
1639 * @ndev:	Pointer to net_device structure
1640 *
1641 * Return: 0, on success.
1642 *
1643 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1644 * device. It also removes the interrupt handlers and disables the interrupts.
1645 * The Axi DMA Tx/Rx BDs are released.
1646 */
1647static int axienet_stop(struct net_device *ndev)
1648{
1649	struct axienet_local *lp = netdev_priv(ndev);
1650	int i;
1651
1652	if (!lp->use_dmaengine) {
1653		WRITE_ONCE(lp->stopping, true);
1654		flush_work(&lp->dma_err_task);
1655
1656		napi_disable(&lp->napi_tx);
1657		napi_disable(&lp->napi_rx);
1658	}
1659
1660	cancel_delayed_work_sync(&lp->stats_work);
1661
1662	phylink_stop(lp->phylink);
1663	phylink_disconnect_phy(lp->phylink);
1664
1665	axienet_setoptions(ndev, lp->options &
1666			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1667
1668	if (!lp->use_dmaengine) {
1669		axienet_dma_stop(lp);
1670		cancel_work_sync(&lp->dma_err_task);
1671		free_irq(lp->tx_irq, ndev);
1672		free_irq(lp->rx_irq, ndev);
1673		axienet_dma_bd_release(ndev);
1674	} else {
1675		dmaengine_terminate_sync(lp->tx_chan);
1676		dmaengine_synchronize(lp->tx_chan);
1677		dmaengine_terminate_sync(lp->rx_chan);
1678		dmaengine_synchronize(lp->rx_chan);
1679
1680		for (i = 0; i < TX_BD_NUM_MAX; i++)
1681			kfree(lp->tx_skb_ring[i]);
1682		kfree(lp->tx_skb_ring);
1683		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1684			kfree(lp->rx_skb_ring[i]);
1685		kfree(lp->rx_skb_ring);
1686
1687		dma_release_channel(lp->rx_chan);
1688		dma_release_channel(lp->tx_chan);
1689	}
1690
1691	axienet_iow(lp, XAE_IE_OFFSET, 0);
1692
1693	if (lp->eth_irq > 0)
1694		free_irq(lp->eth_irq, ndev);
1695	return 0;
1696}
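
/*
 * Editor's note: a minimal usage sketch, not part of the driver. Assuming the
 * interface registered by this driver shows up as eth0, the open and stop
 * routines above are exercised from user space simply by bringing the link up
 * and down:
 *
 *	ip link set dev eth0 up
 *	ip link set dev eth0 down
 */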
1697
1698/**
1699 * axienet_change_mtu - Driver change mtu routine.
1700 * @ndev:	Pointer to net_device structure
1701 * @new_mtu:	New mtu value to be applied
1702 *
1703 * Return: 0 on success, -EBUSY if the device is up, -EINVAL if unsupported.
1704 *
1705 * This is the change mtu driver routine. It checks if the Axi Ethernet
1706 * hardware supports jumbo frames before changing the mtu. This can be
1707 * called only when the device is not up.
1708 */
1709static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1710{
1711	struct axienet_local *lp = netdev_priv(ndev);
1712
1713	if (netif_running(ndev))
1714		return -EBUSY;
1715
1716	if ((new_mtu + VLAN_ETH_HLEN +
1717	     XAE_TRL_SIZE) > lp->rxmem)
1718		return -EINVAL;
1719
1720	WRITE_ONCE(ndev->mtu, new_mtu);
1721
1722	return 0;
1723}
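
/*
 * Editor's note: an illustrative sketch, assuming the interface is named eth0
 * and the design was built with enough Rx memory (xlnx,rxmem) for jumbo
 * frames. Because the handler above rejects changes while the device is up,
 * the MTU has to be changed with the link down:
 *
 *	ip link set dev eth0 down
 *	ip link set dev eth0 mtu 9000
 *	ip link set dev eth0 up
 */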
1724
1725#ifdef CONFIG_NET_POLL_CONTROLLER
1726/**
1727 * axienet_poll_controller - Axi Ethernet poll mechanism.
1728 * @ndev:	Pointer to net_device structure
1729 *
1730 * This implements the Rx/Tx ISR poll mechanism. The interrupts are disabled
1731 * prior to polling the ISRs and are re-enabled after the polling is done.
1732 */
1733static void axienet_poll_controller(struct net_device *ndev)
1734{
1735	struct axienet_local *lp = netdev_priv(ndev);
1736
1737	disable_irq(lp->tx_irq);
1738	disable_irq(lp->rx_irq);
1739	axienet_rx_irq(lp->rx_irq, ndev);
1740	axienet_tx_irq(lp->tx_irq, ndev);
1741	enable_irq(lp->tx_irq);
1742	enable_irq(lp->rx_irq);
1743}
1744#endif
1745
1746static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1747{
1748	struct axienet_local *lp = netdev_priv(dev);
1749
1750	if (!netif_running(dev))
1751		return -EINVAL;
1752
1753	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1754}
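
/*
 * Editor's note: a minimal user-space sketch (not part of the driver) showing
 * how the MII ioctls end up in the handler above via phylink_mii_ioctl(). The
 * interface name "eth0" and the BMSR register are illustrative assumptions,
 * and the ifreq cast follows the common mii-tool idiom.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	// fills mii->phy_id
 *			mii->reg_num = MII_BMSR;
 *			if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
 *				printf("BMSR: 0x%04x\n", mii->val_out);
 *		}
 *		return 0;
 *	}
 */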
1755
1756static void
1757axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1758{
1759	struct axienet_local *lp = netdev_priv(dev);
1760	unsigned int start;
1761
1762	netdev_stats_to_stats64(stats, &dev->stats);
1763
1764	do {
1765		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1766		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1767		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1768	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1769
1770	do {
1771		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1772		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1773		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1774	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1775
1776	if (!(lp->features & XAE_FEATURE_STATS))
1777		return;
1778
1779	do {
1780		start = read_seqcount_begin(&lp->hw_stats_seqcount);
1781		stats->rx_length_errors =
1782			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1783		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1784		stats->rx_frame_errors =
1785			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1786		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1787				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1788				   stats->rx_length_errors +
1789				   stats->rx_crc_errors +
1790				   stats->rx_frame_errors;
1791		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1792
1793		stats->tx_aborted_errors =
1794			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1795		stats->tx_fifo_errors =
1796			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1797		stats->tx_window_errors =
1798			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1799		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1800				   stats->tx_aborted_errors +
1801				   stats->tx_fifo_errors +
1802				   stats->tx_window_errors;
1803	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1804}
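
/*
 * Editor's note: the fetch/retry loops above are the reader half of the
 * u64_stats_sync pattern. A condensed sketch of the matching writer side is
 * shown below; the real updates live in the Rx/Tx completion paths of this
 * driver, so treat this as illustrative rather than a verbatim copy:
 *
 *	u64_stats_update_begin(&lp->rx_stat_sync);
 *	u64_stats_add(&lp->rx_packets, packets);
 *	u64_stats_add(&lp->rx_bytes, size);
 *	u64_stats_update_end(&lp->rx_stat_sync);
 */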
1805
1806static const struct net_device_ops axienet_netdev_ops = {
1807	.ndo_open = axienet_open,
1808	.ndo_stop = axienet_stop,
1809	.ndo_start_xmit = axienet_start_xmit,
1810	.ndo_get_stats64 = axienet_get_stats64,
1811	.ndo_change_mtu	= axienet_change_mtu,
1812	.ndo_set_mac_address = netdev_set_mac_address,
1813	.ndo_validate_addr = eth_validate_addr,
1814	.ndo_eth_ioctl = axienet_ioctl,
1815	.ndo_set_rx_mode = axienet_set_multicast_list,
1816#ifdef CONFIG_NET_POLL_CONTROLLER
1817	.ndo_poll_controller = axienet_poll_controller,
1818#endif
1819};
1820
1821static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1822	.ndo_open = axienet_open,
1823	.ndo_stop = axienet_stop,
1824	.ndo_start_xmit = axienet_start_xmit_dmaengine,
1825	.ndo_get_stats64 = axienet_get_stats64,
1826	.ndo_change_mtu	= axienet_change_mtu,
1827	.ndo_set_mac_address = netdev_set_mac_address,
1828	.ndo_validate_addr = eth_validate_addr,
1829	.ndo_eth_ioctl = axienet_ioctl,
1830	.ndo_set_rx_mode = axienet_set_multicast_list,
1831};
1832
1833/**
1834 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1835 * @ndev:	Pointer to net_device structure
1836 * @ed:		Pointer to ethtool_drvinfo structure
1837 *
1838 * This implements ethtool command for getting the driver information.
1839 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1840 */
1841static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1842					 struct ethtool_drvinfo *ed)
1843{
1844	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1845	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1846}
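
/*
 * Editor's note: illustrative output only, assuming an interface named eth0.
 * "ethtool -i eth0" reports the two fields filled in above, roughly:
 *
 *	driver: xaxienet
 *	version: 1.00a
 */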
1847
1848/**
1849 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1850 *				   AxiEthernet core.
1851 * @ndev:	Pointer to net_device structure
1852 *
1853 * This implements ethtool command for getting the total register length
1854 * information.
1855 *
1856 * Return: the total regs length
1857 */
1858static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1859{
1860	return sizeof(u32) * AXIENET_REGS_N;
1861}
1862
1863/**
1864 * axienet_ethtools_get_regs - Dump the contents of all registers present
1865 *			       in AxiEthernet core.
1866 * @ndev:	Pointer to net_device structure
1867 * @regs:	Pointer to ethtool_regs structure
1868 * @ret:	Void pointer used to return the contents of the registers.
1869 *
1870 * This implements ethtool command for getting the Axi Ethernet register dump.
1871 * Issue "ethtool -d ethX" to execute this function.
1872 */
1873static void axienet_ethtools_get_regs(struct net_device *ndev,
1874				      struct ethtool_regs *regs, void *ret)
1875{
1876	u32 *data = (u32 *)ret;
1877	size_t len = sizeof(u32) * AXIENET_REGS_N;
1878	struct axienet_local *lp = netdev_priv(ndev);
1879
1880	regs->version = 0;
1881	regs->len = len;
1882
1883	memset(data, 0, len);
1884	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1885	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1886	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1887	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1888	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1889	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1890	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1891	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1892	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1893	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1894	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1895	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1896	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1897	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1898	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1899	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1900	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1901	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1902	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1903	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1904	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1905	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1906	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1907	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1908	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1909	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1910	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1911	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1912	if (!lp->use_dmaengine) {
1913		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1914		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1915		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1916		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1917		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1918		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1919		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1920		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1921	}
1922}
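
/*
 * Editor's note: a usage sketch, assuming the interface is named eth0. The
 * 40-word register snapshot built above can be read back either decoded or
 * raw:
 *
 *	ethtool -d eth0
 *	ethtool -d eth0 raw on > axienet-regs.bin
 */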
1923
1924static void
1925axienet_ethtools_get_ringparam(struct net_device *ndev,
1926			       struct ethtool_ringparam *ering,
1927			       struct kernel_ethtool_ringparam *kernel_ering,
1928			       struct netlink_ext_ack *extack)
1929{
1930	struct axienet_local *lp = netdev_priv(ndev);
1931
1932	ering->rx_max_pending = RX_BD_NUM_MAX;
1933	ering->rx_mini_max_pending = 0;
1934	ering->rx_jumbo_max_pending = 0;
1935	ering->tx_max_pending = TX_BD_NUM_MAX;
1936	ering->rx_pending = lp->rx_bd_num;
1937	ering->rx_mini_pending = 0;
1938	ering->rx_jumbo_pending = 0;
1939	ering->tx_pending = lp->tx_bd_num;
1940}
1941
1942static int
1943axienet_ethtools_set_ringparam(struct net_device *ndev,
1944			       struct ethtool_ringparam *ering,
1945			       struct kernel_ethtool_ringparam *kernel_ering,
1946			       struct netlink_ext_ack *extack)
1947{
1948	struct axienet_local *lp = netdev_priv(ndev);
1949
1950	if (ering->rx_pending > RX_BD_NUM_MAX ||
1951	    ering->rx_mini_pending ||
1952	    ering->rx_jumbo_pending ||
1953	    ering->tx_pending < TX_BD_NUM_MIN ||
1954	    ering->tx_pending > TX_BD_NUM_MAX)
1955		return -EINVAL;
1956
1957	if (netif_running(ndev))
1958		return -EBUSY;
1959
1960	lp->rx_bd_num = ering->rx_pending;
1961	lp->tx_bd_num = ering->tx_pending;
1962	return 0;
1963}
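
/*
 * Editor's note: a usage sketch, assuming the interface is named eth0. The
 * new ring sizes only take effect on the next open, since the handler above
 * refuses changes while the interface is running:
 *
 *	ip link set dev eth0 down
 *	ethtool -G eth0 rx 512 tx 256
 *	ip link set dev eth0 up
 */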
1964
1965/**
1966 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1967 *				     Tx and Rx paths.
1968 * @ndev:	Pointer to net_device structure
1969 * @epauseparm:	Pointer to ethtool_pauseparam structure.
1970 *
1971 * This implements ethtool command for getting axi ethernet pause frame
1972 * setting. Issue "ethtool -a ethX" to execute this function.
1973 */
1974static void
1975axienet_ethtools_get_pauseparam(struct net_device *ndev,
1976				struct ethtool_pauseparam *epauseparm)
1977{
1978	struct axienet_local *lp = netdev_priv(ndev);
1979
1980	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1981}
1982
1983/**
1984 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1985 *				     settings.
1986 * @ndev:	Pointer to net_device structure
1987 * @epauseparm:	Pointer to ethtool_pauseparam structure
1988 *
1989 * This implements ethtool command for enabling flow control on Rx and Tx
1990 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1991 * function.
1992 *
1993 * Return: 0 on success, or a negative error code from phylink on failure
1994 */
1995static int
1996axienet_ethtools_set_pauseparam(struct net_device *ndev,
1997				struct ethtool_pauseparam *epauseparm)
1998{
1999	struct axienet_local *lp = netdev_priv(ndev);
2000
2001	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
2002}
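
/*
 * Editor's note: a usage sketch, assuming an interface named eth0 and a link
 * partner that negotiates pause. The requests below are handed straight to
 * phylink by the handlers above:
 *
 *	ethtool -a eth0            # show the current pause settings
 *	ethtool -A eth0 rx on tx on
 */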
2003
2004/**
2005 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2006 * @ndev:	Pointer to net_device structure
2007 * @ecoalesce:	Pointer to ethtool_coalesce structure
2008 * @kernel_coal: ethtool CQE mode setting structure
2009 * @extack:	extack for reporting error messages
2010 *
2011 * This implements ethtool command for getting the DMA interrupt coalescing
2012 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
2013 * execute this function.
2014 *
2015 * Return: 0 always
2016 */
2017static int
2018axienet_ethtools_get_coalesce(struct net_device *ndev,
2019			      struct ethtool_coalesce *ecoalesce,
2020			      struct kernel_ethtool_coalesce *kernel_coal,
2021			      struct netlink_ext_ack *extack)
2022{
2023	struct axienet_local *lp = netdev_priv(ndev);
2024
2025	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
2026	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
2027	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
2028	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
2029	return 0;
2030}
2031
2032/**
2033 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2034 * @ndev:	Pointer to net_device structure
2035 * @ecoalesce:	Pointer to ethtool_coalesce structure
2036 * @kernel_coal: ethtool CQE mode setting structure
2037 * @extack:	extack for reporting error messages
2038 *
2039 * This implements ethtool command for setting the DMA interrupt coalescing
2040 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
2041 * prompt to execute this function.
2042 *
2043 * Return: 0, on success, Non-zero error value on failure.
2044 */
2045static int
2046axienet_ethtools_set_coalesce(struct net_device *ndev,
2047			      struct ethtool_coalesce *ecoalesce,
2048			      struct kernel_ethtool_coalesce *kernel_coal,
2049			      struct netlink_ext_ack *extack)
2050{
2051	struct axienet_local *lp = netdev_priv(ndev);
2052
2053	if (netif_running(ndev)) {
2054		NL_SET_ERR_MSG(extack,
2055			       "Please stop netif before applying configuration");
2056		return -EBUSY;
2057	}
2058
2059	if (ecoalesce->rx_max_coalesced_frames > 255 ||
2060	    ecoalesce->tx_max_coalesced_frames > 255) {
2061		NL_SET_ERR_MSG(extack, "frames must be less than 256");
2062		return -EINVAL;
2063	}
2064
2065	if (ecoalesce->rx_max_coalesced_frames)
2066		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
2067	if (ecoalesce->rx_coalesce_usecs)
2068		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
2069	if (ecoalesce->tx_max_coalesced_frames)
2070		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
2071	if (ecoalesce->tx_coalesce_usecs)
2072		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
2073
2074	return 0;
2075}
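
/*
 * Editor's note: a usage sketch, assuming the interface is named eth0. Frame
 * counts above 255 are rejected by the handler above, and the device must be
 * down while the values are changed:
 *
 *	ip link set dev eth0 down
 *	ethtool -C eth0 rx-frames 16 rx-usecs 50 tx-frames 32 tx-usecs 100
 *	ip link set dev eth0 up
 */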
2076
2077static int
2078axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2079				    struct ethtool_link_ksettings *cmd)
2080{
2081	struct axienet_local *lp = netdev_priv(ndev);
2082
2083	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2084}
2085
2086static int
2087axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2088				    const struct ethtool_link_ksettings *cmd)
2089{
2090	struct axienet_local *lp = netdev_priv(ndev);
2091
2092	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2093}
2094
2095static int axienet_ethtools_nway_reset(struct net_device *dev)
2096{
2097	struct axienet_local *lp = netdev_priv(dev);
2098
2099	return phylink_ethtool_nway_reset(lp->phylink);
2100}
2101
2102static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2103					       struct ethtool_stats *stats,
2104					       u64 *data)
2105{
2106	struct axienet_local *lp = netdev_priv(dev);
2107	unsigned int start;
2108
2109	do {
2110		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2111		data[0] = axienet_stat(lp, STAT_RX_BYTES);
2112		data[1] = axienet_stat(lp, STAT_TX_BYTES);
2113		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2114		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2115		data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2116		data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2117		data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
2118		data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
2119		data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
2120	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2121}
2122
2123static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2124	"Received bytes",
2125	"Transmitted bytes",
2126	"RX Good VLAN Tagged Frames",
2127	"TX Good VLAN Tagged Frames",
2128	"TX Good PFC Frames",
2129	"RX Good PFC Frames",
2130	"User Defined Counter 0",
2131	"User Defined Counter 1",
2132	"User Defined Counter 2",
2133};
2134
2135static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2136{
2137	switch (stringset) {
2138	case ETH_SS_STATS:
2139		memcpy(data, axienet_ethtool_stats_strings,
2140		       sizeof(axienet_ethtool_stats_strings));
2141		break;
2142	}
2143}
2144
2145static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2146{
2147	struct axienet_local *lp = netdev_priv(dev);
2148
2149	switch (sset) {
2150	case ETH_SS_STATS:
2151		if (lp->features & XAE_FEATURE_STATS)
2152			return ARRAY_SIZE(axienet_ethtool_stats_strings);
2153		fallthrough;
2154	default:
2155		return -EOPNOTSUPP;
2156	}
2157}
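
/*
 * Editor's note: illustrative output only, assuming an interface named eth0
 * on hardware built with the statistics counters option. "ethtool -S eth0"
 * prints the string table above next to the values collected in
 * axienet_ethtools_get_ethtool_stats(), e.g.:
 *
 *	Received bytes: 123456789
 *	Transmitted bytes: 98765432
 *	RX Good VLAN Tagged Frames: 0
 *	...
 */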
2158
2159static void
2160axienet_ethtools_get_pause_stats(struct net_device *dev,
2161				 struct ethtool_pause_stats *pause_stats)
2162{
2163	struct axienet_local *lp = netdev_priv(dev);
2164	unsigned int start;
2165
2166	if (!(lp->features & XAE_FEATURE_STATS))
2167		return;
2168
2169	do {
2170		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2171		pause_stats->tx_pause_frames =
2172			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2173		pause_stats->rx_pause_frames =
2174			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2175	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2176}
2177
2178static void
2179axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2180				  struct ethtool_eth_mac_stats *mac_stats)
2181{
2182	struct axienet_local *lp = netdev_priv(dev);
2183	unsigned int start;
2184
2185	if (!(lp->features & XAE_FEATURE_STATS))
2186		return;
2187
2188	do {
2189		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2190		mac_stats->FramesTransmittedOK =
2191			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2192		mac_stats->SingleCollisionFrames =
2193			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2194		mac_stats->MultipleCollisionFrames =
2195			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2196		mac_stats->FramesReceivedOK =
2197			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2198		mac_stats->FrameCheckSequenceErrors =
2199			axienet_stat(lp, STAT_RX_FCS_ERRORS);
2200		mac_stats->AlignmentErrors =
2201			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2202		mac_stats->FramesWithDeferredXmissions =
2203			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2204		mac_stats->LateCollisions =
2205			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2206		mac_stats->FramesAbortedDueToXSColls =
2207			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2208		mac_stats->MulticastFramesXmittedOK =
2209			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2210		mac_stats->BroadcastFramesXmittedOK =
2211			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2212		mac_stats->FramesWithExcessiveDeferral =
2213			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2214		mac_stats->MulticastFramesReceivedOK =
2215			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2216		mac_stats->BroadcastFramesReceivedOK =
2217			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2218		mac_stats->InRangeLengthErrors =
2219			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2220	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2221}
2222
2223static void
2224axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2225				   struct ethtool_eth_ctrl_stats *ctrl_stats)
2226{
2227	struct axienet_local *lp = netdev_priv(dev);
2228	unsigned int start;
2229
2230	if (!(lp->features & XAE_FEATURE_STATS))
2231		return;
2232
2233	do {
2234		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2235		ctrl_stats->MACControlFramesTransmitted =
2236			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2237		ctrl_stats->MACControlFramesReceived =
2238			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2239		ctrl_stats->UnsupportedOpcodesReceived =
2240			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2241	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2242}
2243
2244static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2245	{   64,    64 },
2246	{   65,   127 },
2247	{  128,   255 },
2248	{  256,   511 },
2249	{  512,  1023 },
2250	{ 1024,  1518 },
2251	{ 1519, 16384 },
2252	{ },
2253};
2254
2255static void
2256axienet_ethtool_get_rmon_stats(struct net_device *dev,
2257			       struct ethtool_rmon_stats *rmon_stats,
2258			       const struct ethtool_rmon_hist_range **ranges)
2259{
2260	struct axienet_local *lp = netdev_priv(dev);
2261	unsigned int start;
2262
2263	if (!(lp->features & XAE_FEATURE_STATS))
2264		return;
2265
2266	do {
2267		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2268		rmon_stats->undersize_pkts =
2269			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2270		rmon_stats->oversize_pkts =
2271			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2272		rmon_stats->fragments =
2273			axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2274
2275		rmon_stats->hist[0] =
2276			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2277		rmon_stats->hist[1] =
2278			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2279		rmon_stats->hist[2] =
2280			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2281		rmon_stats->hist[3] =
2282			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2283		rmon_stats->hist[4] =
2284			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2285		rmon_stats->hist[5] =
2286			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2287		rmon_stats->hist[6] =
2288			rmon_stats->oversize_pkts;
2289
2290		rmon_stats->hist_tx[0] =
2291			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2292		rmon_stats->hist_tx[1] =
2293			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2294		rmon_stats->hist_tx[2] =
2295			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2296		rmon_stats->hist_tx[3] =
2297			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2298		rmon_stats->hist_tx[4] =
2299			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2300		rmon_stats->hist_tx[5] =
2301			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2302		rmon_stats->hist_tx[6] =
2303			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2304	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2305
2306	*ranges = axienet_rmon_ranges;
2307}
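
/*
 * Editor's note: a usage sketch, assuming an interface named eth0 and an
 * ethtool recent enough to support standard statistics groups. The MAC,
 * control-frame and RMON histogram counters filled in above are reported
 * with:
 *
 *	ethtool -S eth0 --groups eth-mac eth-ctrl rmon
 */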
2308
2309static const struct ethtool_ops axienet_ethtool_ops = {
2310	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
2311				     ETHTOOL_COALESCE_USECS,
2312	.get_drvinfo    = axienet_ethtools_get_drvinfo,
2313	.get_regs_len   = axienet_ethtools_get_regs_len,
2314	.get_regs       = axienet_ethtools_get_regs,
2315	.get_link       = ethtool_op_get_link,
2316	.get_ringparam	= axienet_ethtools_get_ringparam,
2317	.set_ringparam	= axienet_ethtools_set_ringparam,
2318	.get_pauseparam = axienet_ethtools_get_pauseparam,
2319	.set_pauseparam = axienet_ethtools_set_pauseparam,
2320	.get_coalesce   = axienet_ethtools_get_coalesce,
2321	.set_coalesce   = axienet_ethtools_set_coalesce,
2322	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
2323	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
2324	.nway_reset	= axienet_ethtools_nway_reset,
2325	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2326	.get_strings    = axienet_ethtools_get_strings,
2327	.get_sset_count = axienet_ethtools_get_sset_count,
2328	.get_pause_stats = axienet_ethtools_get_pause_stats,
2329	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2330	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2331	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
2332};
2333
2334static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2335{
2336	return container_of(pcs, struct axienet_local, pcs);
2337}
2338
2339static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2340				  struct phylink_link_state *state)
2341{
2342	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2343
2344	phylink_mii_c22_pcs_get_state(pcs_phy, state);
2345}
2346
2347static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2348{
2349	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2350
2351	phylink_mii_c22_pcs_an_restart(pcs_phy);
2352}
2353
2354static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2355			      phy_interface_t interface,
2356			      const unsigned long *advertising,
2357			      bool permit_pause_to_mac)
2358{
2359	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2360	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2361	struct axienet_local *lp = netdev_priv(ndev);
2362	int ret;
2363
2364	if (lp->switch_x_sgmii) {
2365		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2366				    interface == PHY_INTERFACE_MODE_SGMII ?
2367					XLNX_MII_STD_SELECT_SGMII : 0);
2368		if (ret < 0) {
2369			netdev_warn(ndev,
2370				    "Failed to switch PHY interface: %d\n",
2371				    ret);
2372			return ret;
2373		}
2374	}
2375
2376	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2377					 neg_mode);
2378	if (ret < 0)
2379		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2380
2381	return ret;
2382}
2383
2384static const struct phylink_pcs_ops axienet_pcs_ops = {
2385	.pcs_get_state = axienet_pcs_get_state,
2386	.pcs_config = axienet_pcs_config,
2387	.pcs_an_restart = axienet_pcs_an_restart,
2388};
2389
2390static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2391						  phy_interface_t interface)
2392{
2393	struct net_device *ndev = to_net_dev(config->dev);
2394	struct axienet_local *lp = netdev_priv(ndev);
2395
2396	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2397	    interface ==  PHY_INTERFACE_MODE_SGMII)
2398		return &lp->pcs;
2399
2400	return NULL;
2401}
2402
2403static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2404			       const struct phylink_link_state *state)
2405{
2406	/* nothing meaningful to do */
2407}
2408
2409static void axienet_mac_link_down(struct phylink_config *config,
2410				  unsigned int mode,
2411				  phy_interface_t interface)
2412{
2413	/* nothing meaningful to do */
2414}
2415
2416static void axienet_mac_link_up(struct phylink_config *config,
2417				struct phy_device *phy,
2418				unsigned int mode, phy_interface_t interface,
2419				int speed, int duplex,
2420				bool tx_pause, bool rx_pause)
2421{
2422	struct net_device *ndev = to_net_dev(config->dev);
2423	struct axienet_local *lp = netdev_priv(ndev);
2424	u32 emmc_reg, fcc_reg;
2425
2426	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2427	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2428
2429	switch (speed) {
2430	case SPEED_1000:
2431		emmc_reg |= XAE_EMMC_LINKSPD_1000;
2432		break;
2433	case SPEED_100:
2434		emmc_reg |= XAE_EMMC_LINKSPD_100;
2435		break;
2436	case SPEED_10:
2437		emmc_reg |= XAE_EMMC_LINKSPD_10;
2438		break;
2439	default:
2440		dev_err(&ndev->dev,
2441			"Speeds other than 10, 100 or 1000 Mbps are not supported\n");
2442		break;
2443	}
2444
2445	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2446
2447	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2448	if (tx_pause)
2449		fcc_reg |= XAE_FCC_FCTX_MASK;
2450	else
2451		fcc_reg &= ~XAE_FCC_FCTX_MASK;
2452	if (rx_pause)
2453		fcc_reg |= XAE_FCC_FCRX_MASK;
2454	else
2455		fcc_reg &= ~XAE_FCC_FCRX_MASK;
2456	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2457}
2458
2459static const struct phylink_mac_ops axienet_phylink_ops = {
2460	.mac_select_pcs = axienet_mac_select_pcs,
2461	.mac_config = axienet_mac_config,
2462	.mac_link_down = axienet_mac_link_down,
2463	.mac_link_up = axienet_mac_link_up,
2464};
2465
2466/**
2467 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2468 * @work:	pointer to work_struct
2469 *
2470 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2471 * Tx/Rx BDs.
2472 */
2473static void axienet_dma_err_handler(struct work_struct *work)
2474{
2475	u32 i;
2476	u32 axienet_status;
2477	struct axidma_bd *cur_p;
2478	struct axienet_local *lp = container_of(work, struct axienet_local,
2479						dma_err_task);
2480	struct net_device *ndev = lp->ndev;
2481
2482	/* Don't bother if we are going to stop anyway */
2483	if (READ_ONCE(lp->stopping))
2484		return;
2485
2486	napi_disable(&lp->napi_tx);
2487	napi_disable(&lp->napi_rx);
2488
2489	axienet_setoptions(ndev, lp->options &
2490			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2491
2492	axienet_dma_stop(lp);
2493
2494	for (i = 0; i < lp->tx_bd_num; i++) {
2495		cur_p = &lp->tx_bd_v[i];
2496		if (cur_p->cntrl) {
2497			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2498
2499			dma_unmap_single(lp->dev, addr,
2500					 (cur_p->cntrl &
2501					  XAXIDMA_BD_CTRL_LENGTH_MASK),
2502					 DMA_TO_DEVICE);
2503		}
2504		if (cur_p->skb)
2505			dev_kfree_skb_irq(cur_p->skb);
2506		cur_p->phys = 0;
2507		cur_p->phys_msb = 0;
2508		cur_p->cntrl = 0;
2509		cur_p->status = 0;
2510		cur_p->app0 = 0;
2511		cur_p->app1 = 0;
2512		cur_p->app2 = 0;
2513		cur_p->app3 = 0;
2514		cur_p->app4 = 0;
2515		cur_p->skb = NULL;
2516	}
2517
2518	for (i = 0; i < lp->rx_bd_num; i++) {
2519		cur_p = &lp->rx_bd_v[i];
2520		cur_p->status = 0;
2521		cur_p->app0 = 0;
2522		cur_p->app1 = 0;
2523		cur_p->app2 = 0;
2524		cur_p->app3 = 0;
2525		cur_p->app4 = 0;
2526	}
2527
2528	lp->tx_bd_ci = 0;
2529	lp->tx_bd_tail = 0;
2530	lp->rx_bd_ci = 0;
2531
2532	axienet_dma_start(lp);
2533
2534	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2535	axienet_status &= ~XAE_RCW1_RX_MASK;
2536	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2537
2538	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2539	if (axienet_status & XAE_INT_RXRJECT_MASK)
2540		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2541	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2542		    XAE_INT_RECV_ERROR_MASK : 0);
2543	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2544
2545	/* Sync default options with HW but leave receiver and
2546	 * transmitter disabled.
2547	 */
2548	axienet_setoptions(ndev, lp->options &
2549			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2550	axienet_set_mac_address(ndev, NULL);
2551	axienet_set_multicast_list(ndev);
2552	napi_enable(&lp->napi_rx);
2553	napi_enable(&lp->napi_tx);
2554	axienet_setoptions(ndev, lp->options);
2555}
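
/*
 * Editor's note: a condensed sketch of how this handler gets scheduled. The
 * real error handling lives in the Tx/Rx DMA interrupt handlers earlier in
 * this file, so the handler name and exact flow below are illustrative
 * rather than a verbatim copy:
 *
 *	static irqreturn_t some_dma_irq(int irq, void *_ndev)
 *	{
 *		struct axienet_local *lp = netdev_priv(_ndev);
 *		unsigned int status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 *
 *		if (status & XAXIDMA_IRQ_ERROR_MASK) {
 *			// Defer the heavy reset work to process context
 *			schedule_work(&lp->dma_err_task);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */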
2556
2557/**
2558 * axienet_probe - Axi Ethernet probe function.
2559 * @pdev:	Pointer to platform device structure.
2560 *
2561 * Return: 0, on success
2562 *	    Non-zero error value on failure.
2563 *
2564 * This is the probe routine for the Axi Ethernet driver. It is called before
2565 * any other driver routines are invoked. It allocates and sets up the Ethernet
2566 * device, parses the device tree to populate the fields of axienet_local, and
2567 * registers the Ethernet device.
2568 */
2569static int axienet_probe(struct platform_device *pdev)
2570{
2571	int ret;
2572	struct device_node *np;
2573	struct axienet_local *lp;
2574	struct net_device *ndev;
2575	struct resource *ethres;
2576	u8 mac_addr[ETH_ALEN];
2577	int addr_width = 32;
2578	u32 value;
2579
2580	ndev = alloc_etherdev(sizeof(*lp));
2581	if (!ndev)
2582		return -ENOMEM;
2583
2584	platform_set_drvdata(pdev, ndev);
2585
2586	SET_NETDEV_DEV(ndev, &pdev->dev);
2587	ndev->features = NETIF_F_SG;
2588	ndev->ethtool_ops = &axienet_ethtool_ops;
2589
2590	/* MTU range: 64 - 9000 */
2591	ndev->min_mtu = 64;
2592	ndev->max_mtu = XAE_JUMBO_MTU;
2593
2594	lp = netdev_priv(ndev);
2595	lp->ndev = ndev;
2596	lp->dev = &pdev->dev;
2597	lp->options = XAE_OPTION_DEFAULTS;
2598	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2599	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2600
2601	u64_stats_init(&lp->rx_stat_sync);
2602	u64_stats_init(&lp->tx_stat_sync);
2603
2604	mutex_init(&lp->stats_lock);
2605	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2606	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2607
2608	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2609	if (!lp->axi_clk) {
2610		/* For backward compatibility, if named AXI clock is not present,
2611		 * treat the first clock specified as the AXI clock.
2612		 */
2613		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2614	}
2615	if (IS_ERR(lp->axi_clk)) {
2616		ret = PTR_ERR(lp->axi_clk);
2617		goto free_netdev;
2618	}
2619	ret = clk_prepare_enable(lp->axi_clk);
2620	if (ret) {
2621		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2622		goto free_netdev;
2623	}
2624
2625	lp->misc_clks[0].id = "axis_clk";
2626	lp->misc_clks[1].id = "ref_clk";
2627	lp->misc_clks[2].id = "mgt_clk";
2628
2629	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2630	if (ret)
2631		goto cleanup_clk;
2632
2633	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2634	if (ret)
2635		goto cleanup_clk;
2636
2637	/* Map device registers */
2638	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2639	if (IS_ERR(lp->regs)) {
2640		ret = PTR_ERR(lp->regs);
2641		goto cleanup_clk;
2642	}
2643	lp->regs_start = ethres->start;
2644
2645	/* Setup checksum offload, but default to off if not specified */
2646	lp->features = 0;
2647
2648	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2649		lp->features |= XAE_FEATURE_STATS;
2650
2651	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2652	if (!ret) {
2653		switch (value) {
2654		case 1:
2655			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2656			/* Can checksum any contiguous range */
2657			ndev->features |= NETIF_F_HW_CSUM;
2658			break;
2659		case 2:
2660			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2661			/* Can checksum TCP/UDP over IPv4. */
2662			ndev->features |= NETIF_F_IP_CSUM;
2663			break;
2664		}
2665	}
2666	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2667	if (!ret) {
2668		switch (value) {
2669		case 1:
2670			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2671			ndev->features |= NETIF_F_RXCSUM;
2672			break;
2673		case 2:
2674			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2675			ndev->features |= NETIF_F_RXCSUM;
2676			break;
2677		}
2678	}
2679	/* To support jumbo frames, the Axi Ethernet hardware must be
2680	 * configured with a sufficiently large Rx/Tx memory; only then can
2681	 * the jumbo option be enabled.
2682	 * Read the Rx/Tx memory size provisioned in the hardware from the
2683	 * device tree and set the driver flags accordingly.
2684	 */
2685	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2686
2687	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2688						   "xlnx,switch-x-sgmii");
2689
2690	/* Start with the proprietary, and broken phy_type */
2691	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2692	if (!ret) {
2693		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2694		switch (value) {
2695		case XAE_PHY_TYPE_MII:
2696			lp->phy_mode = PHY_INTERFACE_MODE_MII;
2697			break;
2698		case XAE_PHY_TYPE_GMII:
2699			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2700			break;
2701		case XAE_PHY_TYPE_RGMII_2_0:
2702			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2703			break;
2704		case XAE_PHY_TYPE_SGMII:
2705			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2706			break;
2707		case XAE_PHY_TYPE_1000BASE_X:
2708			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2709			break;
2710		default:
2711			ret = -EINVAL;
2712			goto cleanup_clk;
2713		}
2714	} else {
2715		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2716		if (ret)
2717			goto cleanup_clk;
2718	}
2719	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2720	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2721		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2722		ret = -EINVAL;
2723		goto cleanup_clk;
2724	}
2725
2726	if (!of_property_present(pdev->dev.of_node, "dmas")) {
2727		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2728		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2729
2730		if (np) {
2731			struct resource dmares;
2732
2733			ret = of_address_to_resource(np, 0, &dmares);
2734			if (ret) {
2735				dev_err(&pdev->dev,
2736					"unable to get DMA resource\n");
2737				of_node_put(np);
2738				goto cleanup_clk;
2739			}
2740			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2741							     &dmares);
2742			lp->rx_irq = irq_of_parse_and_map(np, 1);
2743			lp->tx_irq = irq_of_parse_and_map(np, 0);
2744			of_node_put(np);
2745			lp->eth_irq = platform_get_irq_optional(pdev, 0);
2746		} else {
2747			/* Check for these resources directly on the Ethernet node. */
2748			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2749			lp->rx_irq = platform_get_irq(pdev, 1);
2750			lp->tx_irq = platform_get_irq(pdev, 0);
2751			lp->eth_irq = platform_get_irq_optional(pdev, 2);
2752		}
2753		if (IS_ERR(lp->dma_regs)) {
2754			dev_err(&pdev->dev, "could not map DMA regs\n");
2755			ret = PTR_ERR(lp->dma_regs);
2756			goto cleanup_clk;
2757		}
2758		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2759			dev_err(&pdev->dev, "could not determine irqs\n");
2760			ret = -ENOMEM;
2761			goto cleanup_clk;
2762		}
2763
2764		/* Reset core now that clocks are enabled, prior to accessing MDIO */
2765		ret = __axienet_device_reset(lp);
2766		if (ret)
2767			goto cleanup_clk;
2768
2769		/* Autodetect the need for 64-bit DMA pointers.
2770		 * When the IP is configured for a bus width bigger than 32 bits,
2771		 * writing the MSB registers is mandatory, even if they are all 0.
2772		 * We can detect this case by writing all 1's to one such register
2773		 * and seeing if the value sticks: when the IP is configured for
2774		 * 32 bits only, those registers are RES0.
2775		 * Those MSB registers were introduced in IP v7.1, which we check first.
2776		 */
2777		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2778			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2779
2780			iowrite32(0x0, desc);
2781			if (ioread32(desc) == 0) {	/* sanity check */
2782				iowrite32(0xffffffff, desc);
2783				if (ioread32(desc) > 0) {
2784					lp->features |= XAE_FEATURE_DMA_64BIT;
2785					addr_width = 64;
2786					dev_info(&pdev->dev,
2787						 "autodetected 64-bit DMA range\n");
2788				}
2789				iowrite32(0x0, desc);
2790			}
2791		}
2792		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2793			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2794			ret = -EINVAL;
2795			goto cleanup_clk;
2796		}
2797
2798		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2799		if (ret) {
2800			dev_err(&pdev->dev, "No suitable DMA available\n");
2801			goto cleanup_clk;
2802		}
2803		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2804		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2805	} else {
2806		struct xilinx_vdma_config cfg;
2807		struct dma_chan *tx_chan;
2808
2809		lp->eth_irq = platform_get_irq_optional(pdev, 0);
2810		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2811			ret = lp->eth_irq;
2812			goto cleanup_clk;
2813		}
2814		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2815		if (IS_ERR(tx_chan)) {
2816			ret = PTR_ERR(tx_chan);
2817			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2818			goto cleanup_clk;
2819		}
2820
2821		cfg.reset = 1;
2822		/* Despite the VDMA name, this config call supports DMA channel reset */
2823		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2824		if (ret < 0) {
2825			dev_err(&pdev->dev, "Reset channel failed\n");
2826			dma_release_channel(tx_chan);
2827			goto cleanup_clk;
2828		}
2829
2830		dma_release_channel(tx_chan);
2831		lp->use_dmaengine = 1;
2832	}
2833
2834	if (lp->use_dmaengine)
2835		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2836	else
2837		ndev->netdev_ops = &axienet_netdev_ops;
2838	/* Check for Ethernet core IRQ (optional) */
2839	if (lp->eth_irq <= 0)
2840		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2841
2842	/* Retrieve the MAC address */
2843	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2844	if (!ret) {
2845		axienet_set_mac_address(ndev, mac_addr);
2846	} else {
2847		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2848			 ret);
2849		axienet_set_mac_address(ndev, NULL);
2850	}
2851
2852	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2853	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2854	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2855	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2856
2857	ret = axienet_mdio_setup(lp);
2858	if (ret)
2859		dev_warn(&pdev->dev,
2860			 "error registering MDIO bus: %d\n", ret);
2861
2862	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2863	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2864		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2865		if (!np) {
2866			/* Deprecated: Always use "pcs-handle" for pcs_phy.
2867			 * Falling back to "phy-handle" here is only for
2868			 * backward compatibility with old device trees.
2869			 */
2870			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2871		}
2872		if (!np) {
2873			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2874			ret = -EINVAL;
2875			goto cleanup_mdio;
2876		}
2877		lp->pcs_phy = of_mdio_find_device(np);
2878		if (!lp->pcs_phy) {
2879			ret = -EPROBE_DEFER;
2880			of_node_put(np);
2881			goto cleanup_mdio;
2882		}
2883		of_node_put(np);
2884		lp->pcs.ops = &axienet_pcs_ops;
2885		lp->pcs.neg_mode = true;
2886		lp->pcs.poll = true;
2887	}
2888
2889	lp->phylink_config.dev = &ndev->dev;
2890	lp->phylink_config.type = PHYLINK_NETDEV;
2891	lp->phylink_config.mac_managed_pm = true;
2892	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2893		MAC_10FD | MAC_100FD | MAC_1000FD;
2894
2895	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2896	if (lp->switch_x_sgmii) {
2897		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2898			  lp->phylink_config.supported_interfaces);
2899		__set_bit(PHY_INTERFACE_MODE_SGMII,
2900			  lp->phylink_config.supported_interfaces);
2901	}
2902
2903	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2904				     lp->phy_mode,
2905				     &axienet_phylink_ops);
2906	if (IS_ERR(lp->phylink)) {
2907		ret = PTR_ERR(lp->phylink);
2908		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2909		goto cleanup_mdio;
2910	}
2911
2912	ret = register_netdev(lp->ndev);
2913	if (ret) {
2914		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2915		goto cleanup_phylink;
2916	}
2917
2918	return 0;
2919
2920cleanup_phylink:
2921	phylink_destroy(lp->phylink);
2922
2923cleanup_mdio:
2924	if (lp->pcs_phy)
2925		put_device(&lp->pcs_phy->dev);
2926	if (lp->mii_bus)
2927		axienet_mdio_teardown(lp);
2928cleanup_clk:
2929	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2930	clk_disable_unprepare(lp->axi_clk);
2931
2932free_netdev:
2933	free_netdev(ndev);
2934
2935	return ret;
2936}
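
/*
 * Editor's note: a hypothetical device-tree fragment, for illustration only,
 * showing the two DMA wirings the probe above distinguishes: a "dmas"/
 * "dma-names" pair selects the dmaengine path, otherwise an
 * "axistream-connected" phandle (or DMA reg/interrupt entries on the Ethernet
 * node itself) selects the built-in Axi DMA handling. Labels, addresses and
 * channel names are made-up assumptions.
 *
 *	ethernet@40c00000 {
 *		compatible = "xlnx,axi-ethernet-1.00.a";
 *		reg = <0x40c00000 0x40000>;
 *		xlnx,rxmem = <0x1000>;
 *		phy-mode = "rgmii-id";
 *		axistream-connected = <&axi_dma_0>;
 *		// or, for the dmaengine path:
 *		// dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
 *		// dma-names = "tx_chan0", "rx_chan0";
 *	};
 */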
2937
2938static void axienet_remove(struct platform_device *pdev)
2939{
2940	struct net_device *ndev = platform_get_drvdata(pdev);
2941	struct axienet_local *lp = netdev_priv(ndev);
2942
2943	unregister_netdev(ndev);
2944
2945	if (lp->phylink)
2946		phylink_destroy(lp->phylink);
2947
2948	if (lp->pcs_phy)
2949		put_device(&lp->pcs_phy->dev);
2950
2951	axienet_mdio_teardown(lp);
2952
2953	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2954	clk_disable_unprepare(lp->axi_clk);
2955
2956	free_netdev(ndev);
2957}
2958
2959static void axienet_shutdown(struct platform_device *pdev)
2960{
2961	struct net_device *ndev = platform_get_drvdata(pdev);
2962
2963	rtnl_lock();
2964	netif_device_detach(ndev);
2965
2966	if (netif_running(ndev))
2967		dev_close(ndev);
2968
2969	rtnl_unlock();
2970}
2971
2972static int axienet_suspend(struct device *dev)
2973{
2974	struct net_device *ndev = dev_get_drvdata(dev);
2975
2976	if (!netif_running(ndev))
2977		return 0;
2978
2979	netif_device_detach(ndev);
2980
2981	rtnl_lock();
2982	axienet_stop(ndev);
2983	rtnl_unlock();
2984
2985	return 0;
2986}
2987
2988static int axienet_resume(struct device *dev)
2989{
2990	struct net_device *ndev = dev_get_drvdata(dev);
2991
2992	if (!netif_running(ndev))
2993		return 0;
2994
2995	rtnl_lock();
2996	axienet_open(ndev);
2997	rtnl_unlock();
2998
2999	netif_device_attach(ndev);
3000
3001	return 0;
3002}
3003
3004static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
3005				axienet_suspend, axienet_resume);
3006
3007static struct platform_driver axienet_driver = {
3008	.probe = axienet_probe,
3009	.remove = axienet_remove,
3010	.shutdown = axienet_shutdown,
3011	.driver = {
3012		 .name = "xilinx_axienet",
3013		 .pm = &axienet_pm_ops,
3014		 .of_match_table = axienet_of_match,
3015	},
3016};
3017
3018module_platform_driver(axienet_driver);
3019
3020MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3021MODULE_AUTHOR("Xilinx");
3022MODULE_LICENSE("GPL");
v3.5.6
 
   1/*
   2 * Xilinx Axi Ethernet device driver
   3 *
   4 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
   5 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
   6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
   7 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
   8 * Copyright (c) 2010 - 2011 PetaLogix
 
   9 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  10 *
  11 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
  12 * and Spartan6.
  13 *
  14 * TODO:
  15 *  - Add Axi Fifo support.
  16 *  - Factor out Axi DMA code into separate driver.
  17 *  - Test and fix basic multicast filtering.
  18 *  - Add support for extended multicast filtering.
  19 *  - Test basic VLAN support.
  20 *  - Add support for extended VLAN support.
  21 */
  22
 
  23#include <linux/delay.h>
  24#include <linux/etherdevice.h>
  25#include <linux/init.h>
  26#include <linux/module.h>
  27#include <linux/netdevice.h>
 
  28#include <linux/of_mdio.h>
  29#include <linux/of_platform.h>
 
  30#include <linux/of_address.h>
 
  31#include <linux/skbuff.h>
  32#include <linux/spinlock.h>
  33#include <linux/phy.h>
  34#include <linux/mii.h>
  35#include <linux/ethtool.h>
 
 
 
 
 
  36
  37#include "xilinx_axienet.h"
  38
  39/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
  40#define TX_BD_NUM		64
  41#define RX_BD_NUM		128
 
 
 
 
 
 
  42
  43/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
  44#define DRIVER_NAME		"xaxienet"
  45#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
  46#define DRIVER_VERSION		"1.00a"
  47
  48#define AXIENET_REGS_N		32
 
 
  49
  50/* Match table for of_platform binding */
  51static struct of_device_id axienet_of_match[] __devinitdata = {
  52	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
  53	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
  54	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
  55	{},
  56};
  57
  58MODULE_DEVICE_TABLE(of, axienet_of_match);
  59
  60/* Option table for setting up Axi Ethernet hardware options */
  61static struct axienet_option axienet_options[] = {
  62	/* Turn on jumbo packet support for both Rx and Tx */
  63	{
  64		.opt = XAE_OPTION_JUMBO,
  65		.reg = XAE_TC_OFFSET,
  66		.m_or = XAE_TC_JUM_MASK,
  67	}, {
  68		.opt = XAE_OPTION_JUMBO,
  69		.reg = XAE_RCW1_OFFSET,
  70		.m_or = XAE_RCW1_JUM_MASK,
  71	}, { /* Turn on VLAN packet support for both Rx and Tx */
  72		.opt = XAE_OPTION_VLAN,
  73		.reg = XAE_TC_OFFSET,
  74		.m_or = XAE_TC_VLAN_MASK,
  75	}, {
  76		.opt = XAE_OPTION_VLAN,
  77		.reg = XAE_RCW1_OFFSET,
  78		.m_or = XAE_RCW1_VLAN_MASK,
  79	}, { /* Turn on FCS stripping on receive packets */
  80		.opt = XAE_OPTION_FCS_STRIP,
  81		.reg = XAE_RCW1_OFFSET,
  82		.m_or = XAE_RCW1_FCS_MASK,
  83	}, { /* Turn on FCS insertion on transmit packets */
  84		.opt = XAE_OPTION_FCS_INSERT,
  85		.reg = XAE_TC_OFFSET,
  86		.m_or = XAE_TC_FCS_MASK,
  87	}, { /* Turn off length/type field checking on receive packets */
  88		.opt = XAE_OPTION_LENTYPE_ERR,
  89		.reg = XAE_RCW1_OFFSET,
  90		.m_or = XAE_RCW1_LT_DIS_MASK,
  91	}, { /* Turn on Rx flow control */
  92		.opt = XAE_OPTION_FLOW_CONTROL,
  93		.reg = XAE_FCC_OFFSET,
  94		.m_or = XAE_FCC_FCRX_MASK,
  95	}, { /* Turn on Tx flow control */
  96		.opt = XAE_OPTION_FLOW_CONTROL,
  97		.reg = XAE_FCC_OFFSET,
  98		.m_or = XAE_FCC_FCTX_MASK,
  99	}, { /* Turn on promiscuous frame filtering */
 100		.opt = XAE_OPTION_PROMISC,
 101		.reg = XAE_FMI_OFFSET,
 102		.m_or = XAE_FMI_PM_MASK,
 103	}, { /* Enable transmitter */
 104		.opt = XAE_OPTION_TXEN,
 105		.reg = XAE_TC_OFFSET,
 106		.m_or = XAE_TC_TX_MASK,
 107	}, { /* Enable receiver */
 108		.opt = XAE_OPTION_RXEN,
 109		.reg = XAE_RCW1_OFFSET,
 110		.m_or = XAE_RCW1_RX_MASK,
 111	},
 112	{}
 113};
 114
 
 
 
 
 
 
 
 
 
 
 115/**
 116 * axienet_dma_in32 - Memory mapped Axi DMA register read
 117 * @lp:		Pointer to axienet local structure
 118 * @reg:	Address offset from the base address of the Axi DMA core
 119 *
 120 * returns: The contents of the Axi DMA register
 121 *
 122 * This function returns the contents of the corresponding Axi DMA register.
 123 */
 124static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 125{
 126	return in_be32(lp->dma_regs + reg);
 127}
 128
 129/**
 130 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 131 * @lp:		Pointer to axienet local structure
 132 * @reg:	Address offset from the base address of the Axi DMA core
 133 * @value:	Value to be written into the Axi DMA register
 134 *
 135 * This function writes the desired value into the corresponding Axi DMA
 136 * register.
 137 */
 138static inline void axienet_dma_out32(struct axienet_local *lp,
 139				     off_t reg, u32 value)
 140{
 141	out_be32((lp->dma_regs + reg), value);
 
 
 
 
 
 142}
 143
 144/**
 145 * axienet_dma_bd_release - Release buffer descriptor rings
 146 * @ndev:	Pointer to the net_device structure
 147 *
 148 * This function is used to release the descriptors allocated in
 149 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 150 * driver stop api is called.
 151 */
 152static void axienet_dma_bd_release(struct net_device *ndev)
 153{
 154	int i;
 155	struct axienet_local *lp = netdev_priv(ndev);
 156
 157	for (i = 0; i < RX_BD_NUM; i++) {
 158		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
 159				 lp->max_frm_size, DMA_FROM_DEVICE);
 160		dev_kfree_skb((struct sk_buff *)
 161			      (lp->rx_bd_v[i].sw_id_offset));
 162	}
 163
 164	if (lp->rx_bd_v) {
 165		dma_free_coherent(ndev->dev.parent,
 166				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
 167				  lp->rx_bd_v,
 168				  lp->rx_bd_p);
 169	}
 170	if (lp->tx_bd_v) {
 171		dma_free_coherent(ndev->dev.parent,
 172				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 173				  lp->tx_bd_v,
 174				  lp->tx_bd_p);
 
 
 
 
 
 
 
 
 
 
 
 175	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 176}
 177
 178/**
 179 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 180 * @ndev:	Pointer to the net_device structure
 181 *
 182 * returns: 0, on success
 183 *	    -ENOMEM, on failure
 184 *
 185 * This function is called to initialize the Rx and Tx DMA descriptor
 186 * rings. This initializes the descriptors with required default values
 187 * and is called when Axi Ethernet driver reset is called.
 188 */
 189static int axienet_dma_bd_init(struct net_device *ndev)
 190{
 191	u32 cr;
 192	int i;
 193	struct sk_buff *skb;
 194	struct axienet_local *lp = netdev_priv(ndev);
 195
 196	/* Reset the indexes which are used for accessing the BDs */
 197	lp->tx_bd_ci = 0;
 198	lp->tx_bd_tail = 0;
 199	lp->rx_bd_ci = 0;
 200
 201	/*
 202	 * Allocate the Tx and Rx buffer descriptors.
 203	 */
 204	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 205					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 206					 &lp->tx_bd_p,
 207					 GFP_KERNEL);
 208	if (!lp->tx_bd_v) {
 209		dev_err(&ndev->dev, "unable to allocate DMA Tx buffer "
 210			"descriptors");
 211		goto out;
 212	}
 213
 214	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 215					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
 216					 &lp->rx_bd_p,
 217					 GFP_KERNEL);
 218	if (!lp->rx_bd_v) {
 219		dev_err(&ndev->dev, "unable to allocate DMA Rx buffer "
 220			"descriptors");
 221		goto out;
 222	}
 223
 224	memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
 225	for (i = 0; i < TX_BD_NUM; i++) {
 226		lp->tx_bd_v[i].next = lp->tx_bd_p +
 227				      sizeof(*lp->tx_bd_v) *
 228				      ((i + 1) % TX_BD_NUM);
 229	}
 
 
 
 
 
 
 
 
 
 
 
 
 230
 231	memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
 232	for (i = 0; i < RX_BD_NUM; i++) {
 233		lp->rx_bd_v[i].next = lp->rx_bd_p +
 234				      sizeof(*lp->rx_bd_v) *
 235				      ((i + 1) % RX_BD_NUM);
 236
 237		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 238		if (!skb) {
 239			dev_err(&ndev->dev, "alloc_skb error %d\n", i);
 
 
 240			goto out;
 241		}
 
 242
 243		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
 244		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
 245						     skb->data,
 246						     lp->max_frm_size,
 247						     DMA_FROM_DEVICE);
 248		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 249	}
 250
 251	/* Start updating the Rx channel control register */
 252	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 253	/* Update the interrupt coalesce count */
 254	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
 255	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
 256	/* Update the delay timer count */
 257	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
 258	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
 259	/* Enable coalesce, delay timer and error interrupts */
 260	cr |= XAXIDMA_IRQ_ALL_MASK;
 261	/* Write to the Rx channel control register */
 262	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 263
 264	/* Start updating the Tx channel control register */
 265	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 266	/* Update the interrupt coalesce count */
 267	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
 268	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
 269	/* Update the delay timer count */
 270	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
 271	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
 272	/* Enable coalesce, delay timer and error interrupts */
 273	cr |= XAXIDMA_IRQ_ALL_MASK;
 274	/* Write to the Tx channel control register */
 275	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 276
 277	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 278	 * halted state. This will make the Rx side ready for reception.*/
 279	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 280	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 281	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
 282			  cr | XAXIDMA_CR_RUNSTOP_MASK);
 283	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
 284			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
 285
 286	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 287	 * Tx channel is now ready to run. But only after we write to the
 288	 * tail pointer register that the Tx channel will start transmitting */
 289	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 290	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 291	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
 292			  cr | XAXIDMA_CR_RUNSTOP_MASK);
 293
 294	return 0;
 295out:
 296	axienet_dma_bd_release(ndev);
 297	return -ENOMEM;
 298}
 299
 300/**
 301 * axienet_set_mac_address - Write the MAC address
 302 * @ndev:	Pointer to the net_device structure
 303 * @address:	6 byte Address to be written as MAC address
 304 *
 305 * This function is called to initialize the MAC address of the Axi Ethernet
 306 * core. It writes to the UAW0 and UAW1 registers of the core.
 307 */
 308static void axienet_set_mac_address(struct net_device *ndev, void *address)
 
 309{
 310	struct axienet_local *lp = netdev_priv(ndev);
 311
 312	if (address)
 313		memcpy(ndev->dev_addr, address, ETH_ALEN);
 314	if (!is_valid_ether_addr(ndev->dev_addr))
 315		random_ether_addr(ndev->dev_addr);
 316
 317	/* Set up unicast MAC address filter set its mac address */
 318	axienet_iow(lp, XAE_UAW0_OFFSET,
 319		    (ndev->dev_addr[0]) |
 320		    (ndev->dev_addr[1] << 8) |
 321		    (ndev->dev_addr[2] << 16) |
 322		    (ndev->dev_addr[3] << 24));
 323	axienet_iow(lp, XAE_UAW1_OFFSET,
 324		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
 325		      ~XAE_UAW1_UNICASTADDR_MASK) |
 326		     (ndev->dev_addr[4] |
 327		     (ndev->dev_addr[5] << 8))));
 328}
 329
 330/**
 331 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 332 * @ndev:	Pointer to the net_device structure
 333 * @p:		6 byte Address to be written as MAC address
 334 *
 335 * returns: 0 for all conditions. Presently, there is no failure case.
 336 *
 337 * This function is called to initialize the MAC address of the Axi Ethernet
 338 * core. It calls the core specific axienet_set_mac_address. This is the
 339 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 340 */
 341static int netdev_set_mac_address(struct net_device *ndev, void *p)
 342{
 343	struct sockaddr *addr = p;
 
 344	axienet_set_mac_address(ndev, addr->sa_data);
 345	return 0;
 346}
 347
 348/**
 349 * axienet_set_multicast_list - Prepare the multicast table
 350 * @ndev:	Pointer to the net_device structure
 351 *
 352 * This function is called to initialize the multicast table during
 353 * initialization. The Axi Ethernet basic multicast support has a four-entry
 354 * multicast table which is initialized here. Additionally this function
 355 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 356 * means whenever the multicast table entries need to be updated this
 357 * function gets called.
 358 */
 359static void axienet_set_multicast_list(struct net_device *ndev)
 360{
 361	int i;
 362	u32 reg, af0reg, af1reg;
 363	struct axienet_local *lp = netdev_priv(ndev);
 364
 365	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
 366	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
 367		/* We must make the kernel realize we had to move into
 368		 * promiscuous mode. If it was a promiscuous mode request
 369		 * the flag is already set. If not we set it. */
 370		ndev->flags |= IFF_PROMISC;
 371		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 372		reg |= XAE_FMI_PM_MASK;
 373		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 374		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
 375	} else if (!netdev_mc_empty(ndev)) {
 376		struct netdev_hw_addr *ha;
 377
 378		i = 0;
 379		netdev_for_each_mc_addr(ha, ndev) {
 380			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
 381				break;
 382
 383			af0reg = (ha->addr[0]);
 384			af0reg |= (ha->addr[1] << 8);
 385			af0reg |= (ha->addr[2] << 16);
 386			af0reg |= (ha->addr[3] << 24);
 387
 388			af1reg = (ha->addr[4]);
 389			af1reg |= (ha->addr[5] << 8);
 390
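			/* The low byte of FMI selects which of the four
			 * multicast CAM entries the AF0/AF1 writes below
			 * will address.
			 */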
 391			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 392			reg |= i;
 393
 394			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 395			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
 396			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
 397			i++;
 398		}
 399	} else {
 400		reg = axienet_ior(lp, XAE_FMI_OFFSET);
 401		reg &= ~XAE_FMI_PM_MASK;
 402
 403		axienet_iow(lp, XAE_FMI_OFFSET, reg);
 404
 405		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
 406			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
 407			reg |= i;
 408
 409			axienet_iow(lp, XAE_FMI_OFFSET, reg);
 410			axienet_iow(lp, XAE_AF0_OFFSET, 0);
 411			axienet_iow(lp, XAE_AF1_OFFSET, 0);
 412		}
 413
 414		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
 415	}
 416}
 417
 418/**
 419 * axienet_setoptions - Set an Axi Ethernet option
 420 * @ndev:	Pointer to the net_device structure
 421 * @options:	Option to be enabled/disabled
 422 *
 423 * The Axi Ethernet core has multiple features which can be selectively turned
 424 * on or off. The typical options could be jumbo frame option, basic VLAN
 425 * option, promiscuous mode option etc. This function is used to set or clear
 426 * these options in the Axi Ethernet hardware. This is done through
 427 * the axienet_option structure.
 428 */
 429static void axienet_setoptions(struct net_device *ndev, u32 options)
 430{
 431	int reg;
 432	struct axienet_local *lp = netdev_priv(ndev);
 433	struct axienet_option *tp = &axienet_options[0];
 434
 435	while (tp->opt) {
 436		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
 437		if (options & tp->opt)
 438			reg |= tp->m_or;
 439		axienet_iow(lp, tp->reg, reg);
 440		tp++;
 441	}
 442
 443	lp->options |= options;
 444}
 445
 446static void __axienet_device_reset(struct axienet_local *lp,
 447				   struct device *dev, off_t offset)
 448{
 449	u32 timeout;
 450	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 451	 * process of Axi DMA takes a while to complete as all pending
 452	 * commands/transfers will be flushed or completed during this
 453	 * reset process. */
 454	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
 455	timeout = DELAY_OF_ONE_MILLISEC;
 456	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
 457		udelay(1);
 458		if (--timeout == 0) {
 459			dev_err(dev, "axienet_device_reset DMA reset timeout!\n");
 461			break;
 462		}
 463	}
 464}
 465
 466/**
 467 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 468 * @ndev:	Pointer to the net_device structure
 469 *
 470 * This function is called to reset and initialize the Axi Ethernet core. This
 471 * is typically called during initialization. It does a reset of the Axi DMA
 472 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 473 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 474 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 475 * core.
 476 */
 477static void axienet_device_reset(struct net_device *ndev)
 478{
 479	u32 axienet_status;
 480	struct axienet_local *lp = netdev_priv(ndev);
 481
 482	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
 483	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
 484
 485	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 
 486	lp->options &= (~XAE_OPTION_JUMBO);
 487
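	/* With jumbo support enabled, size frames for the requested MTU plus
	 * the Ethernet/VLAN header and the FCS trailer.
	 */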
 488	if ((ndev->mtu > XAE_MTU) &&
 489	    (ndev->mtu <= XAE_JUMBO_MTU) &&
 490	    (lp->jumbo_support)) {
 491		lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
 492				   XAE_TRL_SIZE;
 493		lp->options |= XAE_OPTION_JUMBO;
 494	}
 495
 496	if (axienet_dma_bd_init(ndev)) {
 497		dev_err(&ndev->dev, "axienet_device_reset descriptor allocation failed\n");
 499	}
 500
 501	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
 502	axienet_status &= ~XAE_RCW1_RX_MASK;
 503	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
 504
 505	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
 506	if (axienet_status & XAE_INT_RXRJECT_MASK)
 507		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
 508
 509	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 510
 511	/* Sync default options with HW but leave receiver and
 512	 * transmitter disabled.*/
 
 513	axienet_setoptions(ndev, lp->options &
 514			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 515	axienet_set_mac_address(ndev, NULL);
 516	axienet_set_multicast_list(ndev);
 517	axienet_setoptions(ndev, lp->options);
 518
 519	ndev->trans_start = jiffies;
 520}
 521
 522/**
 523 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 524 * @ndev:	Pointer to the net_device structure
 525 *
 526 * This function is called to change the speed and duplex setting after
 527 * auto negotiation is done by the PHY. This is the function that gets
 528 * registered with the PHY interface through the "of_phy_connect" call.
 529 */
 530static void axienet_adjust_link(struct net_device *ndev)
 531{
 532	u32 emmc_reg;
 533	u32 link_state;
 534	u32 setspeed = 1;
 535	struct axienet_local *lp = netdev_priv(ndev);
 536	struct phy_device *phy = lp->phy_dev;
 537
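	/* Fold speed, duplex and link state into one value so a single
	 * compare against lp->last_link detects any change.
	 */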
 538	link_state = phy->speed | (phy->duplex << 1) | phy->link;
 539	if (lp->last_link != link_state) {
 540		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
 541			if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
 542				setspeed = 0;
 543		} else {
 544			if ((phy->speed == SPEED_1000) &&
 545			    (lp->phy_type == XAE_PHY_TYPE_MII))
 546				setspeed = 0;
 547		}
 548
 549		if (setspeed == 1) {
 550			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
 551			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
 552
 553			switch (phy->speed) {
 554			case SPEED_1000:
 555				emmc_reg |= XAE_EMMC_LINKSPD_1000;
 556				break;
 557			case SPEED_100:
 558				emmc_reg |= XAE_EMMC_LINKSPD_100;
 559				break;
 560			case SPEED_10:
 561				emmc_reg |= XAE_EMMC_LINKSPD_10;
 562				break;
 563			default:
 564				dev_err(&ndev->dev, "Speed other than 10, 100 or 1Gbps is not supported\n");
 566				break;
 567			}
 568
 569			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
 570			lp->last_link = link_state;
 571			phy_print_status(phy);
 572		} else {
 573			dev_err(&ndev->dev, "Error setting Axi Ethernet mac speed\n");
 575		}
 576	}
 577}
 578
 579/**
 580 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 581 * Axi DMA Tx channel.
 582 * @ndev:	Pointer to the net_device structure
 583 *
 584 * This function is invoked from the Axi DMA Tx isr to notify the completion
 585 * of transmit operation. It clears fields in the corresponding Tx BDs and
 586 * unmaps the corresponding buffer so that CPU can regain ownership of the
 587 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 588 * required.
 589 */
 590static void axienet_start_xmit_done(struct net_device *ndev)
 591{
 592	u32 size = 0;
 593	u32 packets = 0;
 594	struct axienet_local *lp = netdev_priv(ndev);
 595	struct axidma_bd *cur_p;
 596	unsigned int status = 0;
 597
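	/* Walk the Tx ring from the consumer index, reclaiming every BD that
	 * the DMA engine has marked complete: unmap its buffer, free the skb
	 * stashed in app4 and clear the descriptor for reuse.
	 */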
 598	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
 599	status = cur_p->status;
 600	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
 601		dma_unmap_single(ndev->dev.parent, cur_p->phys,
 602				(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 603				DMA_TO_DEVICE);
 604		if (cur_p->app4)
 605			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
 606		/*cur_p->phys = 0;*/
 607		cur_p->app0 = 0;
 608		cur_p->app1 = 0;
 609		cur_p->app2 = 0;
 610		cur_p->app4 = 0;
 611		cur_p->status = 0;
 612
 613		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 614		packets++;
 
 615
 616		lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;
 617		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
 618		status = cur_p->status;
 
 619	}
 620
 621	ndev->stats.tx_packets += packets;
 622	ndev->stats.tx_bytes += size;
 623	netif_wake_queue(ndev);
 624}
 625
 626/**
 627 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 628 * @lp:		Pointer to the axienet_local structure
 629 * @num_frag:	The number of BDs to check for
 630 *
 631 * returns: 0, on success
 632 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 633 *
 634 * This function is invoked before BDs are allocated and transmission starts.
 635 * This function returns 0 if a BD or group of BDs can be allocated for
 636 * transmission. If the BD or any of the BDs are not free the function
 637 * returns a busy status. This is invoked from axienet_start_xmit.
 638 */
 639static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 640					    int num_frag)
 641{
 642	struct axidma_bd *cur_p;
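	/* If the BD num_frag slots past the tail still has status bits set,
	 * it has not been reclaimed yet, so the ring cannot hold a frame
	 * that needs num_frag + 1 descriptors.
	 */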
 643	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
 644	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
 645		return NETDEV_TX_BUSY;
 646	return 0;
 647}
 648
 649/**
 650 * axienet_start_xmit - Starts the transmission.
 651 * @skb:	sk_buff pointer that contains data to be Txed.
 652 * @ndev:	Pointer to net_device structure.
 653 *
 654 * returns: NETDEV_TX_OK, on success
 655 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 656 *
 657 * This function is invoked from upper layers to initiate transmission. The
 658 * function uses the next available free BDs and populates their fields to
 659 * start the transmission. Additionally if checksum offloading is supported,
 660 * it populates AXI Stream Control fields with appropriate values.
 661 */
 662static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 663{
 664	u32 ii;
 665	u32 num_frag;
 666	u32 csum_start_off;
 667	u32 csum_index_off;
 668	skb_frag_t *frag;
 669	dma_addr_t tail_p;
 
 670	struct axienet_local *lp = netdev_priv(ndev);
 671	struct axidma_bd *cur_p;
 672
 673	num_frag = skb_shinfo(skb)->nr_frags;
 674	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 675
 676	if (axienet_check_tx_bd_space(lp, num_frag)) {
 677		if (!netif_queue_stopped(ndev))
 678			netif_stop_queue(ndev);
 679		return NETDEV_TX_BUSY;
 680	}
 681
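	/* app0/app1 of the first BD carry the checksum offload request that
	 * the core picks up from the AXI Stream control words: 2 selects
	 * full checksum offload, 1 selects partial offload with the start
	 * and insert offsets packed into app1.
	 */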
 682	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 683		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 684			/* Tx Full Checksum Offload Enabled */
 685			cur_p->app0 |= 2;
 686		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
 687			csum_start_off = skb_transport_offset(skb);
 688			csum_index_off = csum_start_off + skb->csum_offset;
 689			/* Tx Partial Checksum Offload Enabled */
 690			cur_p->app0 |= 1;
 691			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
 692		}
 693	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 694		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
 695	}
 696
 697	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 698	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
 699				     skb_headlen(skb), DMA_TO_DEVICE);
 700
 701	for (ii = 0; ii < num_frag; ii++) {
 702		lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
 703		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
 704		frag = &skb_shinfo(skb)->frags[ii];
 705		cur_p->phys = dma_map_single(ndev->dev.parent,
 706					     skb_frag_address(frag),
 707					     skb_frag_size(frag),
 708					     DMA_TO_DEVICE);
 709		cur_p->cntrl = skb_frag_size(frag);
 710	}
 711
 712	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
 713	cur_p->app4 = (unsigned long)skb;
 714
 715	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 716	/* Start the transfer */
 717	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
 718	lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
 719
 720	return NETDEV_TX_OK;
 721}
 722
 723/**
 724 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 725 *		  BD processing.
 726 * @ndev:	Pointer to net_device structure.
 727 *
 728 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 729 * does minimal processing and invokes "netif_rx" to complete further
 730 * processing.
 731 */
 732static void axienet_recv(struct net_device *ndev)
 733{
 734	u32 length;
 735	u32 csumstatus;
 736	u32 size = 0;
 737	u32 packets = 0;
 738	dma_addr_t tail_p;
 739	struct axienet_local *lp = netdev_priv(ndev);
 740	struct sk_buff *skb, *new_skb;
 741	struct axidma_bd *cur_p;
 742
 743	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
 744	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 745
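	/* For each completed Rx BD: unmap the buffer, hand the skb to the
	 * stack, then allocate a replacement skb and re-arm the descriptor
	 * before the tail pointer is advanced below.
	 */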
 746	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
 747		skb = (struct sk_buff *) (cur_p->sw_id_offset);
 748		length = cur_p->app4 & 0x0000FFFF;
 749
 750		dma_unmap_single(ndev->dev.parent, cur_p->phys,
 751				 lp->max_frm_size,
 752				 DMA_FROM_DEVICE);
 753
 754		skb_put(skb, length);
 755		skb->protocol = eth_type_trans(skb, ndev);
 756		/*skb_checksum_none_assert(skb);*/
 757		skb->ip_summed = CHECKSUM_NONE;
 758
 759		/* if we're doing Rx csum offload, set it up */
 760		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
 761			csumstatus = (cur_p->app2 &
 762				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
 763			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
 764			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
 765				skb->ip_summed = CHECKSUM_UNNECESSARY;
 766			}
 767		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
 768			   skb->protocol == __constant_htons(ETH_P_IP) &&
 769			   skb->len > 64) {
 770			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
 771			skb->ip_summed = CHECKSUM_COMPLETE;
 772		}
 773
 774		netif_rx(skb);
 775
 776		size += length;
 777		packets++;
 778
 779		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 780		if (!new_skb) {
 781			dev_err(&ndev->dev, "no memory for new sk_buff\n");
 782			return;
 783		}
 784		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
 785					     lp->max_frm_size,
 786					     DMA_FROM_DEVICE);
 787		cur_p->cntrl = lp->max_frm_size;
 788		cur_p->status = 0;
 789		cur_p->sw_id_offset = (u32) new_skb;
 790
 791		lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM;
 792		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 793	}
 794
 795	ndev->stats.rx_packets += packets;
 796	ndev->stats.rx_bytes += size;
 797
 798	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
 799}
 800
 801/**
 802 * axienet_tx_irq - Tx Done Isr.
 803 * @irq:	irq number
 804 * @_ndev:	net_device pointer
 805 *
 806 * returns: IRQ_HANDLED for all cases.
 807 *
 808 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 809 * to complete the BD processing.
 810 */
 811static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
 812{
 813	u32 cr;
 814	unsigned int status;
 815	struct net_device *ndev = _ndev;
 816	struct axienet_local *lp = netdev_priv(ndev);
 817
 818	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 819	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
 820		axienet_start_xmit_done(lp->ndev);
 821		goto out;
 822	}
 823	if (!(status & XAXIDMA_IRQ_ALL_MASK))
 824		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
 825	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 826		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
 827		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
 828			(lp->tx_bd_v[lp->tx_bd_ci]).phys);
 829
 830		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 831		/* Disable coalesce, delay timer and error interrupts */
 832		cr &= (~XAXIDMA_IRQ_ALL_MASK);
 833		/* Write to the Tx channel control register */
 834		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 835
 836		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 837		/* Disable coalesce, delay timer and error interrupts */
 838		cr &= (~XAXIDMA_IRQ_ALL_MASK);
 839		/* Write to the Rx channel control register */
 840		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 841
 842		tasklet_schedule(&lp->dma_err_tasklet);
 843	}
 844out:
 845	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
 846	return IRQ_HANDLED;
 847}
 848
 849/**
 850 * axienet_rx_irq - Rx Isr.
 851 * @irq:	irq number
 852 * @_ndev:	net_device pointer
 853 *
 854 * returns: IRQ_HANDLED for all cases.
 855 *
 856 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 857 * processing.
 858 */
 859static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
 860{
 861	u32 cr;
 862	unsigned int status;
 863	struct net_device *ndev = _ndev;
 864	struct axienet_local *lp = netdev_priv(ndev);
 865
 866	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
 867	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
 868		axienet_recv(lp->ndev);
 869		goto out;
 870	}
 871	if (!(status & XAXIDMA_IRQ_ALL_MASK))
 872		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
 873	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 874		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
 875		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
 876			(lp->rx_bd_v[lp->rx_bd_ci]).phys);
 877
 878		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 879		/* Disable coalesce, delay timer and error interrupts */
 880		cr &= (~XAXIDMA_IRQ_ALL_MASK);
 881		/* Finally write to the Tx channel control register */
 882		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 883
 884		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 885		/* Disable coalesce, delay timer and error interrupts */
 886		cr &= (~XAXIDMA_IRQ_ALL_MASK);
 887		/* write to the Rx channel control register */
 888		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 889
 890		tasklet_schedule(&lp->dma_err_tasklet);
 891	}
 892out:
 893	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
 894	return IRQ_HANDLED;
 895}
 896
 897/**
 898 * axienet_open - Driver open routine.
 899 * @ndev:	Pointer to net_device structure
 900 *
 901 * returns: 0, on success.
 902 *	    -ENODEV, if PHY cannot be connected to
 903 *	    non-zero error value on failure
 904 *
 905 * This is the driver open routine. It calls phy_start to start the PHY device.
 906 * It also requests the Tx/Rx DMA interrupt lines and registers the ISR
 907 * handlers. The Axi Ethernet core is reset through the Axi DMA core. Buffer
 908 * descriptors are initialized.
 909 */
 910static int axienet_open(struct net_device *ndev)
 911{
 912	int ret, mdio_mcreg;
 913	struct axienet_local *lp = netdev_priv(ndev);
 914
 915	dev_dbg(&ndev->dev, "axienet_open()\n");
 916
 917	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
 918	ret = axienet_mdio_wait_until_ready(lp);
 919	if (ret < 0)
 920		return ret;
 921	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
 922	 * When we do an Axi Ethernet reset, it resets the complete core
 923	 * including the MDIO. If MDIO is not disabled when the reset
 924	 * process is started, MDIO will be broken afterwards. */
 925	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
 926		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
 927	axienet_device_reset(ndev);
 928	/* Enable the MDIO */
 929	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
 930	ret = axienet_mdio_wait_until_ready(lp);
 931	if (ret < 0)
 932		return ret;
 
 
 933
 934	if (lp->phy_node) {
 935		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
 936					     axienet_adjust_link, 0,
 937					     PHY_INTERFACE_MODE_GMII);
 938		if (!lp->phy_dev) {
 939			dev_err(lp->dev, "of_phy_connect() failed\n");
 940			return -ENODEV;
 941		}
 942		phy_start(lp->phy_dev);
 943	}
 944
 945	/* Enable interrupts for Axi DMA Tx */
 946	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
 
 947	if (ret)
 948		goto err_tx_irq;
 949	/* Enable interrupts for Axi DMA Rx */
 950	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
 
 951	if (ret)
 952		goto err_rx_irq;
 953	/* Enable tasklets for Axi DMA error handling */
 954	tasklet_enable(&lp->dma_err_tasklet);
 955	return 0;
 956
 957err_rx_irq:
 958	free_irq(lp->tx_irq, ndev);
 959err_tx_irq:
 960	if (lp->phy_dev)
 961		phy_disconnect(lp->phy_dev);
 962	lp->phy_dev = NULL;
 963	dev_err(lp->dev, "request_irq() failed\n");
 964	return ret;
 965}
 966
 967/**
 968 * axienet_stop - Driver stop routine.
 969 * @ndev:	Pointer to net_device structure
 970 *
 971 * returns: 0, on success.
 972 *
 973 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 974 * device. It also removes the interrupt handlers and disables the interrupts.
 975 * The Axi DMA Tx/Rx BDs are released.
 976 */
 977static int axienet_stop(struct net_device *ndev)
 978{
 979	u32 cr;
 980	struct axienet_local *lp = netdev_priv(ndev);
 
 981
 982	dev_dbg(&ndev->dev, "axienet_close()\n");
 983
 984	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 985	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
 986			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
 987	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 988	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
 989			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
 990	axienet_setoptions(ndev, lp->options &
 991			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 992
 993	tasklet_disable(&lp->dma_err_tasklet);
 994
 995	free_irq(lp->tx_irq, ndev);
 996	free_irq(lp->rx_irq, ndev);
 
 997
 998	if (lp->phy_dev)
 999		phy_disconnect(lp->phy_dev);
1000	lp->phy_dev = NULL;
1001
1002	axienet_dma_bd_release(ndev);
 
1003	return 0;
1004}
1005
1006/**
1007 * axienet_change_mtu - Driver change mtu routine.
1008 * @ndev:	Pointer to net_device structure
1009 * @new_mtu:	New mtu value to be applied
1010 *
 1011 * returns: 0 on success, -EBUSY if the device is up, -EINVAL for a bad MTU.
1012 *
1013 * This is the change mtu driver routine. It checks if the Axi Ethernet
1014 * hardware supports jumbo frames before changing the mtu. This can be
1015 * called only when the device is not up.
1016 */
1017static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1018{
1019	struct axienet_local *lp = netdev_priv(ndev);
1020
1021	if (netif_running(ndev))
1022		return -EBUSY;
1023	if (lp->jumbo_support) {
1024		if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
1025			return -EINVAL;
1026		ndev->mtu = new_mtu;
1027	} else {
1028		if ((new_mtu > XAE_MTU) || (new_mtu < 64))
1029			return -EINVAL;
1030		ndev->mtu = new_mtu;
1031	}
1032
1033	return 0;
1034}
1035
1036#ifdef CONFIG_NET_POLL_CONTROLLER
1037/**
1038 * axienet_poll_controller - Axi Ethernet poll mechanism.
1039 * @ndev:	Pointer to net_device structure
1040 *
1041 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 1042 * to polling the ISRs and are re-enabled after the polling is done.
1043 */
1044static void axienet_poll_controller(struct net_device *ndev)
1045{
1046	struct axienet_local *lp = netdev_priv(ndev);
 
1047	disable_irq(lp->tx_irq);
1048	disable_irq(lp->rx_irq);
 1049	axienet_rx_irq(lp->rx_irq, ndev);
 1050	axienet_tx_irq(lp->tx_irq, ndev);
1051	enable_irq(lp->tx_irq);
1052	enable_irq(lp->rx_irq);
1053}
1054#endif
1055
1056static const struct net_device_ops axienet_netdev_ops = {
1057	.ndo_open = axienet_open,
1058	.ndo_stop = axienet_stop,
1059	.ndo_start_xmit = axienet_start_xmit,
 
1060	.ndo_change_mtu	= axienet_change_mtu,
1061	.ndo_set_mac_address = netdev_set_mac_address,
1062	.ndo_validate_addr = eth_validate_addr,
 
1063	.ndo_set_rx_mode = axienet_set_multicast_list,
1064#ifdef CONFIG_NET_POLL_CONTROLLER
1065	.ndo_poll_controller = axienet_poll_controller,
1066#endif
1067};
1068
1069/**
1070 * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
1071 * @ndev:	Pointer to net_device structure
1072 * @ecmd:	Pointer to ethtool_cmd structure
1073 *
1074 * This implements ethtool command for getting PHY settings. If PHY could
1075 * not be found, the function returns -ENODEV. This function calls the
1076 * relevant PHY ethtool API to get the PHY settings.
1077 * Issue "ethtool ethX" under linux prompt to execute this function.
1078 */
1079static int axienet_ethtools_get_settings(struct net_device *ndev,
1080					 struct ethtool_cmd *ecmd)
1081{
1082	struct axienet_local *lp = netdev_priv(ndev);
1083	struct phy_device *phydev = lp->phy_dev;
1084	if (!phydev)
1085		return -ENODEV;
1086	return phy_ethtool_gset(phydev, ecmd);
1087}
1088
1089/**
1090 * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
1091 * @ndev:	Pointer to net_device structure
1092 * @ecmd:	Pointer to ethtool_cmd structure
1093 *
1094 * This implements ethtool command for setting various PHY settings. If PHY
1095 * could not be found, the function returns -ENODEV. This function calls the
1096 * relevant PHY ethtool API to set the PHY.
1097 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
1098 * function.
1099 */
1100static int axienet_ethtools_set_settings(struct net_device *ndev,
1101					 struct ethtool_cmd *ecmd)
1102{
1103	struct axienet_local *lp = netdev_priv(ndev);
1104	struct phy_device *phydev = lp->phy_dev;
1105	if (!phydev)
1106		return -ENODEV;
1107	return phy_ethtool_sset(phydev, ecmd);
1108}
1109
1110/**
1111 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1112 * @ndev:	Pointer to net_device structure
1113 * @ed:		Pointer to ethtool_drvinfo structure
1114 *
1115 * This implements ethtool command for getting the driver information.
1116 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1117 */
1118static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1119					 struct ethtool_drvinfo *ed)
1120{
1121	memset(ed, 0, sizeof(struct ethtool_drvinfo));
1122	strcpy(ed->driver, DRIVER_NAME);
1123	strcpy(ed->version, DRIVER_VERSION);
1124	ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
1125}
1126
1127/**
1128 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1129 *				   AxiEthernet core.
1130 * @ndev:	Pointer to net_device structure
1131 *
1132 * This implements ethtool command for getting the total register length
1133 * information.
1134 */
1135static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1136{
1137	return sizeof(u32) * AXIENET_REGS_N;
1138}
1139
1140/**
1141 * axienet_ethtools_get_regs - Dump the contents of all registers present
1142 *			       in AxiEthernet core.
1143 * @ndev:	Pointer to net_device structure
1144 * @regs:	Pointer to ethtool_regs structure
1145 * @ret:	Void pointer used to return the contents of the registers.
1146 *
1147 * This implements ethtool command for getting the Axi Ethernet register dump.
1148 * Issue "ethtool -d ethX" to execute this function.
1149 */
1150static void axienet_ethtools_get_regs(struct net_device *ndev,
1151				      struct ethtool_regs *regs, void *ret)
1152{
1153	u32 *data = (u32 *) ret;
1154	size_t len = sizeof(u32) * AXIENET_REGS_N;
1155	struct axienet_local *lp = netdev_priv(ndev);
1156
1157	regs->version = 0;
1158	regs->len = len;
1159
1160	memset(data, 0, len);
1161	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1162	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1163	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1164	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1165	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1166	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1167	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1168	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1169	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1170	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1171	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1172	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1173	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1174	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1175	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1176	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1177	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1178	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1179	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1180	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1181	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1182	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1183	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1184	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
1185	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
1186	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
1187	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
1188	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1189	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1190	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1191	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1192	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1193}
1194
1195/**
1196 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1197 *				     Tx and Rx paths.
1198 * @ndev:	Pointer to net_device structure
1199 * @epauseparm:	Pointer to ethtool_pauseparam structure.
1200 *
1201 * This implements ethtool command for getting axi ethernet pause frame
1202 * setting. Issue "ethtool -a ethX" to execute this function.
1203 */
1204static void
1205axienet_ethtools_get_pauseparam(struct net_device *ndev,
1206				struct ethtool_pauseparam *epauseparm)
1207{
1208	u32 regval;
1209	struct axienet_local *lp = netdev_priv(ndev);
1210	epauseparm->autoneg  = 0;
1211	regval = axienet_ior(lp, XAE_FCC_OFFSET);
1212	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
1213	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
1214}
1215
1216/**
1217 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1218 *				     settings.
1219 * @ndev:	Pointer to net_device structure
1220 * @epauseparam:Pointer to ethtool_pauseparam structure
1221 *
1222 * This implements ethtool command for enabling flow control on Rx and Tx
1223 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1224 * function.
1225 */
1226static int
1227axienet_ethtools_set_pauseparam(struct net_device *ndev,
1228				struct ethtool_pauseparam *epauseparm)
1229{
1230	u32 regval = 0;
1231	struct axienet_local *lp = netdev_priv(ndev);
1232
1233	if (netif_running(ndev)) {
 1234		printk(KERN_ERR "%s: Please stop netif before applying configuration\n",
 1235		       ndev->name);
1236		return -EFAULT;
1237	}
1238
1239	regval = axienet_ior(lp, XAE_FCC_OFFSET);
1240	if (epauseparm->tx_pause)
1241		regval |= XAE_FCC_FCTX_MASK;
1242	else
1243		regval &= ~XAE_FCC_FCTX_MASK;
1244	if (epauseparm->rx_pause)
1245		regval |= XAE_FCC_FCRX_MASK;
1246	else
1247		regval &= ~XAE_FCC_FCRX_MASK;
1248	axienet_iow(lp, XAE_FCC_OFFSET, regval);
1249
1250	return 0;
1251}
1252
1253/**
1254 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
1255 * @ndev:	Pointer to net_device structure
1256 * @ecoalesce:	Pointer to ethtool_coalesce structure
1257 *
1258 * This implements ethtool command for getting the DMA interrupt coalescing
1259 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
1260 * execute this function.
1261 */
1262static int axienet_ethtools_get_coalesce(struct net_device *ndev,
1263					 struct ethtool_coalesce *ecoalesce)
1264{
1265	u32 regval = 0;
1266	struct axienet_local *lp = netdev_priv(ndev);
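	/* The frame coalesce count is read back from the COALESCE field of
	 * each DMA channel's control register.
	 */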
1267	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1268	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
1269					     >> XAXIDMA_COALESCE_SHIFT;
1270	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1271	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
1272					     >> XAXIDMA_COALESCE_SHIFT;
1273	return 0;
1274}
1275
1276/**
1277 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
1278 * @ndev:	Pointer to net_device structure
1279 * @ecoalesce:	Pointer to ethtool_coalesce structure
1280 *
1281 * This implements ethtool command for setting the DMA interrupt coalescing
1282 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
1283 * prompt to execute this function.
1284 */
1285static int axienet_ethtools_set_coalesce(struct net_device *ndev,
1286					 struct ethtool_coalesce *ecoalesce)
1287{
1288	struct axienet_local *lp = netdev_priv(ndev);
1289
1290	if (netif_running(ndev)) {
 1291		printk(KERN_ERR "%s: Please stop netif before applying configuration\n",
 1292		       ndev->name);
1293		return -EFAULT;
1294	}
1295
1296	if ((ecoalesce->rx_coalesce_usecs) ||
1297	    (ecoalesce->rx_coalesce_usecs_irq) ||
1298	    (ecoalesce->rx_max_coalesced_frames_irq) ||
1299	    (ecoalesce->tx_coalesce_usecs) ||
1300	    (ecoalesce->tx_coalesce_usecs_irq) ||
1301	    (ecoalesce->tx_max_coalesced_frames_irq) ||
1302	    (ecoalesce->stats_block_coalesce_usecs) ||
1303	    (ecoalesce->use_adaptive_rx_coalesce) ||
1304	    (ecoalesce->use_adaptive_tx_coalesce) ||
1305	    (ecoalesce->pkt_rate_low) ||
1306	    (ecoalesce->rx_coalesce_usecs_low) ||
1307	    (ecoalesce->rx_max_coalesced_frames_low) ||
1308	    (ecoalesce->tx_coalesce_usecs_low) ||
1309	    (ecoalesce->tx_max_coalesced_frames_low) ||
1310	    (ecoalesce->pkt_rate_high) ||
1311	    (ecoalesce->rx_coalesce_usecs_high) ||
1312	    (ecoalesce->rx_max_coalesced_frames_high) ||
1313	    (ecoalesce->tx_coalesce_usecs_high) ||
1314	    (ecoalesce->tx_max_coalesced_frames_high) ||
1315	    (ecoalesce->rate_sample_interval))
1316		return -EOPNOTSUPP;
1317	if (ecoalesce->rx_max_coalesced_frames)
1318		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
 
 
1319	if (ecoalesce->tx_max_coalesced_frames)
1320		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
 
 
1321
1322	return 0;
1323}
1324
1325static struct ethtool_ops axienet_ethtool_ops = {
1326	.get_settings   = axienet_ethtools_get_settings,
1327	.set_settings   = axienet_ethtools_set_settings,
1328	.get_drvinfo    = axienet_ethtools_get_drvinfo,
1329	.get_regs_len   = axienet_ethtools_get_regs_len,
1330	.get_regs       = axienet_ethtools_get_regs,
1331	.get_link       = ethtool_op_get_link,
1332	.get_pauseparam = axienet_ethtools_get_pauseparam,
1333	.set_pauseparam = axienet_ethtools_set_pauseparam,
1334	.get_coalesce   = axienet_ethtools_get_coalesce,
1335	.set_coalesce   = axienet_ethtools_set_coalesce,
1336};
1337
1338/**
1339 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
 1340 * @data:	Pointer to the axienet_local structure, cast to unsigned long
1341 *
1342 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
1343 * Tx/Rx BDs.
1344 */
1345static void axienet_dma_err_handler(unsigned long data)
1346{
 
1347	u32 axienet_status;
1348	u32 cr, i;
1349	int mdio_mcreg;
1350	struct axienet_local *lp = (struct axienet_local *) data;
1351	struct net_device *ndev = lp->ndev;
1352	struct axidma_bd *cur_p;
1353
1354	axienet_setoptions(ndev, lp->options &
1355			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1356	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1357	axienet_mdio_wait_until_ready(lp);
1358	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
1359	 * When we do an Axi Ethernet reset, it resets the complete core
1360	 * including the MDIO. So if MDIO is not disabled when the reset
1361	 * process is started, MDIO will be broken afterwards. */
1362	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
1363		    ~XAE_MDIO_MC_MDIOEN_MASK));
1364
1365	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
1366	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
1367
1368	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
1369	axienet_mdio_wait_until_ready(lp);
 
 
1370
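	/* Reclaim any in-flight Tx buffers and clear both descriptor rings so
	 * they can be reused from index 0 once the channels are restarted.
	 */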
1371	for (i = 0; i < TX_BD_NUM; i++) {
1372		cur_p = &lp->tx_bd_v[i];
1373		if (cur_p->phys)
1374			dma_unmap_single(ndev->dev.parent, cur_p->phys,
1375					 (cur_p->cntrl &
1376					  XAXIDMA_BD_CTRL_LENGTH_MASK),
1377					 DMA_TO_DEVICE);
1378		if (cur_p->app4)
1379			dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
 
1380		cur_p->phys = 0;
 
1381		cur_p->cntrl = 0;
1382		cur_p->status = 0;
1383		cur_p->app0 = 0;
1384		cur_p->app1 = 0;
1385		cur_p->app2 = 0;
1386		cur_p->app3 = 0;
1387		cur_p->app4 = 0;
1388		cur_p->sw_id_offset = 0;
1389	}
1390
1391	for (i = 0; i < RX_BD_NUM; i++) {
1392		cur_p = &lp->rx_bd_v[i];
1393		cur_p->status = 0;
1394		cur_p->app0 = 0;
1395		cur_p->app1 = 0;
1396		cur_p->app2 = 0;
1397		cur_p->app3 = 0;
1398		cur_p->app4 = 0;
1399	}
1400
1401	lp->tx_bd_ci = 0;
1402	lp->tx_bd_tail = 0;
1403	lp->rx_bd_ci = 0;
1404
1405	/* Start updating the Rx channel control register */
1406	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1407	/* Update the interrupt coalesce count */
1408	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
1409	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1410	/* Update the delay timer count */
1411	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
1412	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1413	/* Enable coalesce, delay timer and error interrupts */
1414	cr |= XAXIDMA_IRQ_ALL_MASK;
1415	/* Finally write to the Rx channel control register */
1416	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1417
1418	/* Start updating the Tx channel control register */
1419	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1420	/* Update the interrupt coalesce count */
1421	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
1422	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1423	/* Update the delay timer count */
1424	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
1425	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1426	/* Enable coalesce, delay timer and error interrupts */
1427	cr |= XAXIDMA_IRQ_ALL_MASK;
1428	/* Finally write to the Tx channel control register */
1429	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1430
1431	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
1432	 * halted state. This will make the Rx side ready for reception.*/
1433	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
1434	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1435	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
1436			  cr | XAXIDMA_CR_RUNSTOP_MASK);
1437	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
1438			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
1439
1440	/* Write to the RS (Run-stop) bit in the Tx channel control register.
1441	 * Tx channel is now ready to run. But only after we write to the
1442	 * tail pointer register that the Tx channel will start transmitting */
1443	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
1444	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1445	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
1446			  cr | XAXIDMA_CR_RUNSTOP_MASK);
1447
1448	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
1449	axienet_status &= ~XAE_RCW1_RX_MASK;
1450	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
1451
1452	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
1453	if (axienet_status & XAE_INT_RXRJECT_MASK)
1454		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
 
 
1455	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
1456
1457	/* Sync default options with HW but leave receiver and
1458	 * transmitter disabled.*/
 
1459	axienet_setoptions(ndev, lp->options &
1460			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1461	axienet_set_mac_address(ndev, NULL);
1462	axienet_set_multicast_list(ndev);
 
 
1463	axienet_setoptions(ndev, lp->options);
1464}
1465
1466/**
1467 * axienet_of_probe - Axi Ethernet probe function.
1468 * @op:		Pointer to platform device structure.
1470 *
1471 * returns: 0, on success
1472 *	    Non-zero error value on failure.
1473 *
1474 * This is the probe routine for Axi Ethernet driver. This is called before
1475 * any other driver routines are invoked. It allocates and sets up the Ethernet
 1476 * device. It parses the device tree to populate the fields of
 1477 * axienet_local and registers the Ethernet device.
1478 */
1479static int __devinit axienet_of_probe(struct platform_device *op)
1480{
1481	__be32 *p;
1482	int size, ret = 0;
1483	struct device_node *np;
1484	struct axienet_local *lp;
1485	struct net_device *ndev;
1486	const void *addr;
1487
1488	ndev = alloc_etherdev(sizeof(*lp));
1489	if (!ndev)
1490		return -ENOMEM;
1491
1492	ether_setup(ndev);
1493	dev_set_drvdata(&op->dev, ndev);
1494
1495	SET_NETDEV_DEV(ndev, &op->dev);
1496	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
1497	ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
1498	ndev->netdev_ops = &axienet_netdev_ops;
1499	ndev->ethtool_ops = &axienet_ethtool_ops;
1500
1501	lp = netdev_priv(ndev);
1502	lp->ndev = ndev;
1503	lp->dev = &op->dev;
1504	lp->options = XAE_OPTION_DEFAULTS;
1505	/* Map device registers */
1506	lp->regs = of_iomap(op->dev.of_node, 0);
1507	if (!lp->regs) {
1508		dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
1509		goto nodev;
1510	}
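	/* The properties parsed below come from the Axi Ethernet node in the
	 * device tree. A purely illustrative fragment (the values and node
	 * names are examples, not taken from any particular board):
	 *
	 *	ethernet@40c00000 {
	 *		compatible = "xlnx,axi-ethernet-1.00.a";
	 *		local-mac-address = [00 0a 35 00 00 00];
	 *		xlnx,txcsum = <0x2>;
	 *		xlnx,rxcsum = <0x2>;
	 *		xlnx,rxmem = <0x8000>;
	 *		xlnx,phy-type = <0x1>;
	 *		axistream-connected = <&axi_dma>;
	 *		phy-handle = <&phy0>;
	 *	};
	 */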
 
 
1511	/* Setup checksum offload, but default to off if not specified */
1512	lp->features = 0;
1513
1514	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
1515	if (p) {
1516		switch (be32_to_cpup(p)) {
1517		case 1:
1518			lp->csum_offload_on_tx_path =
1519				XAE_FEATURE_PARTIAL_TX_CSUM;
1520			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
1521			/* Can checksum TCP/UDP over IPv4. */
1522			ndev->features |= NETIF_F_IP_CSUM;
1523			break;
1524		case 2:
1525			lp->csum_offload_on_tx_path =
1526				XAE_FEATURE_FULL_TX_CSUM;
1527			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
1528			/* Can checksum TCP/UDP over IPv4. */
1529			ndev->features |= NETIF_F_IP_CSUM;
1530			break;
1531		default:
1532			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
1533		}
1534	}
1535	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
1536	if (p) {
1537		switch (be32_to_cpup(p)) {
1538		case 1:
1539			lp->csum_offload_on_rx_path =
1540				XAE_FEATURE_PARTIAL_RX_CSUM;
1541			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
 
1542			break;
1543		case 2:
1544			lp->csum_offload_on_rx_path =
1545				XAE_FEATURE_FULL_RX_CSUM;
1546			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
 
1547			break;
1548		default:
1549			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
1550		}
1551	}
1552	/* For supporting jumbo frames, the Axi Ethernet hardware must have
1553	 * a larger Rx/Tx Memory. Typically, the size must be more than or
1554	 * equal to 16384 bytes, so that we can enable jumbo option and start
1555	 * supporting jumbo frames. Here we check for memory allocated for
1556	 * Rx/Tx in the hardware from the device-tree and accordingly set
1557	 * flags. */
1558	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
1559	if (p) {
1560		if ((be32_to_cpup(p)) >= 0x4000)
1561			lp->jumbo_support = 1;
1562	}
1563	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
1564				       NULL);
1565	if (p)
1566		lp->temac_type = be32_to_cpup(p);
1567	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
1568	if (p)
1569		lp->phy_type = be32_to_cpup(p);
1570
1571	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
1572	np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
1573	if (!np) {
1574		dev_err(&op->dev, "could not find DMA node\n");
1575		goto err_iounmap;
1576	}
1577	lp->dma_regs = of_iomap(np, 0);
1578	if (lp->dma_regs) {
1579		dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
1580	} else {
1581		dev_err(&op->dev, "unable to map DMA registers\n");
1582		of_node_put(np);
1583	}
1584	lp->rx_irq = irq_of_parse_and_map(np, 1);
1585	lp->tx_irq = irq_of_parse_and_map(np, 0);
1586	of_node_put(np);
1587	if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
1588		dev_err(&op->dev, "could not determine irqs\n");
1589		ret = -ENOMEM;
1590		goto err_iounmap_2;
1591	}
1592
1593	/* Retrieve the MAC address */
1594	addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
1595	if ((!addr) || (size != 6)) {
1596		dev_err(&op->dev, "could not find MAC address\n");
1597		ret = -ENODEV;
1598		goto err_iounmap_2;
1599	}
1600	axienet_set_mac_address(ndev, (void *) addr);
1601
1602	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
1603	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
 
 
1604
1605	lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
1606	ret = axienet_mdio_setup(lp, op->dev.of_node);
1607	if (ret)
1608		dev_warn(&op->dev, "error registering MDIO bus\n");
1609
1610	ret = register_netdev(lp->ndev);
1611	if (ret) {
1612		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
1613		goto err_iounmap_2;
1614	}
1615
1616	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
1617		     (unsigned long) lp);
1618	tasklet_disable(&lp->dma_err_tasklet);
 
1619
1620	return 0;
1621
1622err_iounmap_2:
1623	if (lp->dma_regs)
1624		iounmap(lp->dma_regs);
1625err_iounmap:
1626	iounmap(lp->regs);
1627nodev:
1628	free_netdev(ndev);
1629	ndev = NULL;
1630	return ret;
1631}
1632
1633static int __devexit axienet_of_remove(struct platform_device *op)
1634{
1635	struct net_device *ndev = dev_get_drvdata(&op->dev);
1636	struct axienet_local *lp = netdev_priv(ndev);
1637
1638	axienet_mdio_teardown(lp);
1639	unregister_netdev(ndev);
1640
1641	if (lp->phy_node)
1642		of_node_put(lp->phy_node);
1643	lp->phy_node = NULL;
1644
1645	dev_set_drvdata(&op->dev, NULL);
1646
1647	iounmap(lp->regs);
1648	if (lp->dma_regs)
1649		iounmap(lp->dma_regs);
1650	free_netdev(ndev);
1651
1652	return 0;
1653}
1654
1655static struct platform_driver axienet_of_driver = {
1656	.probe = axienet_of_probe,
1657	.remove = __devexit_p(axienet_of_remove),
1658	.driver = {
1659		 .owner = THIS_MODULE,
1660		 .name = "xilinx_axienet",
 
1661		 .of_match_table = axienet_of_match,
1662	},
1663};
1664
1665module_platform_driver(axienet_of_driver);
1666
1667MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
1668MODULE_AUTHOR("Xilinx");
1669MODULE_LICENSE("GPL");