   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * sni_ave.c - Socionext UniPhier AVE ethernet driver
   4 * Copyright 2014 Panasonic Corporation
   5 * Copyright 2015-2017 Socionext Inc.
   6 */
   7
   8#include <linux/bitops.h>
   9#include <linux/clk.h>
  10#include <linux/etherdevice.h>
  11#include <linux/interrupt.h>
  12#include <linux/io.h>
  13#include <linux/iopoll.h>
  14#include <linux/mfd/syscon.h>
  15#include <linux/mii.h>
  16#include <linux/module.h>
  17#include <linux/netdevice.h>
  18#include <linux/of.h>
  19#include <linux/of_net.h>
  20#include <linux/of_mdio.h>
 
  21#include <linux/phy.h>
  22#include <linux/platform_device.h>
  23#include <linux/regmap.h>
  24#include <linux/reset.h>
  25#include <linux/types.h>
  26#include <linux/u64_stats_sync.h>
  27
  28/* General Register Group */
  29#define AVE_IDR			0x000	/* ID */
  30#define AVE_VR			0x004	/* Version */
  31#define AVE_GRR			0x008	/* Global Reset */
  32#define AVE_CFGR		0x00c	/* Configuration */
  33
  34/* Interrupt Register Group */
  35#define AVE_GIMR		0x100	/* Global Interrupt Mask */
  36#define AVE_GISR		0x104	/* Global Interrupt Status */
  37
  38/* MAC Register Group */
  39#define AVE_TXCR		0x200	/* TX Setup */
  40#define AVE_RXCR		0x204	/* RX Setup */
  41#define AVE_RXMAC1R		0x208	/* MAC address (lower) */
  42#define AVE_RXMAC2R		0x20c	/* MAC address (upper) */
  43#define AVE_MDIOCTR		0x214	/* MDIO Control */
  44#define AVE_MDIOAR		0x218	/* MDIO Address */
  45#define AVE_MDIOWDR		0x21c	/* MDIO Data */
  46#define AVE_MDIOSR		0x220	/* MDIO Status */
  47#define AVE_MDIORDR		0x224	/* MDIO Rd Data */
  48
  49/* Descriptor Control Register Group */
  50#define AVE_DESCC		0x300	/* Descriptor Control */
  51#define AVE_TXDC		0x304	/* TX Descriptor Configuration */
  52#define AVE_RXDC0		0x308	/* RX Descriptor Ring0 Configuration */
  53#define AVE_IIRQC		0x34c	/* Interval IRQ Control */
  54
  55/* Packet Filter Register Group */
  56#define AVE_PKTF_BASE		0x800	/* PF Base Address */
  57#define AVE_PFMBYTE_BASE	0xd00	/* PF Mask Byte Base Address */
  58#define AVE_PFMBIT_BASE		0xe00	/* PF Mask Bit Base Address */
  59#define AVE_PFSEL_BASE		0xf00	/* PF Selector Base Address */
  60#define AVE_PFEN		0xffc	/* Packet Filter Enable */
  61#define AVE_PKTF(ent)		(AVE_PKTF_BASE + (ent) * 0x40)
  62#define AVE_PFMBYTE(ent)	(AVE_PFMBYTE_BASE + (ent) * 8)
  63#define AVE_PFMBIT(ent)		(AVE_PFMBIT_BASE + (ent) * 4)
  64#define AVE_PFSEL(ent)		(AVE_PFSEL_BASE + (ent) * 4)
  65
  66/* 64bit descriptor memory */
  67#define AVE_DESC_SIZE_64	12	/* Descriptor Size */
  68
  69#define AVE_TXDM_64		0x1000	/* Tx Descriptor Memory */
  70#define AVE_RXDM_64		0x1c00	/* Rx Descriptor Memory */
  71
  72#define AVE_TXDM_SIZE_64	0x0ba0	/* Tx Descriptor Memory Size 3KB */
  73#define AVE_RXDM_SIZE_64	0x6000	/* Rx Descriptor Memory Size 24KB */
  74
  75/* 32bit descriptor memory */
  76#define AVE_DESC_SIZE_32	8	/* Descriptor Size */
  77
  78#define AVE_TXDM_32		0x1000	/* Tx Descriptor Memory */
  79#define AVE_RXDM_32		0x1800	/* Rx Descriptor Memory */
  80
  81#define AVE_TXDM_SIZE_32	0x07c0	/* Tx Descriptor Memory Size 2KB */
  82#define AVE_RXDM_SIZE_32	0x4000	/* Rx Descriptor Memory Size 16KB */
  83
  84/* RMII Bridge Register Group */
  85#define AVE_RSTCTRL		0x8028	/* Reset control */
  86#define AVE_RSTCTRL_RMIIRST	BIT(16)
  87#define AVE_LINKSEL		0x8034	/* Link speed setting */
  88#define AVE_LINKSEL_100M	BIT(0)
  89
  90/* AVE_GRR */
  91#define AVE_GRR_RXFFR		BIT(5)	/* Reset RxFIFO */
  92#define AVE_GRR_PHYRST		BIT(4)	/* Reset external PHY */
  93#define AVE_GRR_GRST		BIT(0)	/* Reset all MAC */
  94
  95/* AVE_CFGR */
  96#define AVE_CFGR_FLE		BIT(31)	/* Filter Function */
  97#define AVE_CFGR_CHE		BIT(30)	/* Checksum Function */
  98#define AVE_CFGR_MII		BIT(27)	/* Func mode (1:MII/RMII, 0:RGMII) */
  99#define AVE_CFGR_IPFCEN		BIT(24)	/* IP fragment sum Enable */
 100
 101/* AVE_GISR (common with GIMR) */
 102#define AVE_GI_PHY		BIT(24)	/* PHY interrupt */
 103#define AVE_GI_TX		BIT(16)	/* Tx complete */
 104#define AVE_GI_RXERR		BIT(8)	/* Receive frame more than max size */
 105#define AVE_GI_RXOVF		BIT(7)	/* Overflow at the RxFIFO */
 106#define AVE_GI_RXDROP		BIT(6)	/* Drop packet */
 107#define AVE_GI_RXIINT		BIT(5)	/* Interval interrupt */
 108
 109/* AVE_TXCR */
 110#define AVE_TXCR_FLOCTR		BIT(18)	/* Flow control */
 111#define AVE_TXCR_TXSPD_1G	BIT(17)
 112#define AVE_TXCR_TXSPD_100	BIT(16)
 113
 114/* AVE_RXCR */
 115#define AVE_RXCR_RXEN		BIT(30)	/* Rx enable */
 116#define AVE_RXCR_FDUPEN		BIT(22)	/* Interface mode */
 117#define AVE_RXCR_FLOCTR		BIT(21)	/* Flow control */
 118#define AVE_RXCR_AFEN		BIT(19)	/* MAC address filter */
 119#define AVE_RXCR_DRPEN		BIT(18)	/* Drop pause frame */
 120#define AVE_RXCR_MPSIZ_MASK	GENMASK(10, 0)
 121
 122/* AVE_MDIOCTR */
 123#define AVE_MDIOCTR_RREQ	BIT(3)	/* Read request */
 124#define AVE_MDIOCTR_WREQ	BIT(2)	/* Write request */
 125
 126/* AVE_MDIOSR */
 127#define AVE_MDIOSR_STS		BIT(0)	/* access status */
 128
 129/* AVE_DESCC */
 130#define AVE_DESCC_STATUS_MASK	GENMASK(31, 16)
 131#define AVE_DESCC_RD0		BIT(8)	/* Enable Rx descriptor Ring0 */
 132#define AVE_DESCC_RDSTP		BIT(4)	/* Pause Rx descriptor */
 133#define AVE_DESCC_TD		BIT(0)	/* Enable Tx descriptor */
 134
 135/* AVE_TXDC */
 136#define AVE_TXDC_SIZE		GENMASK(27, 16)	/* Size of Tx descriptor */
 137#define AVE_TXDC_ADDR		GENMASK(11, 0)	/* Start address */
 138#define AVE_TXDC_ADDR_START	0
 139
 140/* AVE_RXDC0 */
 141#define AVE_RXDC0_SIZE		GENMASK(30, 16)	/* Size of Rx descriptor */
 142#define AVE_RXDC0_ADDR		GENMASK(14, 0)	/* Start address */
 143#define AVE_RXDC0_ADDR_START	0
 144
 145/* AVE_IIRQC */
 146#define AVE_IIRQC_EN0		BIT(27)	/* Enable interval interrupt Ring0 */
 147#define AVE_IIRQC_BSCK		GENMASK(15, 0)	/* Interval count unit */
 148
 149/* Command status for descriptor */
 150#define AVE_STS_OWN		BIT(31)	/* Descriptor ownership */
 151#define AVE_STS_INTR		BIT(29)	/* Request for interrupt */
 152#define AVE_STS_OK		BIT(27)	/* Normal transmit */
 153/* TX */
 154#define AVE_STS_NOCSUM		BIT(28)	/* No use HW checksum */
 155#define AVE_STS_1ST		BIT(26)	/* Head of buffer chain */
 156#define AVE_STS_LAST		BIT(25)	/* Tail of buffer chain */
 157#define AVE_STS_OWC		BIT(21)	/* Out of window,Late Collision */
 158#define AVE_STS_EC		BIT(20)	/* Excess collision occurred */
 159#define AVE_STS_PKTLEN_TX_MASK	GENMASK(15, 0)
 160/* RX */
 161#define AVE_STS_CSSV		BIT(21)	/* Checksum check performed */
 162#define AVE_STS_CSER		BIT(20)	/* Checksum error detected */
 163#define AVE_STS_PKTLEN_RX_MASK	GENMASK(10, 0)
 164
 165/* Packet filter */
 166#define AVE_PFMBYTE_MASK0	(GENMASK(31, 8) | GENMASK(5, 0))
 167#define AVE_PFMBYTE_MASK1	GENMASK(25, 0)
 168#define AVE_PFMBIT_MASK		GENMASK(15, 0)
 169
 170#define AVE_PF_SIZE		17	/* Number of all packet filter */
 171#define AVE_PF_MULTICAST_SIZE	7	/* Number of multicast filter */
 172
 173#define AVE_PFNUM_FILTER	0	/* No.0 */
 174#define AVE_PFNUM_UNICAST	1	/* No.1 */
 175#define AVE_PFNUM_BROADCAST	2	/* No.2 */
 176#define AVE_PFNUM_MULTICAST	11	/* No.11-17 */
 177
 178/* NETIF Message control */
 179#define AVE_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV    |	\
 180				 NETIF_MSG_PROBE  |	\
 181				 NETIF_MSG_LINK   |	\
 182				 NETIF_MSG_TIMER  |	\
 183				 NETIF_MSG_IFDOWN |	\
 184				 NETIF_MSG_IFUP   |	\
 185				 NETIF_MSG_RX_ERR |	\
 186				 NETIF_MSG_TX_ERR)
 187
 188/* Parameter for descriptor */
 189#define AVE_NR_TXDESC		64	/* Tx descriptor */
 190#define AVE_NR_RXDESC		256	/* Rx descriptor */
 191
 192#define AVE_DESC_OFS_CMDSTS	0
 193#define AVE_DESC_OFS_ADDRL	4
 194#define AVE_DESC_OFS_ADDRU	8
 195
 196/* Parameter for ethernet frame */
 197#define AVE_MAX_ETHFRAME	1518
 198#define AVE_FRAME_HEADROOM	2
 199
 200/* Parameter for interrupt */
 201#define AVE_INTM_COUNT		20
 202#define AVE_FORCE_TXINTCNT	1
 203
 204/* SG */
 205#define SG_ETPINMODE		0x540
 206#define SG_ETPINMODE_EXTPHY	BIT(1)	/* for LD11 */
 207#define SG_ETPINMODE_RMII(ins)	BIT(ins)
 208
 209#define IS_DESC_64BIT(p)	((p)->data->is_desc_64bit)
 210
 211#define AVE_MAX_CLKS		4
 212#define AVE_MAX_RSTS		2
 213
 214enum desc_id {
 215	AVE_DESCID_RX,
 216	AVE_DESCID_TX,
 217};
 218
 219enum desc_state {
 220	AVE_DESC_RX_PERMIT,
 221	AVE_DESC_RX_SUSPEND,
 222	AVE_DESC_START,
 223	AVE_DESC_STOP,
 224};
 225
 226struct ave_desc {
 227	struct sk_buff	*skbs;
 228	dma_addr_t	skbs_dma;
 229	size_t		skbs_dmalen;
 230};
 231
 232struct ave_desc_info {
 233	u32	ndesc;		/* number of descriptor */
 234	u32	daddr;		/* start address of descriptor */
 235	u32	proc_idx;	/* index of processing packet */
 236	u32	done_idx;	/* index of processed packet */
 237	struct ave_desc *desc;	/* skb info related descriptor */
 238};
 239
 240struct ave_stats {
 241	struct	u64_stats_sync	syncp;
 242	u64	packets;
 243	u64	bytes;
 244	u64	errors;
 245	u64	dropped;
 246	u64	collisions;
 247	u64	fifo_errors;
 248};
 249
 250struct ave_private {
 251	void __iomem            *base;
 252	int                     irq;
 253	int			phy_id;
 254	unsigned int		desc_size;
 255	u32			msg_enable;
 256	int			nclks;
 257	struct clk		*clk[AVE_MAX_CLKS];
 258	int			nrsts;
 259	struct reset_control	*rst[AVE_MAX_RSTS];
 260	phy_interface_t		phy_mode;
 261	struct phy_device	*phydev;
 262	struct mii_bus		*mdio;
 263	struct regmap		*regmap;
 264	unsigned int		pinmode_mask;
 265	unsigned int		pinmode_val;
 266	u32			wolopts;
 267
 268	/* stats */
 269	struct ave_stats	stats_rx;
 270	struct ave_stats	stats_tx;
 271
 272	/* NAPI support */
 273	struct net_device	*ndev;
 274	struct napi_struct	napi_rx;
 275	struct napi_struct	napi_tx;
 276
 277	/* descriptor */
 278	struct ave_desc_info	rx;
 279	struct ave_desc_info	tx;
 280
 281	/* flow control */
 282	int pause_auto;
 283	int pause_rx;
 284	int pause_tx;
 285
 286	const struct ave_soc_data *data;
 287};
 288
 289struct ave_soc_data {
 290	bool	is_desc_64bit;
 291	const char	*clock_names[AVE_MAX_CLKS];
 292	const char	*reset_names[AVE_MAX_RSTS];
 293	int	(*get_pinmode)(struct ave_private *priv,
 294			       phy_interface_t phy_mode, u32 arg);
 295};
 296
 297static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
 298			 int offset)
 299{
 300	struct ave_private *priv = netdev_priv(ndev);
 301	u32 addr;
 302
 303	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
 304		+ entry * priv->desc_size + offset;
 305
 306	return readl(priv->base + addr);
 307}
 308
/* Read the command/status word (offset 0) of a Tx/Rx descriptor entry. */
static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
				int entry)
{
	return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
}
 314
 315static void ave_desc_write(struct net_device *ndev, enum desc_id id,
 316			   int entry, int offset, u32 val)
 317{
 318	struct ave_private *priv = netdev_priv(ndev);
 319	u32 addr;
 320
 321	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
 322		+ entry * priv->desc_size + offset;
 323
 324	writel(val, priv->base + addr);
 325}
 326
/* Write the command/status word (offset 0) of a Tx/Rx descriptor entry. */
static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
				  int entry, u32 val)
{
	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
}
 332
 333static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
 334				int entry, dma_addr_t paddr)
 335{
 336	struct ave_private *priv = netdev_priv(ndev);
 337
 338	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL,
 339		       lower_32_bits(paddr));
 340	if (IS_DESC_64BIT(priv))
 341		ave_desc_write(ndev, id,
 342			       entry, AVE_DESC_OFS_ADDRU,
 343			       upper_32_bits(paddr));
 344}
 345
 346static u32 ave_irq_disable_all(struct net_device *ndev)
 347{
 348	struct ave_private *priv = netdev_priv(ndev);
 349	u32 ret;
 350
 351	ret = readl(priv->base + AVE_GIMR);
 352	writel(0, priv->base + AVE_GIMR);
 353
 354	return ret;
 355}
 356
/* Restore an interrupt mask previously saved by ave_irq_disable_all(). */
static void ave_irq_restore(struct net_device *ndev, u32 val)
{
	struct ave_private *priv = netdev_priv(ndev);

	writel(val, priv->base + AVE_GIMR);
}
 363
 364static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
 365{
 366	struct ave_private *priv = netdev_priv(ndev);
 367
 368	writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR);
 369	writel(bitflag, priv->base + AVE_GISR);
 370}
 371
 372static void ave_hw_write_macaddr(struct net_device *ndev,
 373				 const unsigned char *mac_addr,
 374				 int reg1, int reg2)
 375{
 376	struct ave_private *priv = netdev_priv(ndev);
 377
 378	writel(mac_addr[0] | mac_addr[1] << 8 |
 379	       mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1);
 380	writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
 381}
 382
 383static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
 384{
 385	struct ave_private *priv = netdev_priv(ndev);
 386	u32 major, minor, vr;
 387
 388	vr = readl(priv->base + AVE_VR);
 389	major = (vr & GENMASK(15, 8)) >> 8;
 390	minor = (vr & GENMASK(7, 0));
 391	snprintf(buf, len, "v%u.%u", major, minor);
 392}
 393
/* ethtool: report driver name, bus id, and the AVE IP version register
 * (there is no real firmware) in the firmware-version field.
 */
static void ave_ethtool_get_drvinfo(struct net_device *ndev,
				    struct ethtool_drvinfo *info)
{
	struct device *dev = ndev->dev.parent;

	strscpy(info->driver, dev->driver->name, sizeof(info->driver));
	strscpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
	ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
}
 403
/* ethtool: report the current NETIF_MSG_* verbosity bitmap. */
static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}
 410
/* ethtool: set the NETIF_MSG_* verbosity bitmap. */
static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
{
	struct ave_private *priv = netdev_priv(ndev);

	priv->msg_enable = val;
}
 417
 418static void ave_ethtool_get_wol(struct net_device *ndev,
 419				struct ethtool_wolinfo *wol)
 420{
 421	wol->supported = 0;
 422	wol->wolopts   = 0;
 423
 424	if (ndev->phydev)
 425		phy_ethtool_get_wol(ndev->phydev, wol);
 426}
 427
 428static int __ave_ethtool_set_wol(struct net_device *ndev,
 429				 struct ethtool_wolinfo *wol)
 430{
 431	if (!ndev->phydev ||
 432	    (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
 433		return -EOPNOTSUPP;
 434
 435	return phy_ethtool_set_wol(ndev->phydev, wol);
 436}
 437
 438static int ave_ethtool_set_wol(struct net_device *ndev,
 439			       struct ethtool_wolinfo *wol)
 440{
 441	int ret;
 442
 443	ret = __ave_ethtool_set_wol(ndev, wol);
 444	if (!ret)
 445		device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
 446
 447	return ret;
 448}
 449
/* ethtool: report the cached flow-control settings. */
static void ave_ethtool_get_pauseparam(struct net_device *ndev,
				       struct ethtool_pauseparam *pause)
{
	struct ave_private *priv = netdev_priv(ndev);

	pause->autoneg  = priv->pause_auto;
	pause->rx_pause = priv->pause_rx;
	pause->tx_pause = priv->pause_tx;
}
 459
 460static int ave_ethtool_set_pauseparam(struct net_device *ndev,
 461				      struct ethtool_pauseparam *pause)
 462{
 463	struct ave_private *priv = netdev_priv(ndev);
 464	struct phy_device *phydev = ndev->phydev;
 465
 466	if (!phydev)
 467		return -EINVAL;
 468
 469	priv->pause_auto = pause->autoneg;
 470	priv->pause_rx   = pause->rx_pause;
 471	priv->pause_tx   = pause->tx_pause;
 472
 473	phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
 474
 475	return 0;
 476}
 477
/* ethtool operations: link settings, nway reset and WoL are delegated
 * to phylib; message level and pause parameters are handled locally.
 */
static const struct ethtool_ops ave_ethtool_ops = {
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_drvinfo		= ave_ethtool_get_drvinfo,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= ave_ethtool_get_msglevel,
	.set_msglevel		= ave_ethtool_set_msglevel,
	.get_wol		= ave_ethtool_get_wol,
	.set_wol		= ave_ethtool_set_wol,
	.get_pauseparam         = ave_ethtool_get_pauseparam,
	.set_pauseparam         = ave_ethtool_set_pauseparam,
};
 491
/* MDIO bus read accessor.
 * Latches the (phy, reg) address, issues a read request, and polls
 * AVE_MDIOSR until the STS bit clears (20us step, 2ms timeout).
 * Returns the 16-bit register value, or a negative errno on timeout.
 */
static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
{
	struct net_device *ndev = bus->priv;
	struct ave_private *priv;
	u32 mdioctl, mdiosr;
	int ret;

	priv = netdev_priv(ndev);

	/* write address */
	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);

	/* read request: set RREQ while making sure WREQ is not pending */
	mdioctl = readl(priv->base + AVE_MDIOCTR);
	writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ,
	       priv->base + AVE_MDIOCTR);

	/* wait for the access to complete */
	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
	if (ret) {
		netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
			   phyid, regnum);
		return ret;
	}

	return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
}
 519
/* MDIO bus write accessor.
 * Latches the (phy, reg) address and data, issues a write request, and
 * polls AVE_MDIOSR until the STS bit clears (20us step, 2ms timeout).
 * Returns 0 on success or a negative errno on timeout.
 */
static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
			     u16 val)
{
	struct net_device *ndev = bus->priv;
	struct ave_private *priv;
	u32 mdioctl, mdiosr;
	int ret;

	priv = netdev_priv(ndev);

	/* write address */
	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);

	/* write data */
	writel(val, priv->base + AVE_MDIOWDR);

	/* write request: set WREQ while making sure RREQ is not pending */
	mdioctl = readl(priv->base + AVE_MDIOCTR);
	writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ,
	       priv->base + AVE_MDIOCTR);

	/* wait for the access to complete */
	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
	if (ret)
		netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
			   phyid, regnum);

	return ret;
}
 549
 550static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
 551		       void *ptr, size_t len, enum dma_data_direction dir,
 552		       dma_addr_t *paddr)
 553{
 554	dma_addr_t map_addr;
 555
 556	map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir);
 557	if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr)))
 558		return -ENOMEM;
 559
 560	desc->skbs_dma = map_addr;
 561	desc->skbs_dmalen = len;
 562	*paddr = map_addr;
 563
 564	return 0;
 565}
 566
 567static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
 568			  enum dma_data_direction dir)
 569{
 570	if (!desc->skbs_dma)
 571		return;
 572
 573	dma_unmap_single(ndev->dev.parent,
 574			 desc->skbs_dma, desc->skbs_dmalen, dir);
 575	desc->skbs_dma = 0;
 576}
 577
/* Prepare one Rx descriptor entry and its buffer memory.
 * Allocates (or reuses) an skb, maps it for DMA, and hands the
 * descriptor back to the hardware.  Returns 0 on success, -ENOMEM on
 * allocation or mapping failure (the entry's skb is freed on a mapping
 * failure).
 */
static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	/* reuse the slot's skb if one is still attached (e.g. after an
	 * earlier failed refill); otherwise allocate a fresh buffer
	 */
	skb = priv->rx.desc[entry].skbs;
	if (!skb) {
		skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
		if (!skb) {
			netdev_err(ndev, "can't allocate skb for Rx\n");
			return -ENOMEM;
		}
		/* advance data/tail past the 2-byte headroom the hardware
		 * writes into (see the mapping comment below)
		 */
		skb->data += AVE_FRAME_HEADROOM;
		skb->tail += AVE_FRAME_HEADROOM;
	}

	/* set disable to cmdsts */
	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
			      AVE_STS_INTR | AVE_STS_OWN);

	/* map Rx buffer
	 * Rx buffer set to the Rx descriptor has two restrictions:
	 * - Rx buffer address is 4 byte aligned.
	 * - Rx buffer begins with 2 byte headroom, and data will be put from
	 *   (buffer + 2).
	 * To satisfy this, specify the address to put back the buffer
	 * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size
	 * by AVE_FRAME_HEADROOM.
	 */
	ret = ave_dma_map(ndev, &priv->rx.desc[entry],
			  skb->data - AVE_FRAME_HEADROOM,
			  AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM,
			  DMA_FROM_DEVICE, &paddr);
	if (ret) {
		netdev_err(ndev, "can't map skb for Rx\n");
		dev_kfree_skb_any(skb);
		return ret;
	}
	priv->rx.desc[entry].skbs = skb;

	/* set buffer pointer */
	ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);

	/* set enable to cmdsts (OWN bit cleared: HW owns Rx descriptors) */
	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
			      AVE_STS_INTR | AVE_MAX_ETHFRAME);

	return ret;
}
 630
/* Switch the descriptor engine between run/stop/suspend states.
 * Returns 0 on success, -EBUSY when the hardware does not acknowledge
 * the requested state within the poll timeout, -EINVAL for an unknown
 * state.
 */
static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
{
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;
	u32 val;

	switch (state) {
	case AVE_DESC_START:
		writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
		break;

	case AVE_DESC_STOP:
		/* clear all control bits, then wait for the whole register
		 * (including the status half) to read back zero
		 */
		writel(0, priv->base + AVE_DESCC);
		if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
				       150, 15000)) {
			netdev_err(ndev, "can't stop descriptor\n");
			ret = -EBUSY;
		}
		break;

	case AVE_DESC_RX_SUSPEND:
		/* request Rx pause; status bits must not be written back,
		 * so mask them out of the read-modify-write
		 */
		val = readl(priv->base + AVE_DESCC);
		val |= AVE_DESCC_RDSTP;
		val &= ~AVE_DESCC_STATUS_MASK;
		writel(val, priv->base + AVE_DESCC);
		/* the pause request is acknowledged in the upper status
		 * half (RDSTP mirrored at bit 4 + 16)
		 */
		if (readl_poll_timeout(priv->base + AVE_DESCC, val,
				       val & (AVE_DESCC_RDSTP << 16),
				       150, 150000)) {
			netdev_err(ndev, "can't suspend descriptor\n");
			ret = -EBUSY;
		}
		break;

	case AVE_DESC_RX_PERMIT:
		/* drop the pause request; no acknowledgement is polled */
		val = readl(priv->base + AVE_DESCC);
		val &= ~AVE_DESCC_RDSTP;
		val &= ~AVE_DESCC_STATUS_MASK;
		writel(val, priv->base + AVE_DESCC);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
 679
/* Reclaim completed Tx descriptors.
 * Walks the ring from done_idx towards proc_idx, freeing transmitted
 * skbs and accumulating statistics; stops at the first descriptor still
 * owned by the hardware.  Wakes the Tx queue if it was stopped and at
 * least one buffer was freed.  Returns the number of freed buffers.
 */
static int ave_tx_complete(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 proc_idx, done_idx, ndesc, cmdsts;
	unsigned int nr_freebuf = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;

	proc_idx = priv->tx.proc_idx;
	done_idx = priv->tx.done_idx;
	ndesc    = priv->tx.ndesc;

	/* free pre-stored skb from done_idx to proc_idx */
	while (proc_idx != done_idx) {
		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);

		/* do nothing if owner is HW (==1 for Tx) */
		if (cmdsts & AVE_STS_OWN)
			break;

		/* check Tx status and updates statistics */
		if (cmdsts & AVE_STS_OK) {
			tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
			/* success: count a packet only on the last fragment
			 * of a buffer chain
			 */
			if (cmdsts & AVE_STS_LAST)
				tx_packets++;
		} else {
			/* error */
			if (cmdsts & AVE_STS_LAST) {
				priv->stats_tx.errors++;
				if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
					priv->stats_tx.collisions++;
			}
		}

		/* release skb */
		if (priv->tx.desc[done_idx].skbs) {
			ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
				      DMA_TO_DEVICE);
			dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
			priv->tx.desc[done_idx].skbs = NULL;
			nr_freebuf++;
		}
		done_idx = (done_idx + 1) % ndesc;
	}

	priv->tx.done_idx = done_idx;

	/* update stats */
	u64_stats_update_begin(&priv->stats_tx.syncp);
	priv->stats_tx.packets += tx_packets;
	priv->stats_tx.bytes   += tx_bytes;
	u64_stats_update_end(&priv->stats_tx.syncp);

	/* wake queue for freeing buffer */
	if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
		netif_wake_queue(ndev);

	return nr_freebuf;
}
 740
/* Receive up to @num packets from the Rx ring.
 * Completed buffers are unmapped and passed up the stack, statistics
 * are updated, and processed slots are refilled via
 * ave_rxdesc_prepare().  Returns the number of descriptors processed.
 */
static int ave_rx_receive(struct net_device *ndev, int num)
{
	struct ave_private *priv = netdev_priv(ndev);
	unsigned int rx_packets = 0;
	unsigned int rx_bytes = 0;
	u32 proc_idx, done_idx;
	struct sk_buff *skb;
	unsigned int pktlen;
	int restpkt, npkts;
	u32 ndesc, cmdsts;

	proc_idx = priv->rx.proc_idx;
	done_idx = priv->rx.done_idx;
	ndesc    = priv->rx.ndesc;
	/* number of slots we may still consume; the "- 1" keeps one slot
	 * of gap between proc_idx and done_idx
	 */
	restpkt  = ((proc_idx + ndesc - 1) - done_idx) % ndesc;

	for (npkts = 0; npkts < num; npkts++) {
		/* we can't receive more packet, so fill desc quickly */
		if (--restpkt < 0)
			break;

		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);

		/* do nothing if owner is HW (==0 for Rx) */
		if (!(cmdsts & AVE_STS_OWN))
			break;

		/* errored frames are counted and their slot skipped; the
		 * skb stays attached and is reused by the refill below
		 */
		if (!(cmdsts & AVE_STS_OK)) {
			priv->stats_rx.errors++;
			proc_idx = (proc_idx + 1) % ndesc;
			continue;
		}

		pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;

		/* get skbuff for rx */
		skb = priv->rx.desc[proc_idx].skbs;
		priv->rx.desc[proc_idx].skbs = NULL;

		ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);

		skb->dev = ndev;
		skb_put(skb, pktlen);
		skb->protocol = eth_type_trans(skb, ndev);

		/* checksum verified by HW and no error detected */
		if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_packets++;
		rx_bytes += pktlen;

		netif_receive_skb(skb);

		proc_idx = (proc_idx + 1) % ndesc;
	}

	priv->rx.proc_idx = proc_idx;

	/* update stats */
	u64_stats_update_begin(&priv->stats_rx.syncp);
	priv->stats_rx.packets += rx_packets;
	priv->stats_rx.bytes   += rx_bytes;
	u64_stats_update_end(&priv->stats_rx.syncp);

	/* refill the Rx buffers */
	while (proc_idx != done_idx) {
		if (ave_rxdesc_prepare(ndev, done_idx))
			break;
		done_idx = (done_idx + 1) % ndesc;
	}

	priv->rx.done_idx = done_idx;

	return npkts;
}
 816
 817static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
 818{
 819	struct ave_private *priv;
 820	struct net_device *ndev;
 821	int num;
 822
 823	priv = container_of(napi, struct ave_private, napi_rx);
 824	ndev = priv->ndev;
 825
 826	num = ave_rx_receive(ndev, budget);
 827	if (num < budget) {
 828		napi_complete_done(napi, num);
 829
 830		/* enable Rx interrupt when NAPI finishes */
 831		ave_irq_enable(ndev, AVE_GI_RXIINT);
 832	}
 833
 834	return num;
 835}
 836
 837static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
 838{
 839	struct ave_private *priv;
 840	struct net_device *ndev;
 841	int num;
 842
 843	priv = container_of(napi, struct ave_private, napi_tx);
 844	ndev = priv->ndev;
 845
 846	num = ave_tx_complete(ndev);
 847	napi_complete(napi);
 848
 849	/* enable Tx interrupt when NAPI finishes */
 850	ave_irq_enable(ndev, AVE_GI_TX);
 851
 852	return num;
 853}
 854
/* Full MAC reset sequence.
 * Programs the configuration register for the current PHY interface
 * mode, then cycles the RMII bridge and the global/PHY resets with the
 * required settle delays, leaving all interrupts masked on exit.
 * The assert/negate ordering and the msleep() intervals follow the
 * hardware's reset requirements and must not be reordered.
 */
static void ave_global_reset(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 val;

	/* set config register */
	val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
	if (!phy_interface_mode_is_rgmii(priv->phy_mode))
		val |= AVE_CFGR_MII;
	writel(val, priv->base + AVE_CFGR);

	/* reset RMII register */
	val = readl(priv->base + AVE_RSTCTRL);
	val &= ~AVE_RSTCTRL_RMIIRST;
	writel(val, priv->base + AVE_RSTCTRL);

	/* assert reset */
	writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
	msleep(20);

	/* 1st, negate PHY reset only */
	writel(AVE_GRR_GRST, priv->base + AVE_GRR);
	msleep(40);

	/* negate reset */
	writel(0, priv->base + AVE_GRR);
	msleep(40);

	/* negate RMII register */
	val = readl(priv->base + AVE_RSTCTRL);
	val |= AVE_RSTCTRL_RMIIRST;
	writel(val, priv->base + AVE_RSTCTRL);

	ave_irq_disable_all(ndev);
}
 890
/* Recover from an RxFIFO overflow.
 * With MAC reception disabled and the Rx descriptor ring suspended,
 * drains any packets already completed, pulses the RxFIFO reset,
 * clears the overflow status, and resumes reception in the original
 * configuration.
 */
static void ave_rxfifo_reset(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 rxcr_org;

	/* save and disable MAC receive op */
	rxcr_org = readl(priv->base + AVE_RXCR);
	writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);

	/* suspend Rx descriptor */
	ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);

	/* receive all packets before descriptor starts */
	ave_rx_receive(ndev, priv->rx.ndesc);

	/* assert reset */
	writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
	udelay(50);

	/* negate reset */
	writel(0, priv->base + AVE_GRR);
	udelay(20);

	/* negate interrupt status */
	writel(AVE_GI_RXOVF, priv->base + AVE_GISR);

	/* permit descriptor */
	ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);

	/* restore MAC receive op */
	writel(rxcr_org, priv->base + AVE_RXCR);
}
 923
/* Top-half interrupt handler.
 * All sources are masked on entry; Rx and Tx completion work is
 * deferred to NAPI, and their mask bits are kept cleared in the
 * restored mask until the corresponding poll routine re-enables them.
 * Error conditions (oversized frame, FIFO overflow, drops) are handled
 * inline.
 */
static irqreturn_t ave_irq_handler(int irq, void *netdev)
{
	struct net_device *ndev = (struct net_device *)netdev;
	struct ave_private *priv = netdev_priv(ndev);
	u32 gimr_val, gisr_val;

	gimr_val = ave_irq_disable_all(ndev);

	/* get interrupt status */
	gisr_val = readl(priv->base + AVE_GISR);

	/* PHY */
	if (gisr_val & AVE_GI_PHY)
		writel(AVE_GI_PHY, priv->base + AVE_GISR);

	/* check exceeding packet */
	if (gisr_val & AVE_GI_RXERR) {
		writel(AVE_GI_RXERR, priv->base + AVE_GISR);
		netdev_err(ndev, "receive a packet exceeding frame buffer\n");
	}

	/* only act on sources that were actually unmasked */
	gisr_val &= gimr_val;
	if (!gisr_val)
		goto exit_isr;

	/* RxFIFO overflow */
	if (gisr_val & AVE_GI_RXOVF) {
		priv->stats_rx.fifo_errors++;
		ave_rxfifo_reset(ndev);
		goto exit_isr;
	}

	/* Rx drop */
	if (gisr_val & AVE_GI_RXDROP) {
		priv->stats_rx.dropped++;
		writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
	}

	/* Rx interval */
	if (gisr_val & AVE_GI_RXIINT) {
		napi_schedule(&priv->napi_rx);
		/* still force to disable Rx interrupt until NAPI finishes */
		gimr_val &= ~AVE_GI_RXIINT;
	}

	/* Tx completed */
	if (gisr_val & AVE_GI_TX) {
		napi_schedule(&priv->napi_tx);
		/* still force to disable Tx interrupt until NAPI finishes */
		gimr_val &= ~AVE_GI_TX;
	}

exit_isr:
	ave_irq_restore(ndev, gimr_val);

	return IRQ_HANDLED;
}
 981
 982static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
 983{
 984	struct ave_private *priv = netdev_priv(ndev);
 985	u32 val;
 986
 987	if (WARN_ON(entry > AVE_PF_SIZE))
 988		return -EINVAL;
 989
 990	val = readl(priv->base + AVE_PFEN);
 991	writel(val | BIT(entry), priv->base + AVE_PFEN);
 992
 993	return 0;
 994}
 995
 996static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
 997{
 998	struct ave_private *priv = netdev_priv(ndev);
 999	u32 val;
1000
1001	if (WARN_ON(entry > AVE_PF_SIZE))
1002		return -EINVAL;
1003
1004	val = readl(priv->base + AVE_PFEN);
1005	writel(val & ~BIT(entry), priv->base + AVE_PFEN);
1006
1007	return 0;
1008}
1009
/* Program packet filter @entry to match the first @set_size bytes of
 * @mac_addr and steer matching frames to Rx ring 0.
 * The entry is disabled while its registers are being updated and
 * re-enabled afterwards.
 */
static int ave_pfsel_set_macaddr(struct net_device *ndev,
				 unsigned int entry,
				 const unsigned char *mac_addr,
				 unsigned int set_size)
{
	struct ave_private *priv = netdev_priv(ndev);

	if (WARN_ON(entry > AVE_PF_SIZE))
		return -EINVAL;
	if (WARN_ON(set_size > 6))
		return -EINVAL;

	ave_pfsel_stop(ndev, entry);

	/* set MAC address for the filter */
	ave_hw_write_macaddr(ndev, mac_addr,
			     AVE_PKTF(entry), AVE_PKTF(entry) + 4);

	/* set byte mask: bits above set_size are masked so only the first
	 * set_size bytes take part in the comparison
	 */
	writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
	       priv->base + AVE_PFMBYTE(entry));
	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);

	/* set bit mask filter */
	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));

	/* set selector to ring 0 */
	writel(0, priv->base + AVE_PFSEL(entry));

	/* restart filter */
	ave_pfsel_start(ndev, entry);

	return 0;
}
1044
/* Program packet filter @entry as a match-all (promiscuous) entry,
 * steering every frame to Rx ring @rxring.  The entry is disabled
 * during reprogramming and re-enabled afterwards.
 */
static void ave_pfsel_set_promisc(struct net_device *ndev,
				  unsigned int entry, u32 rxring)
{
	struct ave_private *priv = netdev_priv(ndev);

	if (WARN_ON(entry > AVE_PF_SIZE))
		return;

	ave_pfsel_stop(ndev, entry);

	/* set byte mask: all bytes masked, so every frame matches */
	writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);

	/* set bit mask filter */
	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));

	/* set selector to rxring */
	writel(rxring, priv->base + AVE_PFSEL(entry));

	ave_pfsel_start(ndev, entry);
}
1067
1068static void ave_pfsel_init(struct net_device *ndev)
1069{
1070	unsigned char bcast_mac[ETH_ALEN];
1071	int i;
1072
1073	eth_broadcast_addr(bcast_mac);
1074
1075	for (i = 0; i < AVE_PF_SIZE; i++)
1076		ave_pfsel_stop(ndev, i);
1077
1078	/* promiscious entry, select ring 0 */
1079	ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);
1080
1081	/* unicast entry */
1082	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
1083
1084	/* broadcast entry */
1085	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6);
1086}
1087
/* phylib adjust_link callback: mirror the PHY's negotiated speed, duplex
 * and pause settings into the MAC's TXCR/RXCR/LINKSEL registers.
 * Called by phylib with phydev state already updated.
 */
static void ave_phy_adjust_link(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u32 val, txcr, rxcr, rxcr_org;
	u16 rmt_adv = 0, lcl_adv = 0;
	u8 cap;

	/* set RGMII speed */
	val = readl(priv->base + AVE_TXCR);
	val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);

	if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
		val |= AVE_TXCR_TXSPD_1G;
	else if (phydev->speed == SPEED_100)
		val |= AVE_TXCR_TXSPD_100;

	writel(val, priv->base + AVE_TXCR);

	/* set RMII speed (100M/10M only) */
	if (!phy_interface_is_rgmii(phydev)) {
		val = readl(priv->base + AVE_LINKSEL);
		if (phydev->speed == SPEED_10)
			val &= ~AVE_LINKSEL_100M;
		else
			val |= AVE_LINKSEL_100M;
		writel(val, priv->base + AVE_LINKSEL);
	}

	/* check current RXCR/TXCR */
	rxcr = readl(priv->base + AVE_RXCR);
	txcr = readl(priv->base + AVE_TXCR);
	rxcr_org = rxcr;

	if (phydev->duplex) {
		rxcr |= AVE_RXCR_FDUPEN;

		/* resolve Tx/Rx flow control from local and link partner
		 * pause advertisement
		 */
		if (phydev->pause)
			rmt_adv |= LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
		cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (cap & FLOW_CTRL_TX)
			txcr |= AVE_TXCR_FLOCTR;
		else
			txcr &= ~AVE_TXCR_FLOCTR;
		if (cap & FLOW_CTRL_RX)
			rxcr |= AVE_RXCR_FLOCTR;
		else
			rxcr &= ~AVE_RXCR_FLOCTR;
	} else {
		/* half duplex: no flow control */
		rxcr &= ~AVE_RXCR_FDUPEN;
		rxcr &= ~AVE_RXCR_FLOCTR;
		txcr &= ~AVE_TXCR_FLOCTR;
	}

	if (rxcr_org != rxcr) {
		/* disable Rx mac */
		writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
		/* change and enable TX/Rx mac */
		writel(txcr, priv->base + AVE_TXCR);
		writel(rxcr, priv->base + AVE_RXCR);
	}

	phy_print_status(phydev);
}
1156
1157static void ave_macaddr_init(struct net_device *ndev)
1158{
1159	ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);
1160
1161	/* pfsel unicast entry */
1162	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
1163}
1164
/* ->ndo_init: bring up clocks/resets, configure the syscon pinmode,
 * reset the MAC, register the MDIO bus and attach the PHY.
 * On failure, resets are re-asserted and clocks disabled in reverse
 * order of acquisition.
 */
static int ave_init(struct net_device *ndev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ave_private *priv = netdev_priv(ndev);
	struct device *dev = ndev->dev.parent;
	struct device_node *np = dev->of_node;
	struct device_node *mdio_np;
	struct phy_device *phydev;
	int nc, nr, ret;

	/* enable clk because of hw access until ndo_open */
	for (nc = 0; nc < priv->nclks; nc++) {
		ret = clk_prepare_enable(priv->clk[nc]);
		if (ret) {
			dev_err(dev, "can't enable clock\n");
			goto out_clk_disable;
		}
	}

	for (nr = 0; nr < priv->nrsts; nr++) {
		ret = reset_control_deassert(priv->rst[nr]);
		if (ret) {
			dev_err(dev, "can't deassert reset\n");
			goto out_reset_assert;
		}
	}

	/* select RMII/RGMII pin mode via the SoC-level syscon */
	ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
				 priv->pinmode_mask, priv->pinmode_val);
	if (ret)
		goto out_reset_assert;

	ave_global_reset(ndev);

	mdio_np = of_get_child_by_name(np, "mdio");
	if (!mdio_np) {
		dev_err(dev, "mdio node not found\n");
		ret = -EINVAL;
		goto out_reset_assert;
	}
	ret = of_mdiobus_register(priv->mdio, mdio_np);
	of_node_put(mdio_np);
	if (ret) {
		dev_err(dev, "failed to register mdiobus\n");
		goto out_reset_assert;
	}

	phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
	if (!phydev) {
		dev_err(dev, "could not attach to PHY\n");
		ret = -ENODEV;
		goto out_mdio_unregister;
	}

	priv->phydev = phydev;

	ave_ethtool_get_wol(ndev, &wol);
	device_set_wakeup_capable(&ndev->dev, !!wol.supported);

	/* set wol initial state disabled */
	wol.wolopts = 0;
	__ave_ethtool_set_wol(ndev, &wol);

	/* non-RGMII modes top out at 100M on this hardware */
	if (!phy_interface_is_rgmii(phydev))
		phy_set_max_speed(phydev, SPEED_100);

	phy_support_asym_pause(phydev);

	/* suspend/resume of the PHY is handled by this driver */
	phydev->mac_managed_pm = true;

	phy_attached_info(phydev);

	return 0;

out_mdio_unregister:
	mdiobus_unregister(priv->mdio);
out_reset_assert:
	while (--nr >= 0)
		reset_control_assert(priv->rst[nr]);
out_clk_disable:
	while (--nc >= 0)
		clk_disable_unprepare(priv->clk[nc]);

	return ret;
}
1250
1251static void ave_uninit(struct net_device *ndev)
1252{
1253	struct ave_private *priv = netdev_priv(ndev);
1254	int i;
1255
1256	phy_disconnect(priv->phydev);
1257	mdiobus_unregister(priv->mdio);
1258
1259	/* disable clk because of hw access after ndo_stop */
1260	for (i = 0; i < priv->nrsts; i++)
1261		reset_control_assert(priv->rst[i]);
1262	for (i = 0; i < priv->nclks; i++)
1263		clk_disable_unprepare(priv->clk[i]);
1264}
1265
1266static int ave_open(struct net_device *ndev)
1267{
1268	struct ave_private *priv = netdev_priv(ndev);
1269	int entry;
1270	int ret;
1271	u32 val;
1272
1273	ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
1274			  ndev);
1275	if (ret)
1276		return ret;
1277
1278	priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
1279				GFP_KERNEL);
1280	if (!priv->tx.desc) {
1281		ret = -ENOMEM;
1282		goto out_free_irq;
1283	}
1284
1285	priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
1286				GFP_KERNEL);
1287	if (!priv->rx.desc) {
1288		kfree(priv->tx.desc);
1289		ret = -ENOMEM;
1290		goto out_free_irq;
1291	}
1292
1293	/* initialize Tx work and descriptor */
1294	priv->tx.proc_idx = 0;
1295	priv->tx.done_idx = 0;
1296	for (entry = 0; entry < priv->tx.ndesc; entry++) {
1297		ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
1298		ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
1299	}
1300	writel(AVE_TXDC_ADDR_START |
1301	       (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
1302	       priv->base + AVE_TXDC);
1303
1304	/* initialize Rx work and descriptor */
1305	priv->rx.proc_idx = 0;
1306	priv->rx.done_idx = 0;
1307	for (entry = 0; entry < priv->rx.ndesc; entry++) {
1308		if (ave_rxdesc_prepare(ndev, entry))
1309			break;
1310	}
1311	writel(AVE_RXDC0_ADDR_START |
1312	       (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
1313	       priv->base + AVE_RXDC0);
1314
1315	ave_desc_switch(ndev, AVE_DESC_START);
1316
1317	ave_pfsel_init(ndev);
1318	ave_macaddr_init(ndev);
1319
1320	/* set Rx configuration */
1321	/* full duplex, enable pause drop, enalbe flow control */
1322	val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
1323		AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
1324	writel(val, priv->base + AVE_RXCR);
1325
1326	/* set Tx configuration */
1327	/* enable flow control, disable loopback */
1328	writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);
1329
1330	/* enable timer, clear EN,INTM, and mask interval unit(BSCK) */
1331	val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
1332	val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
1333	writel(val, priv->base + AVE_IIRQC);
1334
1335	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;
1336	ave_irq_restore(ndev, val);
1337
1338	napi_enable(&priv->napi_rx);
1339	napi_enable(&priv->napi_tx);
1340
1341	phy_start(ndev->phydev);
1342	phy_start_aneg(ndev->phydev);
1343	netif_start_queue(ndev);
1344
1345	return 0;
1346
1347out_free_irq:
1348	disable_irq(priv->irq);
1349	free_irq(priv->irq, ndev);
1350
1351	return ret;
1352}
1353
/* ->ndo_stop: mask/free the IRQ, stop the queue, PHY and NAPI, halt the
 * descriptor engine, then unmap and free all in-flight Tx/Rx buffers and
 * the descriptor bookkeeping allocated in ave_open().
 */
static int ave_stop(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	int entry;

	ave_irq_disable_all(ndev);
	disable_irq(priv->irq);
	free_irq(priv->irq, ndev);

	netif_tx_disable(ndev);
	phy_stop(ndev->phydev);
	napi_disable(&priv->napi_tx);
	napi_disable(&priv->napi_rx);

	ave_desc_switch(ndev, AVE_DESC_STOP);

	/* free Tx buffer */
	for (entry = 0; entry < priv->tx.ndesc; entry++) {
		if (!priv->tx.desc[entry].skbs)
			continue;

		ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
		dev_kfree_skb_any(priv->tx.desc[entry].skbs);
		priv->tx.desc[entry].skbs = NULL;
	}
	priv->tx.proc_idx = 0;
	priv->tx.done_idx = 0;

	/* free Rx buffer */
	for (entry = 0; entry < priv->rx.ndesc; entry++) {
		if (!priv->rx.desc[entry].skbs)
			continue;

		ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx.desc[entry].skbs);
		priv->rx.desc[entry].skbs = NULL;
	}
	priv->rx.proc_idx = 0;
	priv->rx.done_idx = 0;

	kfree(priv->tx.desc);
	kfree(priv->rx.desc);

	return 0;
}
1399
/* ->ndo_start_xmit: queue one skb on the Tx descriptor ring.
 * Returns NETDEV_TX_BUSY (and stops the queue) when the ring is full;
 * otherwise consumes the skb and returns NETDEV_TX_OK, even on mapping
 * failure (the packet is then dropped and counted).
 */
static netdev_tx_t ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 proc_idx, done_idx, ndesc, cmdsts;
	int ret, freepkt;
	dma_addr_t paddr;

	proc_idx = priv->tx.proc_idx;
	done_idx = priv->tx.done_idx;
	ndesc = priv->tx.ndesc;
	/* free slots in the circular ring, keeping one slot unused to
	 * distinguish full from empty
	 */
	freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;

	/* stop queue when not enough entry */
	if (unlikely(freepkt < 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* add padding for short packet */
	if (skb_put_padto(skb, ETH_ZLEN)) {
		priv->stats_tx.dropped++;
		return NETDEV_TX_OK;
	}

	/* map Tx buffer
	 * Tx buffer set to the Tx descriptor doesn't have any restriction.
	 */
	ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
			  skb->data, skb->len, DMA_TO_DEVICE, &paddr);
	if (ret) {
		dev_kfree_skb_any(skb);
		priv->stats_tx.dropped++;
		return NETDEV_TX_OK;
	}

	priv->tx.desc[proc_idx].skbs = skb;

	ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);

	cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
		(skb->len & AVE_STS_PKTLEN_TX_MASK);

	/* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
	if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
		cmdsts |= AVE_STS_INTR;

	/* disable HW checksum insertion when the skb doesn't request it */
	if (skb->ip_summed == CHECKSUM_NONE ||
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		cmdsts |= AVE_STS_NOCSUM;

	/* writing cmdsts with AVE_STS_OWN hands the slot to hardware */
	ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);

	priv->tx.proc_idx = (proc_idx + 1) % ndesc;

	return NETDEV_TX_OK;
}
1457
1458static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
1459{
1460	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
1461}
1462
/* First-byte-only match patterns (used with set_size = 1) covering the
 * IPv4 (01:xx:...) and IPv6 (33:xx:...) multicast MAC address ranges.
 */
static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
1465
/* ->ndo_set_rx_mode: reconfigure address filtering according to the
 * device flags and the multicast list, using the packet filter entries
 * reserved for multicast (AVE_PFNUM_MULTICAST..+AVE_PF_MULTICAST_SIZE-1).
 */
static void ave_set_rx_mode(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct netdev_hw_addr *hw_adr;
	int count, mc_cnt;
	u32 val;

	/* MAC addr filter enable for promiscuous mode */
	mc_cnt = netdev_mc_count(ndev);
	val = readl(priv->base + AVE_RXCR);
	if (ndev->flags & IFF_PROMISC || !mc_cnt)
		val &= ~AVE_RXCR_AFEN;
	else
		val |= AVE_RXCR_AFEN;
	writel(val, priv->base + AVE_RXCR);

	/* set all multicast address */
	if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
		/* too many entries: match every v4/v6 multicast frame by
		 * first byte only
		 */
		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
				      v4multi_macadr, 1);
		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
				      v6multi_macadr, 1);
	} else {
		/* stop all multicast filter */
		for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
			ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);

		/* set multicast addresses */
		count = 0;
		netdev_for_each_mc_addr(hw_adr, ndev) {
			if (count == mc_cnt)
				break;
			ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
					      hw_adr->addr, 6);
			count++;
		}
	}
}
1504
/* ->ndo_get_stats64: copy the driver's software counters into @stats.
 * packets/bytes are read under u64_stats retry loops for consistency on
 * 32-bit systems; the remaining counters are read without seqcount
 * protection (matching how they are updated).
 */
static void ave_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *stats)
{
	struct ave_private *priv = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&priv->stats_rx.syncp);
		stats->rx_packets = priv->stats_rx.packets;
		stats->rx_bytes	  = priv->stats_rx.bytes;
	} while (u64_stats_fetch_retry(&priv->stats_rx.syncp, start));

	do {
		start = u64_stats_fetch_begin(&priv->stats_tx.syncp);
		stats->tx_packets = priv->stats_tx.packets;
		stats->tx_bytes	  = priv->stats_tx.bytes;
	} while (u64_stats_fetch_retry(&priv->stats_tx.syncp, start));

	stats->rx_errors      = priv->stats_rx.errors;
	stats->tx_errors      = priv->stats_tx.errors;
	stats->rx_dropped     = priv->stats_rx.dropped;
	stats->tx_dropped     = priv->stats_tx.dropped;
	stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
	stats->collisions     = priv->stats_tx.collisions;
}
1530
/* ->ndo_set_mac_address: validate/store the new address, then push it
 * into the MAC registers and the unicast packet filter.
 */
static int ave_set_mac_address(struct net_device *ndev, void *p)
{
	int ret;

	ret = eth_mac_addr(ndev, p);
	if (ret)
		return ret;

	ave_macaddr_init(ndev);

	return 0;
}
1542
/* net_device callbacks implemented by this driver */
static const struct net_device_ops ave_netdev_ops = {
	.ndo_init		= ave_init,
	.ndo_uninit		= ave_uninit,
	.ndo_open		= ave_open,
	.ndo_stop		= ave_stop,
	.ndo_start_xmit		= ave_start_xmit,
	.ndo_eth_ioctl		= ave_ioctl,
	.ndo_set_rx_mode	= ave_set_rx_mode,
	.ndo_get_stats64	= ave_get_stats64,
	.ndo_set_mac_address	= ave_set_mac_address,
};
1554
/* ->probe: map registers, parse DT (phy-mode, MAC address, clocks,
 * resets, syscon pinmode), set up the netdev/mdiobus/NAPI contexts and
 * register the netdev.  Clocks and resets are only looked up here; they
 * are enabled later in ->ndo_init.
 */
static int ave_probe(struct platform_device *pdev)
{
	const struct ave_soc_data *data;
	struct device *dev = &pdev->dev;
	char buf[ETHTOOL_FWVERS_LEN];
	struct of_phandle_args args;
	phy_interface_t phy_mode;
	struct ave_private *priv;
	struct net_device *ndev;
	struct device_node *np;
	void __iomem *base;
	const char *name;
	int i, irq, ret;
	u64 dma_mask;
	u32 ave_id;

	data = of_device_get_match_data(dev);
	if (WARN_ON(!data))
		return -EINVAL;

	np = dev->of_node;
	ret = of_get_phy_mode(np, &phy_mode);
	if (ret) {
		dev_err(dev, "phy-mode not found\n");
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ndev = devm_alloc_etherdev(dev, sizeof(struct ave_private));
	if (!ndev) {
		dev_err(dev, "can't allocate ethernet device\n");
		return -ENOMEM;
	}

	ndev->netdev_ops = &ave_netdev_ops;
	ndev->ethtool_ops = &ave_ethtool_ops;
	SET_NETDEV_DEV(ndev, dev);

	ndev->features    |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
	ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);

	/* MTU bounded by the largest frame the MAC accepts */
	ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);

	ret = of_get_ethdev_address(np, ndev);
	if (ret) {
		/* if the mac address is invalid, use random mac address */
		eth_hw_addr_random(ndev);
		dev_warn(dev, "Using random MAC address: %pM\n",
			 ndev->dev_addr);
	}

	priv = netdev_priv(ndev);
	priv->base = base;
	priv->irq = irq;
	priv->ndev = ndev;
	priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
	priv->phy_mode = phy_mode;
	priv->data = data;

	/* descriptor width and DMA mask depend on the SoC generation */
	if (IS_DESC_64BIT(priv)) {
		priv->desc_size = AVE_DESC_SIZE_64;
		priv->tx.daddr  = AVE_TXDM_64;
		priv->rx.daddr  = AVE_RXDM_64;
		dma_mask = DMA_BIT_MASK(64);
	} else {
		priv->desc_size = AVE_DESC_SIZE_32;
		priv->tx.daddr  = AVE_TXDM_32;
		priv->rx.daddr  = AVE_RXDM_32;
		dma_mask = DMA_BIT_MASK(32);
	}
	ret = dma_set_mask(dev, dma_mask);
	if (ret)
		return ret;

	priv->tx.ndesc = AVE_NR_TXDESC;
	priv->rx.ndesc = AVE_NR_RXDESC;

	u64_stats_init(&priv->stats_tx.syncp);
	u64_stats_init(&priv->stats_rx.syncp);

	/* per-SoC clock/reset name lists are NULL-terminated */
	for (i = 0; i < AVE_MAX_CLKS; i++) {
		name = priv->data->clock_names[i];
		if (!name)
			break;
		priv->clk[i] = devm_clk_get(dev, name);
		if (IS_ERR(priv->clk[i]))
			return PTR_ERR(priv->clk[i]);
		priv->nclks++;
	}

	for (i = 0; i < AVE_MAX_RSTS; i++) {
		name = priv->data->reset_names[i];
		if (!name)
			break;
		priv->rst[i] = devm_reset_control_get_shared(dev, name);
		if (IS_ERR(priv->rst[i]))
			return PTR_ERR(priv->rst[i]);
		priv->nrsts++;
	}

	/* the phandle argument selects the AVE instance in the syscon */
	ret = of_parse_phandle_with_fixed_args(np,
					       "socionext,syscon-phy-mode",
					       1, 0, &args);
	if (ret) {
		dev_err(dev, "can't get syscon-phy-mode property\n");
		return ret;
	}
	priv->regmap = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(priv->regmap)) {
		dev_err(dev, "can't map syscon-phy-mode\n");
		return PTR_ERR(priv->regmap);
	}
	ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]);
	if (ret) {
		dev_err(dev, "invalid phy-mode setting\n");
		return ret;
	}

	priv->mdio = devm_mdiobus_alloc(dev);
	if (!priv->mdio)
		return -ENOMEM;
	priv->mdio->priv = ndev;
	priv->mdio->parent = dev;
	priv->mdio->read = ave_mdiobus_read;
	priv->mdio->write = ave_mdiobus_write;
	priv->mdio->name = "uniphier-mdio";
	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* Register as a NAPI supported driver */
	netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx);
	netif_napi_add_tx(ndev, &priv->napi_tx, ave_napi_poll_tx);

	platform_set_drvdata(pdev, ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "failed to register netdevice\n");
		goto out_del_napi;
	}

	/* get ID and version */
	ave_id = readl(priv->base + AVE_IDR);
	ave_hw_read_version(ndev, buf, sizeof(buf));

	dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
		 (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
		 (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
		 buf, priv->irq, phy_modes(phy_mode));

	return 0;

out_del_napi:
	netif_napi_del(&priv->napi_rx);
	netif_napi_del(&priv->napi_tx);

	return ret;
}
1721
1722static void ave_remove(struct platform_device *pdev)
1723{
1724	struct net_device *ndev = platform_get_drvdata(pdev);
1725	struct ave_private *priv = netdev_priv(ndev);
1726
1727	unregister_netdev(ndev);
1728	netif_napi_del(&priv->napi_rx);
1729	netif_napi_del(&priv->napi_tx);
 
 
1730}
1731
1732#ifdef CONFIG_PM_SLEEP
/* System suspend: stop the interface if it was running and remember the
 * current Wake-on-LAN options so ave_resume() can restore them.
 */
static int ave_suspend(struct device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;

	if (netif_running(ndev)) {
		ret = ave_stop(ndev);
		netif_device_detach(ndev);
	}

	/* snapshot WoL state; hw is re-initialized on resume */
	ave_ethtool_get_wol(ndev, &wol);
	priv->wolopts = wol.wolopts;

	return ret;
}
1750
/* System resume: reset the MAC, re-init the PHY, restore the saved
 * Wake-on-LAN options, and reopen the interface if it was running.
 */
static int ave_resume(struct device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;

	ave_global_reset(ndev);

	/* mac_managed_pm: the driver re-initializes the PHY itself */
	ret = phy_init_hw(ndev->phydev);
	if (ret)
		return ret;

	/* restore WoL options saved in ave_suspend() */
	ave_ethtool_get_wol(ndev, &wol);
	wol.wolopts = priv->wolopts;
	__ave_ethtool_set_wol(ndev, &wol);

	if (netif_running(ndev)) {
		ret = ave_open(ndev);
		netif_device_attach(ndev);
	}

	return ret;
}
1775
1776static SIMPLE_DEV_PM_OPS(ave_pm_ops, ave_suspend, ave_resume);
1777#define AVE_PM_OPS	(&ave_pm_ops)
1778#else
1779#define AVE_PM_OPS	NULL
1780#endif
1781
1782static int ave_pro4_get_pinmode(struct ave_private *priv,
1783				phy_interface_t phy_mode, u32 arg)
1784{
1785	if (arg > 0)
1786		return -EINVAL;
1787
1788	priv->pinmode_mask = SG_ETPINMODE_RMII(0);
1789
1790	switch (phy_mode) {
1791	case PHY_INTERFACE_MODE_RMII:
1792		priv->pinmode_val = SG_ETPINMODE_RMII(0);
1793		break;
1794	case PHY_INTERFACE_MODE_MII:
1795	case PHY_INTERFACE_MODE_RGMII:
1796	case PHY_INTERFACE_MODE_RGMII_ID:
1797	case PHY_INTERFACE_MODE_RGMII_RXID:
1798	case PHY_INTERFACE_MODE_RGMII_TXID:
1799		priv->pinmode_val = 0;
1800		break;
1801	default:
1802		return -EINVAL;
1803	}
1804
1805	return 0;
1806}
1807
1808static int ave_ld11_get_pinmode(struct ave_private *priv,
1809				phy_interface_t phy_mode, u32 arg)
1810{
1811	if (arg > 0)
1812		return -EINVAL;
1813
1814	priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
1815
1816	switch (phy_mode) {
1817	case PHY_INTERFACE_MODE_INTERNAL:
1818		priv->pinmode_val = 0;
1819		break;
1820	case PHY_INTERFACE_MODE_RMII:
1821		priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
1822		break;
1823	default:
1824		return -EINVAL;
1825	}
1826
1827	return 0;
1828}
1829
1830static int ave_ld20_get_pinmode(struct ave_private *priv,
1831				phy_interface_t phy_mode, u32 arg)
1832{
1833	if (arg > 0)
1834		return -EINVAL;
1835
1836	priv->pinmode_mask = SG_ETPINMODE_RMII(0);
1837
1838	switch (phy_mode) {
1839	case PHY_INTERFACE_MODE_RMII:
1840		priv->pinmode_val = SG_ETPINMODE_RMII(0);
1841		break;
1842	case PHY_INTERFACE_MODE_RGMII:
1843	case PHY_INTERFACE_MODE_RGMII_ID:
1844	case PHY_INTERFACE_MODE_RGMII_RXID:
1845	case PHY_INTERFACE_MODE_RGMII_TXID:
1846		priv->pinmode_val = 0;
1847		break;
1848	default:
1849		return -EINVAL;
1850	}
1851
1852	return 0;
1853}
1854
1855static int ave_pxs3_get_pinmode(struct ave_private *priv,
1856				phy_interface_t phy_mode, u32 arg)
1857{
1858	if (arg > 1)
1859		return -EINVAL;
1860
1861	priv->pinmode_mask = SG_ETPINMODE_RMII(arg);
1862
1863	switch (phy_mode) {
1864	case PHY_INTERFACE_MODE_RMII:
1865		priv->pinmode_val = SG_ETPINMODE_RMII(arg);
1866		break;
1867	case PHY_INTERFACE_MODE_RGMII:
1868	case PHY_INTERFACE_MODE_RGMII_ID:
1869	case PHY_INTERFACE_MODE_RGMII_RXID:
1870	case PHY_INTERFACE_MODE_RGMII_TXID:
1871		priv->pinmode_val = 0;
1872		break;
1873	default:
1874		return -EINVAL;
1875	}
1876
1877	return 0;
1878}
1879
/* Per-SoC configuration: descriptor width, NULL-terminated clock/reset
 * name lists, and the syscon pinmode hook.
 */
static const struct ave_soc_data ave_pro4_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"gio", "ether", "ether-gb", "ether-phy",
	},
	.reset_names = {
		"gio", "ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};

static const struct ave_soc_data ave_pxs2_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};

static const struct ave_soc_data ave_ld11_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld11_get_pinmode,
};

static const struct ave_soc_data ave_ld20_data = {
	.is_desc_64bit = true,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld20_get_pinmode,
};

static const struct ave_soc_data ave_pxs3_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pxs3_get_pinmode,
};

static const struct ave_soc_data ave_nx1_data = {
	.is_desc_64bit = true,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pxs3_get_pinmode,
};
1945
/* DT compatible strings and their per-SoC configuration */
static const struct of_device_id of_ave_match[] = {
	{
		.compatible = "socionext,uniphier-pro4-ave4",
		.data = &ave_pro4_data,
	},
	{
		.compatible = "socionext,uniphier-pxs2-ave4",
		.data = &ave_pxs2_data,
	},
	{
		.compatible = "socionext,uniphier-ld11-ave4",
		.data = &ave_ld11_data,
	},
	{
		.compatible = "socionext,uniphier-ld20-ave4",
		.data = &ave_ld20_data,
	},
	{
		.compatible = "socionext,uniphier-pxs3-ave4",
		.data = &ave_pxs3_data,
	},
	{
		.compatible = "socionext,uniphier-nx1-ave4",
		.data = &ave_nx1_data,
	},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_ave_match);

static struct platform_driver ave_driver = {
	.probe  = ave_probe,
	.remove_new = ave_remove,
	.driver	= {
		.name = "ave",
		.pm   = AVE_PM_OPS,
		.of_match_table	= of_ave_match,
	},
};
module_platform_driver(ave_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
MODULE_LICENSE("GPL v2");
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * sni_ave.c - Socionext UniPhier AVE ethernet driver
   4 * Copyright 2014 Panasonic Corporation
   5 * Copyright 2015-2017 Socionext Inc.
   6 */
   7
   8#include <linux/bitops.h>
   9#include <linux/clk.h>
  10#include <linux/etherdevice.h>
  11#include <linux/interrupt.h>
  12#include <linux/io.h>
  13#include <linux/iopoll.h>
  14#include <linux/mfd/syscon.h>
  15#include <linux/mii.h>
  16#include <linux/module.h>
  17#include <linux/netdevice.h>
 
  18#include <linux/of_net.h>
  19#include <linux/of_mdio.h>
  20#include <linux/of_platform.h>
  21#include <linux/phy.h>
 
  22#include <linux/regmap.h>
  23#include <linux/reset.h>
  24#include <linux/types.h>
  25#include <linux/u64_stats_sync.h>
  26
  27/* General Register Group */
  28#define AVE_IDR			0x000	/* ID */
  29#define AVE_VR			0x004	/* Version */
  30#define AVE_GRR			0x008	/* Global Reset */
  31#define AVE_CFGR		0x00c	/* Configuration */
  32
  33/* Interrupt Register Group */
  34#define AVE_GIMR		0x100	/* Global Interrupt Mask */
  35#define AVE_GISR		0x104	/* Global Interrupt Status */
  36
  37/* MAC Register Group */
  38#define AVE_TXCR		0x200	/* TX Setup */
  39#define AVE_RXCR		0x204	/* RX Setup */
  40#define AVE_RXMAC1R		0x208	/* MAC address (lower) */
  41#define AVE_RXMAC2R		0x20c	/* MAC address (upper) */
  42#define AVE_MDIOCTR		0x214	/* MDIO Control */
  43#define AVE_MDIOAR		0x218	/* MDIO Address */
  44#define AVE_MDIOWDR		0x21c	/* MDIO Data */
  45#define AVE_MDIOSR		0x220	/* MDIO Status */
  46#define AVE_MDIORDR		0x224	/* MDIO Rd Data */
  47
  48/* Descriptor Control Register Group */
  49#define AVE_DESCC		0x300	/* Descriptor Control */
  50#define AVE_TXDC		0x304	/* TX Descriptor Configuration */
  51#define AVE_RXDC0		0x308	/* RX Descriptor Ring0 Configuration */
  52#define AVE_IIRQC		0x34c	/* Interval IRQ Control */
  53
  54/* Packet Filter Register Group */
  55#define AVE_PKTF_BASE		0x800	/* PF Base Address */
  56#define AVE_PFMBYTE_BASE	0xd00	/* PF Mask Byte Base Address */
  57#define AVE_PFMBIT_BASE		0xe00	/* PF Mask Bit Base Address */
  58#define AVE_PFSEL_BASE		0xf00	/* PF Selector Base Address */
  59#define AVE_PFEN		0xffc	/* Packet Filter Enable */
  60#define AVE_PKTF(ent)		(AVE_PKTF_BASE + (ent) * 0x40)
  61#define AVE_PFMBYTE(ent)	(AVE_PFMBYTE_BASE + (ent) * 8)
  62#define AVE_PFMBIT(ent)		(AVE_PFMBIT_BASE + (ent) * 4)
  63#define AVE_PFSEL(ent)		(AVE_PFSEL_BASE + (ent) * 4)
  64
  65/* 64bit descriptor memory */
  66#define AVE_DESC_SIZE_64	12	/* Descriptor Size */
  67
  68#define AVE_TXDM_64		0x1000	/* Tx Descriptor Memory */
  69#define AVE_RXDM_64		0x1c00	/* Rx Descriptor Memory */
  70
  71#define AVE_TXDM_SIZE_64	0x0ba0	/* Tx Descriptor Memory Size 3KB */
  72#define AVE_RXDM_SIZE_64	0x6000	/* Rx Descriptor Memory Size 24KB */
  73
  74/* 32bit descriptor memory */
  75#define AVE_DESC_SIZE_32	8	/* Descriptor Size */
  76
  77#define AVE_TXDM_32		0x1000	/* Tx Descriptor Memory */
  78#define AVE_RXDM_32		0x1800	/* Rx Descriptor Memory */
  79
  80#define AVE_TXDM_SIZE_32	0x07c0	/* Tx Descriptor Memory Size 2KB */
  81#define AVE_RXDM_SIZE_32	0x4000	/* Rx Descriptor Memory Size 16KB */
  82
  83/* RMII Bridge Register Group */
  84#define AVE_RSTCTRL		0x8028	/* Reset control */
  85#define AVE_RSTCTRL_RMIIRST	BIT(16)
  86#define AVE_LINKSEL		0x8034	/* Link speed setting */
  87#define AVE_LINKSEL_100M	BIT(0)
  88
  89/* AVE_GRR */
  90#define AVE_GRR_RXFFR		BIT(5)	/* Reset RxFIFO */
  91#define AVE_GRR_PHYRST		BIT(4)	/* Reset external PHY */
  92#define AVE_GRR_GRST		BIT(0)	/* Reset all MAC */
  93
  94/* AVE_CFGR */
  95#define AVE_CFGR_FLE		BIT(31)	/* Filter Function */
  96#define AVE_CFGR_CHE		BIT(30)	/* Checksum Function */
  97#define AVE_CFGR_MII		BIT(27)	/* Func mode (1:MII/RMII, 0:RGMII) */
  98#define AVE_CFGR_IPFCEN		BIT(24)	/* IP fragment sum Enable */
  99
 100/* AVE_GISR (common with GIMR) */
 101#define AVE_GI_PHY		BIT(24)	/* PHY interrupt */
 102#define AVE_GI_TX		BIT(16)	/* Tx complete */
 103#define AVE_GI_RXERR		BIT(8)	/* Receive frame more than max size */
 104#define AVE_GI_RXOVF		BIT(7)	/* Overflow at the RxFIFO */
 105#define AVE_GI_RXDROP		BIT(6)	/* Drop packet */
 106#define AVE_GI_RXIINT		BIT(5)	/* Interval interrupt */
 107
 108/* AVE_TXCR */
 109#define AVE_TXCR_FLOCTR		BIT(18)	/* Flow control */
 110#define AVE_TXCR_TXSPD_1G	BIT(17)
 111#define AVE_TXCR_TXSPD_100	BIT(16)
 112
 113/* AVE_RXCR */
 114#define AVE_RXCR_RXEN		BIT(30)	/* Rx enable */
 115#define AVE_RXCR_FDUPEN		BIT(22)	/* Interface mode */
 116#define AVE_RXCR_FLOCTR		BIT(21)	/* Flow control */
 117#define AVE_RXCR_AFEN		BIT(19)	/* MAC address filter */
 118#define AVE_RXCR_DRPEN		BIT(18)	/* Drop pause frame */
 119#define AVE_RXCR_MPSIZ_MASK	GENMASK(10, 0)
 120
 121/* AVE_MDIOCTR */
 122#define AVE_MDIOCTR_RREQ	BIT(3)	/* Read request */
 123#define AVE_MDIOCTR_WREQ	BIT(2)	/* Write request */
 124
 125/* AVE_MDIOSR */
 126#define AVE_MDIOSR_STS		BIT(0)	/* access status */
 127
 128/* AVE_DESCC */
 129#define AVE_DESCC_STATUS_MASK	GENMASK(31, 16)
 130#define AVE_DESCC_RD0		BIT(8)	/* Enable Rx descriptor Ring0 */
 131#define AVE_DESCC_RDSTP		BIT(4)	/* Pause Rx descriptor */
 132#define AVE_DESCC_TD		BIT(0)	/* Enable Tx descriptor */
 133
 134/* AVE_TXDC */
 135#define AVE_TXDC_SIZE		GENMASK(27, 16)	/* Size of Tx descriptor */
 136#define AVE_TXDC_ADDR		GENMASK(11, 0)	/* Start address */
 137#define AVE_TXDC_ADDR_START	0
 138
 139/* AVE_RXDC0 */
 140#define AVE_RXDC0_SIZE		GENMASK(30, 16)	/* Size of Rx descriptor */
 141#define AVE_RXDC0_ADDR		GENMASK(14, 0)	/* Start address */
 142#define AVE_RXDC0_ADDR_START	0
 143
 144/* AVE_IIRQC */
 145#define AVE_IIRQC_EN0		BIT(27)	/* Enable interval interrupt Ring0 */
 146#define AVE_IIRQC_BSCK		GENMASK(15, 0)	/* Interval count unit */
 147
 148/* Command status for descriptor */
 149#define AVE_STS_OWN		BIT(31)	/* Descriptor ownership */
 150#define AVE_STS_INTR		BIT(29)	/* Request for interrupt */
 151#define AVE_STS_OK		BIT(27)	/* Normal transmit */
 152/* TX */
 153#define AVE_STS_NOCSUM		BIT(28)	/* No use HW checksum */
 154#define AVE_STS_1ST		BIT(26)	/* Head of buffer chain */
 155#define AVE_STS_LAST		BIT(25)	/* Tail of buffer chain */
 156#define AVE_STS_OWC		BIT(21)	/* Out of window,Late Collision */
 157#define AVE_STS_EC		BIT(20)	/* Excess collision occurred */
 158#define AVE_STS_PKTLEN_TX_MASK	GENMASK(15, 0)
 159/* RX */
 160#define AVE_STS_CSSV		BIT(21)	/* Checksum check performed */
 161#define AVE_STS_CSER		BIT(20)	/* Checksum error detected */
 162#define AVE_STS_PKTLEN_RX_MASK	GENMASK(10, 0)
 163
 164/* Packet filter */
 165#define AVE_PFMBYTE_MASK0	(GENMASK(31, 8) | GENMASK(5, 0))
 166#define AVE_PFMBYTE_MASK1	GENMASK(25, 0)
 167#define AVE_PFMBIT_MASK		GENMASK(15, 0)
 168
 169#define AVE_PF_SIZE		17	/* Number of all packet filter */
 170#define AVE_PF_MULTICAST_SIZE	7	/* Number of multicast filter */
 171
 172#define AVE_PFNUM_FILTER	0	/* No.0 */
 173#define AVE_PFNUM_UNICAST	1	/* No.1 */
 174#define AVE_PFNUM_BROADCAST	2	/* No.2 */
 175#define AVE_PFNUM_MULTICAST	11	/* No.11-17 */
 176
 177/* NETIF Message control */
 178#define AVE_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV    |	\
 179				 NETIF_MSG_PROBE  |	\
 180				 NETIF_MSG_LINK   |	\
 181				 NETIF_MSG_TIMER  |	\
 182				 NETIF_MSG_IFDOWN |	\
 183				 NETIF_MSG_IFUP   |	\
 184				 NETIF_MSG_RX_ERR |	\
 185				 NETIF_MSG_TX_ERR)
 186
 187/* Parameter for descriptor */
 188#define AVE_NR_TXDESC		64	/* Tx descriptor */
 189#define AVE_NR_RXDESC		256	/* Rx descriptor */
 190
 191#define AVE_DESC_OFS_CMDSTS	0
 192#define AVE_DESC_OFS_ADDRL	4
 193#define AVE_DESC_OFS_ADDRU	8
 194
 195/* Parameter for ethernet frame */
 196#define AVE_MAX_ETHFRAME	1518
 197#define AVE_FRAME_HEADROOM	2
 198
 199/* Parameter for interrupt */
 200#define AVE_INTM_COUNT		20
 201#define AVE_FORCE_TXINTCNT	1
 202
 203/* SG */
 204#define SG_ETPINMODE		0x540
 205#define SG_ETPINMODE_EXTPHY	BIT(1)	/* for LD11 */
 206#define SG_ETPINMODE_RMII(ins)	BIT(ins)
 207
 208#define IS_DESC_64BIT(p)	((p)->data->is_desc_64bit)
 209
 210#define AVE_MAX_CLKS		4
 211#define AVE_MAX_RSTS		2
 212
/* Descriptor ring selector passed to the ave_desc_* accessor helpers */
enum desc_id {
	AVE_DESCID_RX,
	AVE_DESCID_TX,
};
 217
/* Target states accepted by ave_desc_switch() */
enum desc_state {
	AVE_DESC_RX_PERMIT,	/* resume Rx descriptor processing */
	AVE_DESC_RX_SUSPEND,	/* pause Rx descriptor processing */
	AVE_DESC_START,		/* enable Tx and Rx ring0 descriptors */
	AVE_DESC_STOP,		/* disable all descriptor processing */
};
 224
/* Software bookkeeping for the skb bound to one hardware descriptor */
struct ave_desc {
	struct sk_buff	*skbs;		/* skb attached to this descriptor */
	dma_addr_t	skbs_dma;	/* DMA address of mapping, 0 if unmapped */
	size_t		skbs_dmalen;	/* length of the DMA mapping */
};
 230
/* Per-ring (Tx or Rx) descriptor ring state */
struct ave_desc_info {
	u32	ndesc;		/* number of descriptor */
	u32	daddr;		/* start address of descriptor */
	u32	proc_idx;	/* index of processing packet */
	u32	done_idx;	/* index of processed packet */
	struct ave_desc *desc;	/* skb info related descriptor */
};
 238
/* Per-direction statistics; packets/bytes are guarded by syncp */
struct ave_stats {
	struct	u64_stats_sync	syncp;
	u64	packets;
	u64	bytes;
	u64	errors;
	u64	dropped;
	u64	collisions;
	u64	fifo_errors;
};
 248
/* Driver private data, embedded via netdev_priv(ndev) */
struct ave_private {
	void __iomem            *base;	/* ioremapped register base */
	int                     irq;
	int			phy_id;
	unsigned int		desc_size;	/* bytes per HW descriptor */
	u32			msg_enable;
	int			nclks;	/* valid entries in clk[] */
	struct clk		*clk[AVE_MAX_CLKS];
	int			nrsts;	/* valid entries in rst[] */
	struct reset_control	*rst[AVE_MAX_RSTS];
	phy_interface_t		phy_mode;
	struct phy_device	*phydev;
	struct mii_bus		*mdio;
	struct regmap		*regmap;	/* syscon for SG_ETPINMODE */
	unsigned int		pinmode_mask;
	unsigned int		pinmode_val;
	u32			wolopts;

	/* stats */
	struct ave_stats	stats_rx;
	struct ave_stats	stats_tx;

	/* NAPI support */
	struct net_device	*ndev;
	struct napi_struct	napi_rx;
	struct napi_struct	napi_tx;

	/* descriptor */
	struct ave_desc_info	rx;
	struct ave_desc_info	tx;

	/* flow control */
	int pause_auto;
	int pause_rx;
	int pause_tx;

	const struct ave_soc_data *data;	/* per-SoC parameters */
};
 287
/* Per-SoC configuration selected at probe time */
struct ave_soc_data {
	bool	is_desc_64bit;	/* descriptors carry a 64-bit buffer address */
	const char	*clock_names[AVE_MAX_CLKS];
	const char	*reset_names[AVE_MAX_RSTS];
	/* translate phy_mode into SG_ETPINMODE mask/value for this SoC */
	int	(*get_pinmode)(struct ave_private *priv,
			       phy_interface_t phy_mode, u32 arg);
};
 295
 296static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
 297			 int offset)
 298{
 299	struct ave_private *priv = netdev_priv(ndev);
 300	u32 addr;
 301
 302	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
 303		+ entry * priv->desc_size + offset;
 304
 305	return readl(priv->base + addr);
 306}
 307
/* Read the command/status word of descriptor @entry in ring @id */
static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
				int entry)
{
	return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
}
 313
 314static void ave_desc_write(struct net_device *ndev, enum desc_id id,
 315			   int entry, int offset, u32 val)
 316{
 317	struct ave_private *priv = netdev_priv(ndev);
 318	u32 addr;
 319
 320	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
 321		+ entry * priv->desc_size + offset;
 322
 323	writel(val, priv->base + addr);
 324}
 325
/* Write the command/status word of descriptor @entry in ring @id */
static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
				  int entry, u32 val)
{
	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
}
 331
 332static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
 333				int entry, dma_addr_t paddr)
 334{
 335	struct ave_private *priv = netdev_priv(ndev);
 336
 337	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL,
 338		       lower_32_bits(paddr));
 339	if (IS_DESC_64BIT(priv))
 340		ave_desc_write(ndev, id,
 341			       entry, AVE_DESC_OFS_ADDRU,
 342			       upper_32_bits(paddr));
 343}
 344
 345static u32 ave_irq_disable_all(struct net_device *ndev)
 346{
 347	struct ave_private *priv = netdev_priv(ndev);
 348	u32 ret;
 349
 350	ret = readl(priv->base + AVE_GIMR);
 351	writel(0, priv->base + AVE_GIMR);
 352
 353	return ret;
 354}
 355
/* Restore the global interrupt mask saved by ave_irq_disable_all() */
static void ave_irq_restore(struct net_device *ndev, u32 val)
{
	struct ave_private *priv = netdev_priv(ndev);

	writel(val, priv->base + AVE_GIMR);
}
 362
/* Unmask the interrupts in @bitflag and acknowledge any pending status
 * for them (GISR writes clear status bits elsewhere in this driver, e.g.
 * ave_rxfifo_reset(); presumably write-1-to-clear semantics).
 */
static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
{
	struct ave_private *priv = netdev_priv(ndev);

	writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR);
	writel(bitflag, priv->base + AVE_GISR);
}
 370
 371static void ave_hw_write_macaddr(struct net_device *ndev,
 372				 const unsigned char *mac_addr,
 373				 int reg1, int reg2)
 374{
 375	struct ave_private *priv = netdev_priv(ndev);
 376
 377	writel(mac_addr[0] | mac_addr[1] << 8 |
 378	       mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1);
 379	writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
 380}
 381
 382static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
 383{
 384	struct ave_private *priv = netdev_priv(ndev);
 385	u32 major, minor, vr;
 386
 387	vr = readl(priv->base + AVE_VR);
 388	major = (vr & GENMASK(15, 8)) >> 8;
 389	minor = (vr & GENMASK(7, 0));
 390	snprintf(buf, len, "v%u.%u", major, minor);
 391}
 392
/* ethtool .get_drvinfo: report driver name, bus info, and the HW version
 * string in the firmware-version field.
 */
static void ave_ethtool_get_drvinfo(struct net_device *ndev,
				    struct ethtool_drvinfo *info)
{
	struct device *dev = ndev->dev.parent;

	strscpy(info->driver, dev->driver->name, sizeof(info->driver));
	strscpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
	ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
}
 402
/* ethtool .get_msglevel: return the netif message-enable bitmap */
static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}
 409
/* ethtool .set_msglevel: set the netif message-enable bitmap */
static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
{
	struct ave_private *priv = netdev_priv(ndev);

	priv->msg_enable = val;
}
 416
 417static void ave_ethtool_get_wol(struct net_device *ndev,
 418				struct ethtool_wolinfo *wol)
 419{
 420	wol->supported = 0;
 421	wol->wolopts   = 0;
 422
 423	if (ndev->phydev)
 424		phy_ethtool_get_wol(ndev->phydev, wol);
 425}
 426
 427static int __ave_ethtool_set_wol(struct net_device *ndev,
 428				 struct ethtool_wolinfo *wol)
 429{
 430	if (!ndev->phydev ||
 431	    (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
 432		return -EOPNOTSUPP;
 433
 434	return phy_ethtool_set_wol(ndev->phydev, wol);
 435}
 436
 437static int ave_ethtool_set_wol(struct net_device *ndev,
 438			       struct ethtool_wolinfo *wol)
 439{
 440	int ret;
 441
 442	ret = __ave_ethtool_set_wol(ndev, wol);
 443	if (!ret)
 444		device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
 445
 446	return ret;
 447}
 448
/* ethtool .get_pauseparam: report the cached flow-control settings */
static void ave_ethtool_get_pauseparam(struct net_device *ndev,
				       struct ethtool_pauseparam *pause)
{
	struct ave_private *priv = netdev_priv(ndev);

	pause->autoneg  = priv->pause_auto;
	pause->rx_pause = priv->pause_rx;
	pause->tx_pause = priv->pause_tx;
}
 458
 459static int ave_ethtool_set_pauseparam(struct net_device *ndev,
 460				      struct ethtool_pauseparam *pause)
 461{
 462	struct ave_private *priv = netdev_priv(ndev);
 463	struct phy_device *phydev = ndev->phydev;
 464
 465	if (!phydev)
 466		return -EINVAL;
 467
 468	priv->pause_auto = pause->autoneg;
 469	priv->pause_rx   = pause->rx_pause;
 470	priv->pause_tx   = pause->tx_pause;
 471
 472	phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
 473
 474	return 0;
 475}
 476
/* ethtool operations; link settings and nway reset are delegated to phylib */
static const struct ethtool_ops ave_ethtool_ops = {
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_drvinfo		= ave_ethtool_get_drvinfo,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= ave_ethtool_get_msglevel,
	.set_msglevel		= ave_ethtool_set_msglevel,
	.get_wol		= ave_ethtool_get_wol,
	.set_wol		= ave_ethtool_set_wol,
	.get_pauseparam         = ave_ethtool_get_pauseparam,
	.set_pauseparam         = ave_ethtool_set_pauseparam,
};
 490
/* MDIO bus .read callback: read register @regnum from PHY @phyid.
 * Returns the 16-bit register value, or a negative errno if the MDIO
 * transaction does not complete within the poll timeout.
 */
static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
{
	struct net_device *ndev = bus->priv;
	struct ave_private *priv;
	u32 mdioctl, mdiosr;
	int ret;

	priv = netdev_priv(ndev);

	/* write address */
	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);

	/* read request (clear any pending write request bit) */
	mdioctl = readl(priv->base + AVE_MDIOCTR);
	writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ,
	       priv->base + AVE_MDIOCTR);

	/* poll until the hardware clears the busy/status bit */
	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
	if (ret) {
		netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
			   phyid, regnum);
		return ret;
	}

	/* MDIO data is 16 bits wide */
	return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
}
 518
/* MDIO bus .write callback: write @val to register @regnum of PHY @phyid.
 * Returns 0, or a negative errno if the MDIO transaction does not
 * complete within the poll timeout.
 */
static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
			     u16 val)
{
	struct net_device *ndev = bus->priv;
	struct ave_private *priv;
	u32 mdioctl, mdiosr;
	int ret;

	priv = netdev_priv(ndev);

	/* write address */
	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);

	/* write data */
	writel(val, priv->base + AVE_MDIOWDR);

	/* write request (clear any pending read request bit) */
	mdioctl = readl(priv->base + AVE_MDIOCTR);
	writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ,
	       priv->base + AVE_MDIOCTR);

	/* poll until the hardware clears the busy/status bit */
	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
	if (ret)
		netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
			   phyid, regnum);

	return ret;
}
 548
 549static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
 550		       void *ptr, size_t len, enum dma_data_direction dir,
 551		       dma_addr_t *paddr)
 552{
 553	dma_addr_t map_addr;
 554
 555	map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir);
 556	if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr)))
 557		return -ENOMEM;
 558
 559	desc->skbs_dma = map_addr;
 560	desc->skbs_dmalen = len;
 561	*paddr = map_addr;
 562
 563	return 0;
 564}
 565
 566static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
 567			  enum dma_data_direction dir)
 568{
 569	if (!desc->skbs_dma)
 570		return;
 571
 572	dma_unmap_single(ndev->dev.parent,
 573			 desc->skbs_dma, desc->skbs_dmalen, dir);
 574	desc->skbs_dma = 0;
 575}
 576
 577/* Prepare Rx descriptor and memory */
 578static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
 579{
 580	struct ave_private *priv = netdev_priv(ndev);
 581	struct sk_buff *skb;
 582	dma_addr_t paddr;
 583	int ret;
 584
 585	skb = priv->rx.desc[entry].skbs;
 586	if (!skb) {
 587		skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
 588		if (!skb) {
 589			netdev_err(ndev, "can't allocate skb for Rx\n");
 590			return -ENOMEM;
 591		}
 592		skb->data += AVE_FRAME_HEADROOM;
 593		skb->tail += AVE_FRAME_HEADROOM;
 594	}
 595
 596	/* set disable to cmdsts */
 597	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
 598			      AVE_STS_INTR | AVE_STS_OWN);
 599
 600	/* map Rx buffer
 601	 * Rx buffer set to the Rx descriptor has two restrictions:
 602	 * - Rx buffer address is 4 byte aligned.
 603	 * - Rx buffer begins with 2 byte headroom, and data will be put from
 604	 *   (buffer + 2).
 605	 * To satisfy this, specify the address to put back the buffer
 606	 * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size
 607	 * by AVE_FRAME_HEADROOM.
 608	 */
 609	ret = ave_dma_map(ndev, &priv->rx.desc[entry],
 610			  skb->data - AVE_FRAME_HEADROOM,
 611			  AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM,
 612			  DMA_FROM_DEVICE, &paddr);
 613	if (ret) {
 614		netdev_err(ndev, "can't map skb for Rx\n");
 615		dev_kfree_skb_any(skb);
 616		return ret;
 617	}
 618	priv->rx.desc[entry].skbs = skb;
 619
 620	/* set buffer pointer */
 621	ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);
 622
 623	/* set enable to cmdsts */
 624	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
 625			      AVE_STS_INTR | AVE_MAX_ETHFRAME);
 626
 627	return ret;
 628}
 629
/* Switch state of descriptor.
 * Drives the AVE_DESCC register to start/stop descriptor processing or
 * suspend/resume the Rx ring. STOP and RX_SUSPEND poll for hardware
 * acknowledgment and return -EBUSY on timeout; returns 0 on success,
 * -EINVAL for an unknown state.
 */
static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
{
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;
	u32 val;

	switch (state) {
	case AVE_DESC_START:
		/* enable Tx and Rx ring0 descriptor processing */
		writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
		break;

	case AVE_DESC_STOP:
		/* disable everything and wait for the whole register to
		 * read back as zero
		 */
		writel(0, priv->base + AVE_DESCC);
		if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
				       150, 15000)) {
			netdev_err(ndev, "can't stop descriptor\n");
			ret = -EBUSY;
		}
		break;

	case AVE_DESC_RX_SUSPEND:
		/* request Rx pause; status bits in the upper half must not
		 * be written back, so they are masked out first
		 */
		val = readl(priv->base + AVE_DESCC);
		val |= AVE_DESCC_RDSTP;
		val &= ~AVE_DESCC_STATUS_MASK;
		writel(val, priv->base + AVE_DESCC);
		/* wait for the pause request to be reflected in status */
		if (readl_poll_timeout(priv->base + AVE_DESCC, val,
				       val & (AVE_DESCC_RDSTP << 16),
				       150, 150000)) {
			netdev_err(ndev, "can't suspend descriptor\n");
			ret = -EBUSY;
		}
		break;

	case AVE_DESC_RX_PERMIT:
		/* clear the pause request; no acknowledgment poll needed */
		val = readl(priv->base + AVE_DESCC);
		val &= ~AVE_DESCC_RDSTP;
		val &= ~AVE_DESCC_STATUS_MASK;
		writel(val, priv->base + AVE_DESCC);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
 678
/* Reclaim completed Tx descriptors from done_idx up to proc_idx.
 * Frees transmitted skbs, accumulates Tx statistics, and wakes the Tx
 * queue if it was stopped and buffers were freed. Returns the number of
 * freed buffers.
 */
static int ave_tx_complete(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 proc_idx, done_idx, ndesc, cmdsts;
	unsigned int nr_freebuf = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;

	proc_idx = priv->tx.proc_idx;
	done_idx = priv->tx.done_idx;
	ndesc    = priv->tx.ndesc;

	/* free pre-stored skb from done_idx to proc_idx */
	while (proc_idx != done_idx) {
		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);

		/* do nothing if owner is HW (==1 for Tx) */
		if (cmdsts & AVE_STS_OWN)
			break;

		/* check Tx status and updates statistics */
		if (cmdsts & AVE_STS_OK) {
			tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
			/* success: count a packet only on the last fragment
			 * of a buffer chain
			 */
			if (cmdsts & AVE_STS_LAST)
				tx_packets++;
		} else {
			/* error */
			if (cmdsts & AVE_STS_LAST) {
				priv->stats_tx.errors++;
				if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
					priv->stats_tx.collisions++;
			}
		}

		/* release skb */
		if (priv->tx.desc[done_idx].skbs) {
			ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
				      DMA_TO_DEVICE);
			dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
			priv->tx.desc[done_idx].skbs = NULL;
			nr_freebuf++;
		}
		done_idx = (done_idx + 1) % ndesc;
	}

	priv->tx.done_idx = done_idx;

	/* update stats */
	u64_stats_update_begin(&priv->stats_tx.syncp);
	priv->stats_tx.packets += tx_packets;
	priv->stats_tx.bytes   += tx_bytes;
	u64_stats_update_end(&priv->stats_tx.syncp);

	/* wake queue for freeing buffer */
	if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
		netif_wake_queue(ndev);

	return nr_freebuf;
}
 739
/* Receive up to @num packets from the Rx ring.
 * Processes descriptors owned by software starting at proc_idx, passes
 * packets to the stack, updates Rx statistics, and refills consumed
 * descriptors from done_idx. Returns the number of descriptors
 * processed (including errored ones).
 */
static int ave_rx_receive(struct net_device *ndev, int num)
{
	struct ave_private *priv = netdev_priv(ndev);
	unsigned int rx_packets = 0;
	unsigned int rx_bytes = 0;
	u32 proc_idx, done_idx;
	struct sk_buff *skb;
	unsigned int pktlen;
	int restpkt, npkts;
	u32 ndesc, cmdsts;

	proc_idx = priv->rx.proc_idx;
	done_idx = priv->rx.done_idx;
	ndesc    = priv->rx.ndesc;
	/* number of descriptors still available ahead of proc_idx; one
	 * slot is kept free so proc_idx never catches up to done_idx
	 */
	restpkt  = ((proc_idx + ndesc - 1) - done_idx) % ndesc;

	for (npkts = 0; npkts < num; npkts++) {
		/* we can't receive more packet, so fill desc quickly */
		if (--restpkt < 0)
			break;

		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);

		/* do nothing if owner is HW (==0 for Rx) */
		if (!(cmdsts & AVE_STS_OWN))
			break;

		/* count the error and skip the descriptor; its skb stays
		 * attached and will be reused by the refill below
		 */
		if (!(cmdsts & AVE_STS_OK)) {
			priv->stats_rx.errors++;
			proc_idx = (proc_idx + 1) % ndesc;
			continue;
		}

		pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;

		/* get skbuff for rx */
		skb = priv->rx.desc[proc_idx].skbs;
		priv->rx.desc[proc_idx].skbs = NULL;

		ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);

		skb->dev = ndev;
		skb_put(skb, pktlen);
		skb->protocol = eth_type_trans(skb, ndev);

		/* HW validated the checksum and found no error */
		if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_packets++;
		rx_bytes += pktlen;

		netif_receive_skb(skb);

		proc_idx = (proc_idx + 1) % ndesc;
	}

	priv->rx.proc_idx = proc_idx;

	/* update stats */
	u64_stats_update_begin(&priv->stats_rx.syncp);
	priv->stats_rx.packets += rx_packets;
	priv->stats_rx.bytes   += rx_bytes;
	u64_stats_update_end(&priv->stats_rx.syncp);

	/* refill the Rx buffers */
	while (proc_idx != done_idx) {
		if (ave_rxdesc_prepare(ndev, done_idx))
			break;
		done_idx = (done_idx + 1) % ndesc;
	}

	priv->rx.done_idx = done_idx;

	return npkts;
}
 815
 816static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
 817{
 818	struct ave_private *priv;
 819	struct net_device *ndev;
 820	int num;
 821
 822	priv = container_of(napi, struct ave_private, napi_rx);
 823	ndev = priv->ndev;
 824
 825	num = ave_rx_receive(ndev, budget);
 826	if (num < budget) {
 827		napi_complete_done(napi, num);
 828
 829		/* enable Rx interrupt when NAPI finishes */
 830		ave_irq_enable(ndev, AVE_GI_RXIINT);
 831	}
 832
 833	return num;
 834}
 835
 836static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
 837{
 838	struct ave_private *priv;
 839	struct net_device *ndev;
 840	int num;
 841
 842	priv = container_of(napi, struct ave_private, napi_tx);
 843	ndev = priv->ndev;
 844
 845	num = ave_tx_complete(ndev);
 846	napi_complete(napi);
 847
 848	/* enable Tx interrupt when NAPI finishes */
 849	ave_irq_enable(ndev, AVE_GI_TX);
 850
 851	return num;
 852}
 853
/* Perform a full controller reset.
 * Configures the mode register, then sequences the global/PHY/RMII
 * resets with the delays the hardware requires, and finally masks all
 * interrupts. The assert/negate ordering and msleep() intervals are
 * part of the reset sequence — do not reorder.
 */
static void ave_global_reset(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 val;

	/* set config register */
	val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
	if (!phy_interface_mode_is_rgmii(priv->phy_mode))
		val |= AVE_CFGR_MII;
	writel(val, priv->base + AVE_CFGR);

	/* reset RMII register */
	val = readl(priv->base + AVE_RSTCTRL);
	val &= ~AVE_RSTCTRL_RMIIRST;
	writel(val, priv->base + AVE_RSTCTRL);

	/* assert reset */
	writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
	msleep(20);

	/* 1st, negate PHY reset only */
	writel(AVE_GRR_GRST, priv->base + AVE_GRR);
	msleep(40);

	/* negate reset */
	writel(0, priv->base + AVE_GRR);
	msleep(40);

	/* negate RMII register */
	val = readl(priv->base + AVE_RSTCTRL);
	val |= AVE_RSTCTRL_RMIIRST;
	writel(val, priv->base + AVE_RSTCTRL);

	ave_irq_disable_all(ndev);
}
 889
/* Recover from an Rx FIFO overflow.
 * Temporarily disables MAC reception and suspends the Rx ring, drains
 * pending packets, pulses the Rx FIFO reset, clears the overflow
 * status, and restores the previous state.
 */
static void ave_rxfifo_reset(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 rxcr_org;

	/* save and disable MAC receive op */
	rxcr_org = readl(priv->base + AVE_RXCR);
	writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);

	/* suspend Rx descriptor */
	ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);

	/* receive all packets before descriptor starts */
	ave_rx_receive(ndev, priv->rx.ndesc);

	/* assert reset */
	writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
	udelay(50);

	/* negate reset */
	writel(0, priv->base + AVE_GRR);
	udelay(20);

	/* negate interrupt status */
	writel(AVE_GI_RXOVF, priv->base + AVE_GISR);

	/* permit descriptor */
	ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);

	/* restore MAC receive op */
	writel(rxcr_org, priv->base + AVE_RXCR);
}
 922
/* Top-half interrupt handler.
 * Masks all interrupts, acknowledges/dispatches each pending source,
 * and restores the mask on exit. Rx-interval and Tx-complete events are
 * deferred to NAPI; their mask bits are left cleared until the
 * corresponding poll function re-enables them.
 */
static irqreturn_t ave_irq_handler(int irq, void *netdev)
{
	struct net_device *ndev = (struct net_device *)netdev;
	struct ave_private *priv = netdev_priv(ndev);
	u32 gimr_val, gisr_val;

	gimr_val = ave_irq_disable_all(ndev);

	/* get interrupt status */
	gisr_val = readl(priv->base + AVE_GISR);

	/* PHY */
	if (gisr_val & AVE_GI_PHY)
		writel(AVE_GI_PHY, priv->base + AVE_GISR);

	/* check exceeding packet */
	if (gisr_val & AVE_GI_RXERR) {
		writel(AVE_GI_RXERR, priv->base + AVE_GISR);
		netdev_err(ndev, "receive a packet exceeding frame buffer\n");
	}

	/* only act on sources that were actually enabled */
	gisr_val &= gimr_val;
	if (!gisr_val)
		goto exit_isr;

	/* RxFIFO overflow: full recovery path, nothing else this pass */
	if (gisr_val & AVE_GI_RXOVF) {
		priv->stats_rx.fifo_errors++;
		ave_rxfifo_reset(ndev);
		goto exit_isr;
	}

	/* Rx drop */
	if (gisr_val & AVE_GI_RXDROP) {
		priv->stats_rx.dropped++;
		writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
	}

	/* Rx interval */
	if (gisr_val & AVE_GI_RXIINT) {
		napi_schedule(&priv->napi_rx);
		/* still force to disable Rx interrupt until NAPI finishes */
		gimr_val &= ~AVE_GI_RXIINT;
	}

	/* Tx completed */
	if (gisr_val & AVE_GI_TX) {
		napi_schedule(&priv->napi_tx);
		/* still force to disable Tx interrupt until NAPI finishes */
		gimr_val &= ~AVE_GI_TX;
	}

exit_isr:
	ave_irq_restore(ndev, gimr_val);

	return IRQ_HANDLED;
}
 980
/* Enable packet filter @entry; returns -EINVAL for an out-of-range entry */
static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 val;

	if (WARN_ON(entry > AVE_PF_SIZE))
		return -EINVAL;

	val = readl(priv->base + AVE_PFEN);
	writel(val | BIT(entry), priv->base + AVE_PFEN);

	return 0;
}
 994
 995static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
 996{
 997	struct ave_private *priv = netdev_priv(ndev);
 998	u32 val;
 999
1000	if (WARN_ON(entry > AVE_PF_SIZE))
1001		return -EINVAL;
1002
1003	val = readl(priv->base + AVE_PFEN);
1004	writel(val & ~BIT(entry), priv->base + AVE_PFEN);
1005
1006	return 0;
1007}
1008
/* Program packet filter @entry to match @mac_addr.
 * Only the first @set_size bytes (max 6) of the address take part in
 * the comparison. Matching packets are steered to Rx ring 0. The filter
 * is disabled while it is being reprogrammed and re-enabled afterwards.
 * Returns 0, or -EINVAL for invalid @entry/@set_size.
 */
static int ave_pfsel_set_macaddr(struct net_device *ndev,
				 unsigned int entry,
				 const unsigned char *mac_addr,
				 unsigned int set_size)
{
	struct ave_private *priv = netdev_priv(ndev);

	if (WARN_ON(entry > AVE_PF_SIZE))
		return -EINVAL;
	if (WARN_ON(set_size > 6))
		return -EINVAL;

	ave_pfsel_stop(ndev, entry);

	/* set MAC address for the filter */
	ave_hw_write_macaddr(ndev, mac_addr,
			     AVE_PKTF(entry), AVE_PKTF(entry) + 4);

	/* set byte mask: mask out (ignore) all bytes beyond set_size */
	writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
	       priv->base + AVE_PFMBYTE(entry));
	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);

	/* set bit mask filter */
	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));

	/* set selector to ring 0 */
	writel(0, priv->base + AVE_PFSEL(entry));

	/* restart filter */
	ave_pfsel_start(ndev, entry);

	return 0;
}
1043
/* Program packet filter @entry as a match-everything (promiscuous)
 * filter and steer matching packets to Rx ring @rxring. The filter is
 * disabled during reprogramming and re-enabled afterwards.
 */
static void ave_pfsel_set_promisc(struct net_device *ndev,
				  unsigned int entry, u32 rxring)
{
	struct ave_private *priv = netdev_priv(ndev);

	if (WARN_ON(entry > AVE_PF_SIZE))
		return;

	ave_pfsel_stop(ndev, entry);

	/* set byte mask: all bytes masked, so every packet matches */
	writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);

	/* set bit mask filter */
	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));

	/* set selector to rxring */
	writel(rxring, priv->base + AVE_PFSEL(entry));

	ave_pfsel_start(ndev, entry);
}
1066
1067static void ave_pfsel_init(struct net_device *ndev)
1068{
1069	unsigned char bcast_mac[ETH_ALEN];
1070	int i;
1071
1072	eth_broadcast_addr(bcast_mac);
1073
1074	for (i = 0; i < AVE_PF_SIZE; i++)
1075		ave_pfsel_stop(ndev, i);
1076
1077	/* promiscious entry, select ring 0 */
1078	ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);
1079
1080	/* unicast entry */
1081	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
1082
1083	/* broadcast entry */
1084	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6);
1085}
1086
/* phylib adjust_link callback.
 * Mirrors the PHY-negotiated speed, duplex, and pause settings into the
 * MAC's TXCR/RXCR/LINKSEL registers. The MAC receiver is briefly
 * disabled while RXCR is being changed.
 */
static void ave_phy_adjust_link(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u32 val, txcr, rxcr, rxcr_org;
	u16 rmt_adv = 0, lcl_adv = 0;
	u8 cap;

	/* set RGMII speed */
	val = readl(priv->base + AVE_TXCR);
	val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);

	if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
		val |= AVE_TXCR_TXSPD_1G;
	else if (phydev->speed == SPEED_100)
		val |= AVE_TXCR_TXSPD_100;

	writel(val, priv->base + AVE_TXCR);

	/* set RMII speed (100M/10M only) */
	if (!phy_interface_is_rgmii(phydev)) {
		val = readl(priv->base + AVE_LINKSEL);
		if (phydev->speed == SPEED_10)
			val &= ~AVE_LINKSEL_100M;
		else
			val |= AVE_LINKSEL_100M;
		writel(val, priv->base + AVE_LINKSEL);
	}

	/* check current RXCR/TXCR */
	rxcr = readl(priv->base + AVE_RXCR);
	txcr = readl(priv->base + AVE_TXCR);
	rxcr_org = rxcr;

	if (phydev->duplex) {
		rxcr |= AVE_RXCR_FDUPEN;

		/* resolve Tx/Rx pause from local and link-partner
		 * advertisement (full duplex only)
		 */
		if (phydev->pause)
			rmt_adv |= LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
		cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (cap & FLOW_CTRL_TX)
			txcr |= AVE_TXCR_FLOCTR;
		else
			txcr &= ~AVE_TXCR_FLOCTR;
		if (cap & FLOW_CTRL_RX)
			rxcr |= AVE_RXCR_FLOCTR;
		else
			rxcr &= ~AVE_RXCR_FLOCTR;
	} else {
		/* half duplex: no flow control */
		rxcr &= ~AVE_RXCR_FDUPEN;
		rxcr &= ~AVE_RXCR_FLOCTR;
		txcr &= ~AVE_TXCR_FLOCTR;
	}

	if (rxcr_org != rxcr) {
		/* disable Rx mac */
		writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
		/* change and enable TX/Rx mac */
		writel(txcr, priv->base + AVE_TXCR);
		writel(rxcr, priv->base + AVE_RXCR);
	}

	phy_print_status(phydev);
}
1155
/* Program the device MAC address into the MAC registers and refresh the
 * unicast packet-filter entry to match it.
 */
static void ave_macaddr_init(struct net_device *ndev)
{
	ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);

	/* pfsel unicast entry */
	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
}
1163
/* netdev .ndo_init callback.
 * Brings up clocks and resets, applies the pin-mode configuration,
 * resets the controller, registers the MDIO bus, and connects the PHY.
 * Resources are released in reverse order via goto-cleanup on failure.
 * Returns 0 on success or a negative errno.
 */
static int ave_init(struct net_device *ndev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ave_private *priv = netdev_priv(ndev);
	struct device *dev = ndev->dev.parent;
	struct device_node *np = dev->of_node;
	struct device_node *mdio_np;
	struct phy_device *phydev;
	int nc, nr, ret;

	/* enable clk because of hw access until ndo_open */
	for (nc = 0; nc < priv->nclks; nc++) {
		ret = clk_prepare_enable(priv->clk[nc]);
		if (ret) {
			dev_err(dev, "can't enable clock\n");
			goto out_clk_disable;
		}
	}

	for (nr = 0; nr < priv->nrsts; nr++) {
		ret = reset_control_deassert(priv->rst[nr]);
		if (ret) {
			dev_err(dev, "can't deassert reset\n");
			goto out_reset_assert;
		}
	}

	/* select the PHY interface mode via the system controller */
	ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
				 priv->pinmode_mask, priv->pinmode_val);
	if (ret)
		goto out_reset_assert;

	ave_global_reset(ndev);

	mdio_np = of_get_child_by_name(np, "mdio");
	if (!mdio_np) {
		dev_err(dev, "mdio node not found\n");
		ret = -EINVAL;
		goto out_reset_assert;
	}
	ret = of_mdiobus_register(priv->mdio, mdio_np);
	of_node_put(mdio_np);
	if (ret) {
		dev_err(dev, "failed to register mdiobus\n");
		goto out_reset_assert;
	}

	phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
	if (!phydev) {
		dev_err(dev, "could not attach to PHY\n");
		ret = -ENODEV;
		goto out_mdio_unregister;
	}

	priv->phydev = phydev;

	/* device is wakeup-capable only if the PHY supports WoL */
	ave_ethtool_get_wol(ndev, &wol);
	device_set_wakeup_capable(&ndev->dev, !!wol.supported);

	/* set wol initial state disabled */
	wol.wolopts = 0;
	__ave_ethtool_set_wol(ndev, &wol);

	if (!phy_interface_is_rgmii(phydev))
		phy_set_max_speed(phydev, SPEED_100);

	phy_support_asym_pause(phydev);

	/* MAC (not mdio bus core) handles PHY PM over suspend/resume */
	phydev->mac_managed_pm = true;

	phy_attached_info(phydev);

	return 0;

out_mdio_unregister:
	mdiobus_unregister(priv->mdio);
out_reset_assert:
	while (--nr >= 0)
		reset_control_assert(priv->rst[nr]);
out_clk_disable:
	while (--nc >= 0)
		clk_disable_unprepare(priv->clk[nc]);

	return ret;
}
1249
1250static void ave_uninit(struct net_device *ndev)
1251{
1252	struct ave_private *priv = netdev_priv(ndev);
1253	int i;
1254
1255	phy_disconnect(priv->phydev);
1256	mdiobus_unregister(priv->mdio);
1257
1258	/* disable clk because of hw access after ndo_stop */
1259	for (i = 0; i < priv->nrsts; i++)
1260		reset_control_assert(priv->rst[i]);
1261	for (i = 0; i < priv->nclks; i++)
1262		clk_disable_unprepare(priv->clk[i]);
1263}
1264
/* ndo_open callback: allocate the software Tx/Rx rings, program the
 * descriptor engine and MAC Rx/Tx configuration, arm the interrupt
 * moderation timer, then enable NAPI and start the PHY and Tx queue.
 */
static int ave_open(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	int entry;
	int ret;
	u32 val;

	ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
			  ndev);
	if (ret)
		return ret;

	/* software bookkeeping arrays for the rings; the hardware
	 * descriptors themselves are written through priv->base
	 */
	priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
				GFP_KERNEL);
	if (!priv->tx.desc) {
		ret = -ENOMEM;
		goto out_free_irq;
	}

	priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
				GFP_KERNEL);
	if (!priv->rx.desc) {
		kfree(priv->tx.desc);
		ret = -ENOMEM;
		goto out_free_irq;
	}

	/* initialize Tx work and descriptor */
	priv->tx.proc_idx = 0;
	priv->tx.done_idx = 0;
	for (entry = 0; entry < priv->tx.ndesc; entry++) {
		ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
		ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
	}
	/* ring size in bytes goes into the upper half of the register */
	writel(AVE_TXDC_ADDR_START |
	       (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
	       priv->base + AVE_TXDC);

	/* initialize Rx work and descriptor */
	priv->rx.proc_idx = 0;
	priv->rx.done_idx = 0;
	for (entry = 0; entry < priv->rx.ndesc; entry++) {
		if (ave_rxdesc_prepare(ndev, entry))
			break;
	}
	writel(AVE_RXDC0_ADDR_START |
	       (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
	       priv->base + AVE_RXDC0);

	ave_desc_switch(ndev, AVE_DESC_START);

	ave_pfsel_init(ndev);
	ave_macaddr_init(ndev);

	/* set Rx configuration */
	/* full duplex, enable pause drop, enable flow control */
	val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
		AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
	writel(val, priv->base + AVE_RXCR);

	/* set Tx configuration */
	/* enable flow control, disable loopback */
	writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);

	/* enable timer, clear EN,INTM, and mask interval unit(BSCK) */
	val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
	val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
	writel(val, priv->base + AVE_IIRQC);

	/* unmask the interrupt sources we service in the handler */
	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;
	ave_irq_restore(ndev, val);

	napi_enable(&priv->napi_rx);
	napi_enable(&priv->napi_tx);

	phy_start(ndev->phydev);
	phy_start_aneg(ndev->phydev);
	netif_start_queue(ndev);

	return 0;

out_free_irq:
	/* quiesce the interrupt line before releasing the handler */
	disable_irq(priv->irq);
	free_irq(priv->irq, ndev);

	return ret;
}
1352
/* ndo_stop callback: mask and release the interrupt, stop the Tx queue,
 * PHY and NAPI contexts, halt the descriptor engine, then unmap and free
 * every outstanding Tx/Rx buffer and the software rings.
 */
static int ave_stop(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	int entry;

	ave_irq_disable_all(ndev);
	disable_irq(priv->irq);
	free_irq(priv->irq, ndev);

	netif_tx_disable(ndev);
	phy_stop(ndev->phydev);
	napi_disable(&priv->napi_tx);
	napi_disable(&priv->napi_rx);

	ave_desc_switch(ndev, AVE_DESC_STOP);

	/* free Tx buffer */
	for (entry = 0; entry < priv->tx.ndesc; entry++) {
		if (!priv->tx.desc[entry].skbs)
			continue;

		ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
		dev_kfree_skb_any(priv->tx.desc[entry].skbs);
		priv->tx.desc[entry].skbs = NULL;
	}
	priv->tx.proc_idx = 0;
	priv->tx.done_idx = 0;

	/* free Rx buffer */
	for (entry = 0; entry < priv->rx.ndesc; entry++) {
		if (!priv->rx.desc[entry].skbs)
			continue;

		ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx.desc[entry].skbs);
		priv->rx.desc[entry].skbs = NULL;
	}
	priv->rx.proc_idx = 0;
	priv->rx.done_idx = 0;

	kfree(priv->tx.desc);
	kfree(priv->rx.desc);

	return 0;
}
1398
/* ndo_start_xmit callback: map the skb for DMA, fill one Tx descriptor
 * and hand it to the hardware by writing cmdsts with the OWN bit set.
 */
static netdev_tx_t ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 proc_idx, done_idx, ndesc, cmdsts;
	int ret, freepkt;
	dma_addr_t paddr;

	proc_idx = priv->tx.proc_idx;
	done_idx = priv->tx.done_idx;
	ndesc = priv->tx.ndesc;
	/* free entries in the ring, keeping one slot as producer/consumer gap */
	freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;

	/* stop queue when not enough entry */
	if (unlikely(freepkt < 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* add padding for short packet */
	if (skb_put_padto(skb, ETH_ZLEN)) {
		/* skb_put_padto() already freed the skb on failure */
		priv->stats_tx.dropped++;
		return NETDEV_TX_OK;
	}

	/* map Tx buffer
	 * Tx buffer set to the Tx descriptor doesn't have any restriction.
	 */
	ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
			  skb->data, skb->len, DMA_TO_DEVICE, &paddr);
	if (ret) {
		dev_kfree_skb_any(skb);
		priv->stats_tx.dropped++;
		return NETDEV_TX_OK;
	}

	priv->tx.desc[proc_idx].skbs = skb;

	ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);

	/* single-buffer packet: first and last descriptor in one */
	cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
		(skb->len & AVE_STS_PKTLEN_TX_MASK);

	/* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
	if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
		cmdsts |= AVE_STS_INTR;

	/* disable hw checksum insertion when the skb doesn't request
	 * checksum offload
	 */
	if (skb->ip_summed == CHECKSUM_NONE ||
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		cmdsts |= AVE_STS_NOCSUM;

	ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);

	priv->tx.proc_idx = (proc_idx + 1) % ndesc;

	return NETDEV_TX_OK;
}
1456
/* ndo_eth_ioctl callback: forward MII ioctls to the attached PHY */
static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
}
1461
/* leading octets of the IPv4 (01:xx) and IPv6 (33:xx) multicast MAC
 * ranges; passed to the packet filter with length 1, i.e. matched on
 * the first byte only, to accept all multicast traffic
 */
static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
1464
/* ndo_set_rx_mode callback: program the MAC address filter and the
 * per-slot multicast packet filters according to the device flags and
 * the current multicast list.
 */
static void ave_set_rx_mode(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct netdev_hw_addr *hw_adr;
	int count, mc_cnt;
	u32 val;

	/* disable the MAC address filter in promiscuous mode (or when the
	 * multicast list is empty), enable it otherwise
	 */
	mc_cnt = netdev_mc_count(ndev);
	val = readl(priv->base + AVE_RXCR);
	if (ndev->flags & IFF_PROMISC || !mc_cnt)
		val &= ~AVE_RXCR_AFEN;
	else
		val |= AVE_RXCR_AFEN;
	writel(val, priv->base + AVE_RXCR);

	/* set all multicast address */
	if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
		/* match on the first byte only: accept every multicast frame */
		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
				      v4multi_macadr, 1);
		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
				      v6multi_macadr, 1);
	} else {
		/* stop all multicast filter */
		for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
			ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);

		/* set multicast addresses */
		count = 0;
		netdev_for_each_mc_addr(hw_adr, ndev) {
			if (count == mc_cnt)
				break;
			ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
					      hw_adr->addr, 6);
			count++;
		}
	}
}
1503
/* ndo_get_stats64 callback: snapshot the 64-bit packet/byte counters
 * under the u64_stats seqcount (the retry loop makes the read coherent
 * on 32-bit systems), then copy the remaining plain counters.
 */
static void ave_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *stats)
{
	struct ave_private *priv = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&priv->stats_rx.syncp);
		stats->rx_packets = priv->stats_rx.packets;
		stats->rx_bytes	  = priv->stats_rx.bytes;
	} while (u64_stats_fetch_retry(&priv->stats_rx.syncp, start));

	do {
		start = u64_stats_fetch_begin(&priv->stats_tx.syncp);
		stats->tx_packets = priv->stats_tx.packets;
		stats->tx_bytes	  = priv->stats_tx.bytes;
	} while (u64_stats_fetch_retry(&priv->stats_tx.syncp, start));

	/* error/drop counters are read without the seqcount */
	stats->rx_errors      = priv->stats_rx.errors;
	stats->tx_errors      = priv->stats_tx.errors;
	stats->rx_dropped     = priv->stats_rx.dropped;
	stats->tx_dropped     = priv->stats_tx.dropped;
	stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
	stats->collisions     = priv->stats_tx.collisions;
}
1529
/* ndo_set_mac_address callback: validate and store the new address via
 * eth_mac_addr(), then reprogram the hardware address registers.
 */
static int ave_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (err)
		return err;

	ave_macaddr_init(ndev);

	return 0;
}
1541
/* net_device_ops for the AVE ethernet controller */
static const struct net_device_ops ave_netdev_ops = {
	.ndo_init		= ave_init,
	.ndo_uninit		= ave_uninit,
	.ndo_open		= ave_open,
	.ndo_stop		= ave_stop,
	.ndo_start_xmit		= ave_start_xmit,
	.ndo_eth_ioctl		= ave_ioctl,
	.ndo_set_rx_mode	= ave_set_rx_mode,
	.ndo_get_stats64	= ave_get_stats64,
	.ndo_set_mac_address	= ave_set_mac_address,
};
1553
/* platform probe: parse DT (phy-mode, MAC address, syscon phy-mode
 * phandle), map registers, acquire per-SoC clocks/resets, set up the
 * MDIO bus and NAPI contexts, and register the net device.
 * All resources are devm-managed except the NAPI contexts, which are
 * deleted explicitly on the error path.
 */
static int ave_probe(struct platform_device *pdev)
{
	const struct ave_soc_data *data;
	struct device *dev = &pdev->dev;
	char buf[ETHTOOL_FWVERS_LEN];
	struct of_phandle_args args;
	phy_interface_t phy_mode;
	struct ave_private *priv;
	struct net_device *ndev;
	struct device_node *np;
	void __iomem *base;
	const char *name;
	int i, irq, ret;
	u64 dma_mask;
	u32 ave_id;

	data = of_device_get_match_data(dev);
	if (WARN_ON(!data))
		return -EINVAL;

	np = dev->of_node;
	ret = of_get_phy_mode(np, &phy_mode);
	if (ret) {
		dev_err(dev, "phy-mode not found\n");
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ndev = devm_alloc_etherdev(dev, sizeof(struct ave_private));
	if (!ndev) {
		dev_err(dev, "can't allocate ethernet device\n");
		return -ENOMEM;
	}

	ndev->netdev_ops = &ave_netdev_ops;
	ndev->ethtool_ops = &ave_ethtool_ops;
	SET_NETDEV_DEV(ndev, dev);

	ndev->features    |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
	ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);

	/* MTU bounded by the largest frame the MAC accepts */
	ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);

	ret = of_get_ethdev_address(np, ndev);
	if (ret) {
		/* if the mac address is invalid, use random mac address */
		eth_hw_addr_random(ndev);
		dev_warn(dev, "Using random MAC address: %pM\n",
			 ndev->dev_addr);
	}

	priv = netdev_priv(ndev);
	priv->base = base;
	priv->irq = irq;
	priv->ndev = ndev;
	priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
	priv->phy_mode = phy_mode;
	priv->data = data;

	/* descriptor layout and usable DMA range depend on the SoC */
	if (IS_DESC_64BIT(priv)) {
		priv->desc_size = AVE_DESC_SIZE_64;
		priv->tx.daddr  = AVE_TXDM_64;
		priv->rx.daddr  = AVE_RXDM_64;
		dma_mask = DMA_BIT_MASK(64);
	} else {
		priv->desc_size = AVE_DESC_SIZE_32;
		priv->tx.daddr  = AVE_TXDM_32;
		priv->rx.daddr  = AVE_RXDM_32;
		dma_mask = DMA_BIT_MASK(32);
	}
	ret = dma_set_mask(dev, dma_mask);
	if (ret)
		return ret;

	priv->tx.ndesc = AVE_NR_TXDESC;
	priv->rx.ndesc = AVE_NR_RXDESC;

	u64_stats_init(&priv->stats_tx.syncp);
	u64_stats_init(&priv->stats_rx.syncp);

	/* acquire the SoC-specific named clocks; the name array ends at
	 * the first NULL entry
	 */
	for (i = 0; i < AVE_MAX_CLKS; i++) {
		name = priv->data->clock_names[i];
		if (!name)
			break;
		priv->clk[i] = devm_clk_get(dev, name);
		if (IS_ERR(priv->clk[i]))
			return PTR_ERR(priv->clk[i]);
		priv->nclks++;
	}

	for (i = 0; i < AVE_MAX_RSTS; i++) {
		name = priv->data->reset_names[i];
		if (!name)
			break;
		priv->rst[i] = devm_reset_control_get_shared(dev, name);
		if (IS_ERR(priv->rst[i]))
			return PTR_ERR(priv->rst[i]);
		priv->nrsts++;
	}

	/* syscon regmap used to configure the PHY pin mode; args[0]
	 * selects the instance passed to get_pinmode()
	 */
	ret = of_parse_phandle_with_fixed_args(np,
					       "socionext,syscon-phy-mode",
					       1, 0, &args);
	if (ret) {
		dev_err(dev, "can't get syscon-phy-mode property\n");
		return ret;
	}
	priv->regmap = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(priv->regmap)) {
		dev_err(dev, "can't map syscon-phy-mode\n");
		return PTR_ERR(priv->regmap);
	}
	ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]);
	if (ret) {
		dev_err(dev, "invalid phy-mode setting\n");
		return ret;
	}

	priv->mdio = devm_mdiobus_alloc(dev);
	if (!priv->mdio)
		return -ENOMEM;
	priv->mdio->priv = ndev;
	priv->mdio->parent = dev;
	priv->mdio->read = ave_mdiobus_read;
	priv->mdio->write = ave_mdiobus_write;
	priv->mdio->name = "uniphier-mdio";
	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* Register as a NAPI supported driver */
	netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx);
	netif_napi_add_tx(ndev, &priv->napi_tx, ave_napi_poll_tx);

	platform_set_drvdata(pdev, ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "failed to register netdevice\n");
		goto out_del_napi;
	}

	/* get ID and version */
	ave_id = readl(priv->base + AVE_IDR);
	ave_hw_read_version(ndev, buf, sizeof(buf));

	/* the ID register holds four ASCII characters */
	dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
		 (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
		 (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
		 buf, priv->irq, phy_modes(phy_mode));

	return 0;

out_del_napi:
	netif_napi_del(&priv->napi_rx);
	netif_napi_del(&priv->napi_tx);

	return ret;
}
1720
/* platform remove: unregister the netdev (which runs ndo_stop/ndo_uninit
 * as needed) before deleting the NAPI contexts; devm handles the rest.
 */
static int ave_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ave_private *priv = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&priv->napi_rx);
	netif_napi_del(&priv->napi_tx);

	return 0;
}
1732
1733#ifdef CONFIG_PM_SLEEP
/* system suspend: stop the interface if it was running, and remember the
 * current Wake-on-LAN options so ave_resume() can restore them.
 */
static int ave_suspend(struct device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;

	if (netif_running(ndev)) {
		ret = ave_stop(ndev);
		netif_device_detach(ndev);
	}

	ave_ethtool_get_wol(ndev, &wol);
	priv->wolopts = wol.wolopts;

	return ret;
}
1751
/* system resume: reset the controller and the PHY, re-apply the WoL
 * options saved in ave_suspend(), then reopen the interface if it was
 * running before suspend.
 */
static int ave_resume(struct device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;

	ave_global_reset(ndev);

	ret = phy_init_hw(ndev->phydev);
	if (ret)
		return ret;

	/* restore the WoL configuration saved before suspend */
	ave_ethtool_get_wol(ndev, &wol);
	wol.wolopts = priv->wolopts;
	__ave_ethtool_set_wol(ndev, &wol);

	if (netif_running(ndev)) {
		ret = ave_open(ndev);
		netif_device_attach(ndev);
	}

	return ret;
}
1776
/* expose suspend/resume only when CONFIG_PM_SLEEP is enabled */
static SIMPLE_DEV_PM_OPS(ave_pm_ops, ave_suspend, ave_resume);
#define AVE_PM_OPS	(&ave_pm_ops)
#else
#define AVE_PM_OPS	NULL
#endif
1782
1783static int ave_pro4_get_pinmode(struct ave_private *priv,
1784				phy_interface_t phy_mode, u32 arg)
1785{
1786	if (arg > 0)
1787		return -EINVAL;
1788
1789	priv->pinmode_mask = SG_ETPINMODE_RMII(0);
1790
1791	switch (phy_mode) {
1792	case PHY_INTERFACE_MODE_RMII:
1793		priv->pinmode_val = SG_ETPINMODE_RMII(0);
1794		break;
1795	case PHY_INTERFACE_MODE_MII:
1796	case PHY_INTERFACE_MODE_RGMII:
1797	case PHY_INTERFACE_MODE_RGMII_ID:
1798	case PHY_INTERFACE_MODE_RGMII_RXID:
1799	case PHY_INTERFACE_MODE_RGMII_TXID:
1800		priv->pinmode_val = 0;
1801		break;
1802	default:
1803		return -EINVAL;
1804	}
1805
1806	return 0;
1807}
1808
1809static int ave_ld11_get_pinmode(struct ave_private *priv,
1810				phy_interface_t phy_mode, u32 arg)
1811{
1812	if (arg > 0)
1813		return -EINVAL;
1814
1815	priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
1816
1817	switch (phy_mode) {
1818	case PHY_INTERFACE_MODE_INTERNAL:
1819		priv->pinmode_val = 0;
1820		break;
1821	case PHY_INTERFACE_MODE_RMII:
1822		priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
1823		break;
1824	default:
1825		return -EINVAL;
1826	}
1827
1828	return 0;
1829}
1830
1831static int ave_ld20_get_pinmode(struct ave_private *priv,
1832				phy_interface_t phy_mode, u32 arg)
1833{
1834	if (arg > 0)
1835		return -EINVAL;
1836
1837	priv->pinmode_mask = SG_ETPINMODE_RMII(0);
1838
1839	switch (phy_mode) {
1840	case PHY_INTERFACE_MODE_RMII:
1841		priv->pinmode_val = SG_ETPINMODE_RMII(0);
1842		break;
1843	case PHY_INTERFACE_MODE_RGMII:
1844	case PHY_INTERFACE_MODE_RGMII_ID:
1845	case PHY_INTERFACE_MODE_RGMII_RXID:
1846	case PHY_INTERFACE_MODE_RGMII_TXID:
1847		priv->pinmode_val = 0;
1848		break;
1849	default:
1850		return -EINVAL;
1851	}
1852
1853	return 0;
1854}
1855
1856static int ave_pxs3_get_pinmode(struct ave_private *priv,
1857				phy_interface_t phy_mode, u32 arg)
1858{
1859	if (arg > 1)
1860		return -EINVAL;
1861
1862	priv->pinmode_mask = SG_ETPINMODE_RMII(arg);
1863
1864	switch (phy_mode) {
1865	case PHY_INTERFACE_MODE_RMII:
1866		priv->pinmode_val = SG_ETPINMODE_RMII(arg);
1867		break;
1868	case PHY_INTERFACE_MODE_RGMII:
1869	case PHY_INTERFACE_MODE_RGMII_ID:
1870	case PHY_INTERFACE_MODE_RGMII_RXID:
1871	case PHY_INTERFACE_MODE_RGMII_TXID:
1872		priv->pinmode_val = 0;
1873		break;
1874	default:
1875		return -EINVAL;
1876	}
1877
1878	return 0;
1879}
1880
/* Per-SoC configuration: descriptor width, required clock/reset names
 * (NULL-terminated arrays), and the pin-mode setup handler.
 */
static const struct ave_soc_data ave_pro4_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"gio", "ether", "ether-gb", "ether-phy",
	},
	.reset_names = {
		"gio", "ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};

static const struct ave_soc_data ave_pxs2_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};

static const struct ave_soc_data ave_ld11_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld11_get_pinmode,
};

static const struct ave_soc_data ave_ld20_data = {
	.is_desc_64bit = true,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld20_get_pinmode,
};

static const struct ave_soc_data ave_pxs3_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pxs3_get_pinmode,
};

static const struct ave_soc_data ave_nx1_data = {
	.is_desc_64bit = true,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pxs3_get_pinmode,
};
1946
/* device-tree match table: maps compatible strings to per-SoC data */
static const struct of_device_id of_ave_match[] = {
	{
		.compatible = "socionext,uniphier-pro4-ave4",
		.data = &ave_pro4_data,
	},
	{
		.compatible = "socionext,uniphier-pxs2-ave4",
		.data = &ave_pxs2_data,
	},
	{
		.compatible = "socionext,uniphier-ld11-ave4",
		.data = &ave_ld11_data,
	},
	{
		.compatible = "socionext,uniphier-ld20-ave4",
		.data = &ave_ld20_data,
	},
	{
		.compatible = "socionext,uniphier-pxs3-ave4",
		.data = &ave_pxs3_data,
	},
	{
		.compatible = "socionext,uniphier-nx1-ave4",
		.data = &ave_nx1_data,
	},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_ave_match);
1975
/* platform driver glue and module metadata */
static struct platform_driver ave_driver = {
	.probe  = ave_probe,
	.remove = ave_remove,
	.driver	= {
		.name = "ave",
		.pm   = AVE_PM_OPS,
		.of_match_table	= of_ave_match,
	},
};
module_platform_driver(ave_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
MODULE_LICENSE("GPL v2");