   1// SPDX-License-Identifier: GPL-2.0
   2/*  Atheros AR71xx built-in ethernet mac driver
   3 *
   4 *  Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
   5 *
    6 *  List of authors who contributed to this driver before mainlining:
   7 *  Alexander Couzens <lynxis@fe80.eu>
   8 *  Christian Lamparter <chunkeey@gmail.com>
   9 *  Chuanhong Guo <gch981213@gmail.com>
  10 *  Daniel F. Dickinson <cshored@thecshore.com>
  11 *  David Bauer <mail@david-bauer.net>
  12 *  Felix Fietkau <nbd@nbd.name>
  13 *  Gabor Juhos <juhosg@freemail.hu>
  14 *  Hauke Mehrtens <hauke@hauke-m.de>
  15 *  Johann Neuhauser <johann@it-neuhauser.de>
  16 *  John Crispin <john@phrozen.org>
  17 *  Jo-Philipp Wich <jo@mein.io>
  18 *  Koen Vandeputte <koen.vandeputte@ncentric.com>
  19 *  Lucian Cristian <lucian.cristian@gmail.com>
  20 *  Matt Merhar <mattmerhar@protonmail.com>
  21 *  Milan Krstic <milan.krstic@gmail.com>
  22 *  Petr Štetiar <ynezz@true.cz>
  23 *  Rosen Penev <rosenp@gmail.com>
  24 *  Stephen Walker <stephendwalker+github@gmail.com>
  25 *  Vittorio Gambaletta <openwrt@vittgam.net>
  26 *  Weijie Gao <hackpascal@gmail.com>
  27 *  Imre Kaloz <kaloz@openwrt.org>
  28 */
  29
  30#include <linux/if_vlan.h>
  31#include <linux/mfd/syscon.h>
  32#include <linux/of.h>
  33#include <linux/of_mdio.h>
  34#include <linux/of_net.h>
  35#include <linux/platform_device.h>
  36#include <linux/phylink.h>
  37#include <linux/regmap.h>
  38#include <linux/reset.h>
  39#include <linux/clk.h>
  40#include <linux/io.h>
  41#include <net/selftests.h>
  42
   43/* For our NAPI weight, bigger does *NOT* mean better - it means more
   44 * D-cache misses and many more wasted cycles than we could ever
   45 * gain from saving instructions.
  46 */
  47#define AG71XX_NAPI_WEIGHT	32
  48#define AG71XX_OOM_REFILL	(1 + HZ / 10)
  49
  50#define AG71XX_INT_ERR	(AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
  51#define AG71XX_INT_TX	(AG71XX_INT_TX_PS)
  52#define AG71XX_INT_RX	(AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)
  53
  54#define AG71XX_INT_POLL	(AG71XX_INT_RX | AG71XX_INT_TX)
  55#define AG71XX_INT_INIT	(AG71XX_INT_ERR | AG71XX_INT_POLL)
  56
  57#define AG71XX_TX_MTU_LEN	1540
  58
  59#define AG71XX_TX_RING_SPLIT		512
  60#define AG71XX_TX_RING_DS_PER_PKT	DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
  61						     AG71XX_TX_RING_SPLIT)
  62#define AG71XX_TX_RING_SIZE_DEFAULT	128
  63#define AG71XX_RX_RING_SIZE_DEFAULT	256
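     /* With the AR7100 descriptor-split workaround, a worst-case frame of
      * AG71XX_TX_MTU_LEN (1540) bytes needs DIV_ROUND_UP(1540, 512) = 4
      * descriptors, so ag71xx_probe() scales the default TX ring of 128
      * entries by AG71XX_TX_RING_DS_PER_PKT on that SoC.
      */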
  64
  65#define AG71XX_MDIO_RETRY	1000
  66#define AG71XX_MDIO_DELAY	5
  67#define AG71XX_MDIO_MAX_CLK	5000000
  68
  69/* Register offsets */
  70#define AG71XX_REG_MAC_CFG1	0x0000
  71#define MAC_CFG1_TXE		BIT(0)	/* Tx Enable */
  72#define MAC_CFG1_STX		BIT(1)	/* Synchronize Tx Enable */
  73#define MAC_CFG1_RXE		BIT(2)	/* Rx Enable */
  74#define MAC_CFG1_SRX		BIT(3)	/* Synchronize Rx Enable */
  75#define MAC_CFG1_TFC		BIT(4)	/* Tx Flow Control Enable */
  76#define MAC_CFG1_RFC		BIT(5)	/* Rx Flow Control Enable */
  77#define MAC_CFG1_SR		BIT(31)	/* Soft Reset */
  78#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
  79			 MAC_CFG1_SRX | MAC_CFG1_STX)
  80
  81#define AG71XX_REG_MAC_CFG2	0x0004
  82#define MAC_CFG2_FDX		BIT(0)
  83#define MAC_CFG2_PAD_CRC_EN	BIT(2)
  84#define MAC_CFG2_LEN_CHECK	BIT(4)
  85#define MAC_CFG2_IF_1000	BIT(9)
  86#define MAC_CFG2_IF_10_100	BIT(8)
  87
  88#define AG71XX_REG_MAC_MFL	0x0010
  89
  90#define AG71XX_REG_MII_CFG	0x0020
  91#define MII_CFG_CLK_DIV_4	0
  92#define MII_CFG_CLK_DIV_6	2
  93#define MII_CFG_CLK_DIV_8	3
  94#define MII_CFG_CLK_DIV_10	4
  95#define MII_CFG_CLK_DIV_14	5
  96#define MII_CFG_CLK_DIV_20	6
  97#define MII_CFG_CLK_DIV_28	7
  98#define MII_CFG_CLK_DIV_34	8
  99#define MII_CFG_CLK_DIV_42	9
 100#define MII_CFG_CLK_DIV_50	10
 101#define MII_CFG_CLK_DIV_58	11
 102#define MII_CFG_CLK_DIV_66	12
 103#define MII_CFG_CLK_DIV_74	13
 104#define MII_CFG_CLK_DIV_82	14
 105#define MII_CFG_CLK_DIV_98	15
 106#define MII_CFG_RESET		BIT(31)
 107
 108#define AG71XX_REG_MII_CMD	0x0024
 109#define MII_CMD_READ		BIT(0)
 110
 111#define AG71XX_REG_MII_ADDR	0x0028
 112#define MII_ADDR_SHIFT		8
 113
 114#define AG71XX_REG_MII_CTRL	0x002c
 115#define AG71XX_REG_MII_STATUS	0x0030
 116#define AG71XX_REG_MII_IND	0x0034
 117#define MII_IND_BUSY		BIT(0)
 118#define MII_IND_INVALID		BIT(2)
 119
 120#define AG71XX_REG_MAC_IFCTL	0x0038
 121#define MAC_IFCTL_SPEED		BIT(16)
 122
 123#define AG71XX_REG_MAC_ADDR1	0x0040
 124#define AG71XX_REG_MAC_ADDR2	0x0044
 125#define AG71XX_REG_FIFO_CFG0	0x0048
 126#define FIFO_CFG0_WTM		BIT(0)	/* Watermark Module */
 127#define FIFO_CFG0_RXS		BIT(1)	/* Rx System Module */
 128#define FIFO_CFG0_RXF		BIT(2)	/* Rx Fabric Module */
 129#define FIFO_CFG0_TXS		BIT(3)	/* Tx System Module */
 130#define FIFO_CFG0_TXF		BIT(4)	/* Tx Fabric Module */
 131#define FIFO_CFG0_ALL	(FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
 132			| FIFO_CFG0_TXS | FIFO_CFG0_TXF)
 133#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)
 134
 135#define FIFO_CFG0_ENABLE_SHIFT	8
 136
 137#define AG71XX_REG_FIFO_CFG1	0x004c
 138#define AG71XX_REG_FIFO_CFG2	0x0050
 139#define AG71XX_REG_FIFO_CFG3	0x0054
 140#define AG71XX_REG_FIFO_CFG4	0x0058
 141#define FIFO_CFG4_DE		BIT(0)	/* Drop Event */
 142#define FIFO_CFG4_DV		BIT(1)	/* RX_DV Event */
 143#define FIFO_CFG4_FC		BIT(2)	/* False Carrier */
 144#define FIFO_CFG4_CE		BIT(3)	/* Code Error */
 145#define FIFO_CFG4_CR		BIT(4)	/* CRC error */
 146#define FIFO_CFG4_LM		BIT(5)	/* Length Mismatch */
 147#define FIFO_CFG4_LO		BIT(6)	/* Length out of range */
 148#define FIFO_CFG4_OK		BIT(7)	/* Packet is OK */
 149#define FIFO_CFG4_MC		BIT(8)	/* Multicast Packet */
 150#define FIFO_CFG4_BC		BIT(9)	/* Broadcast Packet */
 151#define FIFO_CFG4_DR		BIT(10)	/* Dribble */
 152#define FIFO_CFG4_CF		BIT(11)	/* Control Frame */
 153#define FIFO_CFG4_PF		BIT(12)	/* Pause Frame */
 154#define FIFO_CFG4_UO		BIT(13)	/* Unsupported Opcode */
 155#define FIFO_CFG4_VT		BIT(14)	/* VLAN tag detected */
 156#define FIFO_CFG4_LE		BIT(15)	/* Long Event */
 157#define FIFO_CFG4_FT		BIT(16)	/* Frame Truncated */
 158#define FIFO_CFG4_UC		BIT(17)	/* Unicast Packet */
 159#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
 160			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
 161			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
 162			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
 163			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
 164			 FIFO_CFG4_VT)
 165
 166#define AG71XX_REG_FIFO_CFG5	0x005c
 167#define FIFO_CFG5_DE		BIT(0)	/* Drop Event */
 168#define FIFO_CFG5_DV		BIT(1)	/* RX_DV Event */
 169#define FIFO_CFG5_FC		BIT(2)	/* False Carrier */
 170#define FIFO_CFG5_CE		BIT(3)	/* Code Error */
 171#define FIFO_CFG5_CR		BIT(4)  /* CRC error */
 172#define FIFO_CFG5_LM		BIT(5)	/* Length Mismatch */
 173#define FIFO_CFG5_LO		BIT(6)	/* Length Out of Range */
 174#define FIFO_CFG5_OK		BIT(7)	/* Packet is OK */
 175#define FIFO_CFG5_MC		BIT(8)	/* Multicast Packet */
 176#define FIFO_CFG5_BC		BIT(9)	/* Broadcast Packet */
 177#define FIFO_CFG5_DR		BIT(10)	/* Dribble */
 178#define FIFO_CFG5_CF		BIT(11)	/* Control Frame */
 179#define FIFO_CFG5_PF		BIT(12)	/* Pause Frame */
 180#define FIFO_CFG5_UO		BIT(13)	/* Unsupported Opcode */
 181#define FIFO_CFG5_VT		BIT(14)	/* VLAN tag detected */
 182#define FIFO_CFG5_LE		BIT(15)	/* Long Event */
 183#define FIFO_CFG5_FT		BIT(16)	/* Frame Truncated */
 184#define FIFO_CFG5_UC		BIT(17)	/* Unicast Packet */
 185#define FIFO_CFG5_SF		BIT(18)	/* Short Frame */
 186#define FIFO_CFG5_BM		BIT(19)	/* Byte Mode */
 187#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
 188			 FIFO_CFG5_CE | FIFO_CFG5_LM | FIFO_CFG5_LO | \
 189			 FIFO_CFG5_OK | FIFO_CFG5_MC | FIFO_CFG5_BC | \
 190			 FIFO_CFG5_DR | FIFO_CFG5_CF | FIFO_CFG5_UO | \
 191			 FIFO_CFG5_VT | FIFO_CFG5_LE | FIFO_CFG5_FT | \
 192			 FIFO_CFG5_UC | FIFO_CFG5_SF)
 193
 194#define AG71XX_REG_TX_CTRL	0x0180
 195#define TX_CTRL_TXE		BIT(0)	/* Tx Enable */
 196
 197#define AG71XX_REG_TX_DESC	0x0184
 198#define AG71XX_REG_TX_STATUS	0x0188
 199#define TX_STATUS_PS		BIT(0)	/* Packet Sent */
 200#define TX_STATUS_UR		BIT(1)	/* Tx Underrun */
 201#define TX_STATUS_BE		BIT(3)	/* Bus Error */
 202
 203#define AG71XX_REG_RX_CTRL	0x018c
 204#define RX_CTRL_RXE		BIT(0)	/* Rx Enable */
 205
 206#define AG71XX_DMA_RETRY	10
 207#define AG71XX_DMA_DELAY	1
 208
 209#define AG71XX_REG_RX_DESC	0x0190
 210#define AG71XX_REG_RX_STATUS	0x0194
 211#define RX_STATUS_PR		BIT(0)	/* Packet Received */
 212#define RX_STATUS_OF		BIT(2)	/* Rx Overflow */
 213#define RX_STATUS_BE		BIT(3)	/* Bus Error */
 214
 215#define AG71XX_REG_INT_ENABLE	0x0198
 216#define AG71XX_REG_INT_STATUS	0x019c
 217#define AG71XX_INT_TX_PS	BIT(0)
 218#define AG71XX_INT_TX_UR	BIT(1)
 219#define AG71XX_INT_TX_BE	BIT(3)
 220#define AG71XX_INT_RX_PR	BIT(4)
 221#define AG71XX_INT_RX_OF	BIT(6)
 222#define AG71XX_INT_RX_BE	BIT(7)
 223
 224#define AG71XX_REG_FIFO_DEPTH	0x01a8
 225#define AG71XX_REG_RX_SM	0x01b0
 226#define AG71XX_REG_TX_SM	0x01b4
 227
 228#define AG71XX_DEFAULT_MSG_ENABLE	\
 229	(NETIF_MSG_DRV			\
 230	| NETIF_MSG_PROBE		\
 231	| NETIF_MSG_LINK		\
 232	| NETIF_MSG_TIMER		\
 233	| NETIF_MSG_IFDOWN		\
 234	| NETIF_MSG_IFUP		\
 235	| NETIF_MSG_RX_ERR		\
 236	| NETIF_MSG_TX_ERR)
 237
 238struct ag71xx_statistic {
 239	unsigned short offset;
 240	u32 mask;
 241	const char name[ETH_GSTRING_LEN];
 242};
 243
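     /* Each hardware counter is narrower than 32 bits; the mask appears to
      * select only the bits the MAC actually implements for that counter,
      * so reserved bits never leak into the ethtool statistics.
      */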
 244static const struct ag71xx_statistic ag71xx_statistics[] = {
 245	{ 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", },
 246	{ 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
 247	{ 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
 248	{ 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
 249	{ 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
 250	{ 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
 251	{ 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
 252	{ 0x009C, GENMASK(23, 0), "Rx Byte", },
 253	{ 0x00A0, GENMASK(17, 0), "Rx Packet", },
 254	{ 0x00A4, GENMASK(11, 0), "Rx FCS Error", },
 255	{ 0x00A8, GENMASK(17, 0), "Rx Multicast Packet", },
 256	{ 0x00AC, GENMASK(21, 0), "Rx Broadcast Packet", },
 257	{ 0x00B0, GENMASK(17, 0), "Rx Control Frame Packet", },
 258	{ 0x00B4, GENMASK(11, 0), "Rx Pause Frame Packet", },
 259	{ 0x00B8, GENMASK(11, 0), "Rx Unknown OPCode Packet", },
 260	{ 0x00BC, GENMASK(11, 0), "Rx Alignment Error", },
 261	{ 0x00C0, GENMASK(15, 0), "Rx Frame Length Error", },
 262	{ 0x00C4, GENMASK(11, 0), "Rx Code Error", },
 263	{ 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", },
 264	{ 0x00CC, GENMASK(11, 0), "Rx Undersize Packet", },
 265	{ 0x00D0, GENMASK(11, 0), "Rx Oversize Packet", },
 266	{ 0x00D4, GENMASK(11, 0), "Rx Fragments", },
 267	{ 0x00D8, GENMASK(11, 0), "Rx Jabber", },
 268	{ 0x00DC, GENMASK(11, 0), "Rx Dropped Packet", },
 269	{ 0x00E0, GENMASK(23, 0), "Tx Byte", },
 270	{ 0x00E4, GENMASK(17, 0), "Tx Packet", },
 271	{ 0x00E8, GENMASK(17, 0), "Tx Multicast Packet", },
 272	{ 0x00EC, GENMASK(17, 0), "Tx Broadcast Packet", },
 273	{ 0x00F0, GENMASK(11, 0), "Tx Pause Control Frame", },
 274	{ 0x00F4, GENMASK(11, 0), "Tx Deferral Packet", },
 275	{ 0x00F8, GENMASK(11, 0), "Tx Excessive Deferral Packet", },
 276	{ 0x00FC, GENMASK(11, 0), "Tx Single Collision Packet", },
 277	{ 0x0100, GENMASK(11, 0), "Tx Multiple Collision", },
 278	{ 0x0104, GENMASK(11, 0), "Tx Late Collision Packet", },
 279	{ 0x0108, GENMASK(11, 0), "Tx Excessive Collision Packet", },
 280	{ 0x010C, GENMASK(12, 0), "Tx Total Collision", },
 281	{ 0x0110, GENMASK(11, 0), "Tx Pause Frames Honored", },
 282	{ 0x0114, GENMASK(11, 0), "Tx Drop Frame", },
 283	{ 0x0118, GENMASK(11, 0), "Tx Jabber Frame", },
 284	{ 0x011C, GENMASK(11, 0), "Tx FCS Error", },
 285	{ 0x0120, GENMASK(11, 0), "Tx Control Frame", },
 286	{ 0x0124, GENMASK(11, 0), "Tx Oversize Frame", },
 287	{ 0x0128, GENMASK(11, 0), "Tx Undersize Frame", },
 288	{ 0x012C, GENMASK(11, 0), "Tx Fragment", },
 289};
 290
 291#define DESC_EMPTY		BIT(31)
 292#define DESC_MORE		BIT(24)
 293#define DESC_PKTLEN_M		0xfff
 294struct ag71xx_desc {
 295	u32 data;
 296	u32 ctrl;
 297	u32 next;
 298	u32 pad;
 299} __aligned(4);
 300
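     /* Each descriptor is padded out to a full cache line so that cache
      * maintenance on one descriptor never touches its neighbours. On the
      * MIPS 24K-class cores used in these SoCs, L1_CACHE_BYTES is typically
      * 32, which would round the 16-byte descriptor up to 32 bytes.
      */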
 301#define AG71XX_DESC_SIZE	roundup(sizeof(struct ag71xx_desc), \
 302					L1_CACHE_BYTES)
 303
 304struct ag71xx_buf {
 305	union {
 306		struct {
 307			struct sk_buff *skb;
 308			unsigned int len;
 309		} tx;
 310		struct {
 311			dma_addr_t dma_addr;
 312			void *rx_buf;
 313		} rx;
 314	};
 315};
 316
 317struct ag71xx_ring {
 318	/* "Hot" fields in the data path. */
 319	unsigned int curr;
 320	unsigned int dirty;
 321
 322	/* "Cold" fields - not used in the data path. */
 323	struct ag71xx_buf *buf;
 324	u16 order;
 325	u16 desc_split;
 326	dma_addr_t descs_dma;
 327	u8 *descs_cpu;
 328};
 329
 330enum ag71xx_type {
 331	AR7100,
 332	AR7240,
 333	AR9130,
 334	AR9330,
 335	AR9340,
 336	QCA9530,
 337	QCA9550,
 338};
 339
 340struct ag71xx_dcfg {
 341	u32 max_frame_len;
 342	const u32 *fifodata;
 343	u16 desc_pktlen_mask;
 344	bool tx_hang_workaround;
 345	enum ag71xx_type type;
 346};
 347
 348struct ag71xx {
 349	/* Critical data related to the per-packet data path are clustered
 350	 * early in this structure to help improve the D-cache footprint.
 351	 */
 352	struct ag71xx_ring rx_ring ____cacheline_aligned;
 353	struct ag71xx_ring tx_ring ____cacheline_aligned;
 354
 355	u16 rx_buf_size;
 356	u8 rx_buf_offset;
 357
 358	struct net_device *ndev;
 359	struct platform_device *pdev;
 360	struct napi_struct napi;
 361	u32 msg_enable;
 362	const struct ag71xx_dcfg *dcfg;
 363
 364	/* From this point onwards we're not looking at per-packet fields. */
 365	void __iomem *mac_base;
 366
 367	struct ag71xx_desc *stop_desc;
 368	dma_addr_t stop_desc_dma;
 369
 370	phy_interface_t phy_if_mode;
 371	struct phylink *phylink;
 372	struct phylink_config phylink_config;
 373
 374	struct delayed_work restart_work;
 375	struct timer_list oom_timer;
 376
 377	struct reset_control *mac_reset;
 378
 379	u32 fifodata[3];
 380	int mac_idx;
 381
 382	struct clk *clk_mdio;
 383};
 384
 385static int ag71xx_desc_empty(struct ag71xx_desc *desc)
 386{
 387	return (desc->ctrl & DESC_EMPTY) != 0;
 388}
 389
 390static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
 391{
 392	return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
 393}
 394
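     /* Ring sizes are powers of two, tracked by their log2 "order":
      * fls(size - 1) maps e.g. 256 -> 8 and 128 -> 7, matching the
      * BIT(order) arithmetic used throughout the ring code.
      */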
 395static int ag71xx_ring_size_order(int size)
 396{
 397	return fls(size - 1);
 398}
 399
 400static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
 401{
 402	return ag->dcfg->type == type;
 403}
 404
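     /* MMIO accessors: the read-back after each write flushes the posted
      * write to the MAC before the caller proceeds.
      */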
 405static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
 406{
 407	iowrite32(value, ag->mac_base + reg);
 408	/* flush write */
 409	(void)ioread32(ag->mac_base + reg);
 410}
 411
 412static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
 413{
 414	return ioread32(ag->mac_base + reg);
 415}
 416
 417static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
 418{
 419	void __iomem *r;
 420
 421	r = ag->mac_base + reg;
 422	iowrite32(ioread32(r) | mask, r);
 423	/* flush write */
 424	(void)ioread32(r);
 425}
 426
 427static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
 428{
 429	void __iomem *r;
 430
 431	r = ag->mac_base + reg;
 432	iowrite32(ioread32(r) & ~mask, r);
 433	/* flush write */
 434	(void)ioread32(r);
 435}
 436
 437static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
 438{
 439	ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
 440}
 441
 442static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
 443{
 444	ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
 445}
 446
 447static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
 448{
 449	struct ag71xx *ag = netdev_priv(ndev);
 450
 451	return phylink_mii_ioctl(ag->phylink, ifr, cmd);
 452}
 453
 454static void ag71xx_get_drvinfo(struct net_device *ndev,
 455			       struct ethtool_drvinfo *info)
 456{
 457	struct ag71xx *ag = netdev_priv(ndev);
 458
 459	strscpy(info->driver, "ag71xx", sizeof(info->driver));
 460	strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
 461		sizeof(info->bus_info));
 462}
 463
 464static int ag71xx_get_link_ksettings(struct net_device *ndev,
 465				   struct ethtool_link_ksettings *kset)
 466{
 467	struct ag71xx *ag = netdev_priv(ndev);
 468
 469	return phylink_ethtool_ksettings_get(ag->phylink, kset);
 470}
 471
 472static int ag71xx_set_link_ksettings(struct net_device *ndev,
 473				   const struct ethtool_link_ksettings *kset)
 474{
 475	struct ag71xx *ag = netdev_priv(ndev);
 476
 477	return phylink_ethtool_ksettings_set(ag->phylink, kset);
 478}
 479
 480static int ag71xx_ethtool_nway_reset(struct net_device *ndev)
 481{
 482	struct ag71xx *ag = netdev_priv(ndev);
 483
 484	return phylink_ethtool_nway_reset(ag->phylink);
 485}
 486
 487static void ag71xx_ethtool_get_pauseparam(struct net_device *ndev,
 488					  struct ethtool_pauseparam *pause)
 489{
 490	struct ag71xx *ag = netdev_priv(ndev);
 491
 492	phylink_ethtool_get_pauseparam(ag->phylink, pause);
 493}
 494
 495static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev,
 496					 struct ethtool_pauseparam *pause)
 497{
 498	struct ag71xx *ag = netdev_priv(ndev);
 499
 500	return phylink_ethtool_set_pauseparam(ag->phylink, pause);
 501}
 502
 503static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
 504				       u8 *data)
 505{
 506	int i;
 507
 508	switch (sset) {
 509	case ETH_SS_STATS:
 510		for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
 511			ethtool_puts(&data, ag71xx_statistics[i].name);
 512		break;
 513	case ETH_SS_TEST:
 514		net_selftest_get_strings(data);
 515		break;
 516	}
 517}
 518
 519static void ag71xx_ethtool_get_stats(struct net_device *ndev,
 520				     struct ethtool_stats *stats, u64 *data)
 521{
 522	struct ag71xx *ag = netdev_priv(ndev);
 523	int i;
 524
 525	for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
 526		*data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
 527				& ag71xx_statistics[i].mask;
 528}
 529
 530static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
 531{
 532	switch (sset) {
 533	case ETH_SS_STATS:
 534		return ARRAY_SIZE(ag71xx_statistics);
 535	case ETH_SS_TEST:
 536		return net_selftest_get_count();
 537	default:
 538		return -EOPNOTSUPP;
 539	}
 540}
 541
 542static const struct ethtool_ops ag71xx_ethtool_ops = {
 543	.get_drvinfo			= ag71xx_get_drvinfo,
 544	.get_link			= ethtool_op_get_link,
 545	.get_ts_info			= ethtool_op_get_ts_info,
 546	.get_link_ksettings		= ag71xx_get_link_ksettings,
 547	.set_link_ksettings		= ag71xx_set_link_ksettings,
 548	.nway_reset			= ag71xx_ethtool_nway_reset,
 549	.get_pauseparam			= ag71xx_ethtool_get_pauseparam,
 550	.set_pauseparam			= ag71xx_ethtool_set_pauseparam,
 551	.get_strings			= ag71xx_ethtool_get_strings,
 552	.get_ethtool_stats		= ag71xx_ethtool_get_stats,
 553	.get_sset_count			= ag71xx_ethtool_get_sset_count,
 554	.self_test			= net_selftest,
 555};
 556
 557static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
 558{
 559	struct net_device *ndev = ag->ndev;
 560	int i;
 561
 562	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
 563		u32 busy;
 564
 565		udelay(AG71XX_MDIO_DELAY);
 566
 567		busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
 568		if (!busy)
 569			return 0;
 570
 571		udelay(AG71XX_MDIO_DELAY);
 572	}
 573
 574	netif_err(ag, link, ndev, "MDIO operation timed out\n");
 575
 576	return -ETIMEDOUT;
 577}
 578
 579static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
 580{
 581	struct ag71xx *ag = bus->priv;
 582	int err, val;
 583
 584	err = ag71xx_mdio_wait_busy(ag);
 585	if (err)
 586		return err;
 587
 588	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
 589		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
 590	/* enable read mode */
 591	ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);
 592
 593	err = ag71xx_mdio_wait_busy(ag);
 594	if (err)
 595		return err;
 596
 597	val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
 598	/* disable read mode */
 599	ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);
 600
 601	netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
 602		  addr, reg, val);
 603
 604	return val;
 605}
 606
 607static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
 608				 u16 val)
 609{
 610	struct ag71xx *ag = bus->priv;
 611
 612	netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
 613		  addr, reg, val);
 614
 615	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
 616		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
 617	ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);
 618
 619	return ag71xx_mdio_wait_busy(ag);
 620}
 621
 622static const u32 ar71xx_mdio_div_table[] = {
 623	4, 4, 6, 8, 10, 14, 20, 28,
 624};
 625
 626static const u32 ar7240_mdio_div_table[] = {
 627	2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
 628};
 629
 630static const u32 ar933x_mdio_div_table[] = {
 631	4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
 632};
 633
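     /* Pick the smallest divider that keeps the MDIO clock at or below
      * AG71XX_MDIO_MAX_CLK (5 MHz). For example, with a hypothetical
      * 100 MHz reference clock and the ar71xx table above, 100 / 20 = 5 MHz
      * is the first result that fits, so index 6 would be returned.
      */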
 634static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
 635{
 636	unsigned long ref_clock;
 637	const u32 *table;
 638	int ndivs, i;
 639
 640	ref_clock = clk_get_rate(ag->clk_mdio);
 641	if (!ref_clock)
 642		return -EINVAL;
 643
 644	if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
 645		table = ar933x_mdio_div_table;
 646		ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
 647	} else if (ag71xx_is(ag, AR7240)) {
 648		table = ar7240_mdio_div_table;
 649		ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
 650	} else {
 651		table = ar71xx_mdio_div_table;
 652		ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
 653	}
 654
 655	for (i = 0; i < ndivs; i++) {
 656		unsigned long t;
 657
 658		t = ref_clock / table[i];
 659		if (t <= AG71XX_MDIO_MAX_CLK) {
 660			*div = i;
 661			return 0;
 662		}
 663	}
 664
 665	return -ENOENT;
 666}
 667
 668static int ag71xx_mdio_reset(struct mii_bus *bus)
 669{
 670	struct ag71xx *ag = bus->priv;
 671	int err;
 672	u32 t;
 673
 674	err = ag71xx_mdio_get_divider(ag, &t);
 675	if (err)
 676		return err;
 677
 678	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
 679	usleep_range(100, 200);
 680
 681	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
 682	usleep_range(100, 200);
 683
 684	return 0;
 685}
 686
 687static int ag71xx_mdio_probe(struct ag71xx *ag)
 688{
 689	struct device *dev = &ag->pdev->dev;
 690	struct net_device *ndev = ag->ndev;
 691	struct reset_control *mdio_reset;
  692	struct mii_bus *mii_bus;
 693	struct device_node *np, *mnp;
 694	int err;
 695
 696	np = dev->of_node;
 697
 698	ag->clk_mdio = devm_clk_get_enabled(dev, "mdio");
 699	if (IS_ERR(ag->clk_mdio)) {
 700		netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
 701		return PTR_ERR(ag->clk_mdio);
 702	}
 703
 704	mii_bus = devm_mdiobus_alloc(dev);
 705	if (!mii_bus)
 706		return -ENOMEM;
 707
 708	mdio_reset = devm_reset_control_get_exclusive(dev, "mdio");
 709	if (IS_ERR(mdio_reset)) {
 710		netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
 711		return PTR_ERR(mdio_reset);
 712	}
 713
 714	mii_bus->name = "ag71xx_mdio";
 715	mii_bus->read = ag71xx_mdio_mii_read;
 716	mii_bus->write = ag71xx_mdio_mii_write;
 717	mii_bus->reset = ag71xx_mdio_reset;
 718	mii_bus->priv = ag;
 719	mii_bus->parent = dev;
 720	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
 721
 722	reset_control_assert(mdio_reset);
 723	msleep(100);
 724	reset_control_deassert(mdio_reset);
 725	msleep(200);
 726
 727	mnp = of_get_child_by_name(np, "mdio");
 728	err = devm_of_mdiobus_register(dev, mii_bus, mnp);
 729	of_node_put(mnp);
 730	if (err)
 731		return err;
 732
 733	return 0;
 734}
 735
 736static void ag71xx_hw_stop(struct ag71xx *ag)
 737{
 738	/* disable all interrupts and stop the rx/tx engine */
 739	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
 740	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
 741	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
 742}
 743
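     /* Heuristic for a DMA hang seen on some SoCs: if the TX queue has not
      * moved for ~100 ms while the link is up and the RX/TX state machines
      * report one of two known-bad patterns, the engine is presumed wedged
      * and a restart is scheduled. The magic state values apparently come
      * from the vendor driver.
      */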
 744static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
 745{
 746	unsigned long timestamp;
 747	u32 rx_sm, tx_sm, rx_fd;
 748
 749	timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start);
 750	if (likely(time_before(jiffies, timestamp + HZ / 10)))
 751		return false;
 752
 753	if (!netif_carrier_ok(ag->ndev))
 754		return false;
 755
 756	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
 757	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
 758		return true;
 759
 760	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
 761	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
 762	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
 763	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
 764		return true;
 765
 766	return false;
 767}
 768
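     /* Reap descriptors the hardware has finished with. One TX_STATUS_PS
      * ack is written per reaped descriptor, and the queue is only woken
      * once at least a quarter of the ring is free again.
      */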
 769static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
 770{
 771	struct ag71xx_ring *ring = &ag->tx_ring;
 772	int sent = 0, bytes_compl = 0, n = 0;
 773	struct net_device *ndev = ag->ndev;
 774	int ring_mask, ring_size;
 775	bool dma_stuck = false;
 776
 777	ring_mask = BIT(ring->order) - 1;
 778	ring_size = BIT(ring->order);
 779
 780	netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");
 781
 782	while (ring->dirty + n != ring->curr) {
 783		struct ag71xx_desc *desc;
 784		struct sk_buff *skb;
 785		unsigned int i;
 786
 787		i = (ring->dirty + n) & ring_mask;
 788		desc = ag71xx_ring_desc(ring, i);
 789		skb = ring->buf[i].tx.skb;
 790
 791		if (!flush && !ag71xx_desc_empty(desc)) {
 792			if (ag->dcfg->tx_hang_workaround &&
 793			    ag71xx_check_dma_stuck(ag)) {
 794				schedule_delayed_work(&ag->restart_work,
 795						      HZ / 2);
 796				dma_stuck = true;
 797			}
 798			break;
 799		}
 800
 801		if (flush)
 802			desc->ctrl |= DESC_EMPTY;
 803
 804		n++;
 805		if (!skb)
 806			continue;
 807
 808		napi_consume_skb(skb, budget);
 809		ring->buf[i].tx.skb = NULL;
 810
 811		bytes_compl += ring->buf[i].tx.len;
 812
 813		sent++;
 814		ring->dirty += n;
 815
 816		while (n > 0) {
 817			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
 818			n--;
 819		}
 820	}
 821
 822	netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);
 823
 824	if (!sent)
 825		return 0;
 826
 827	ag->ndev->stats.tx_bytes += bytes_compl;
 828	ag->ndev->stats.tx_packets += sent;
 829
 830	netdev_completed_queue(ag->ndev, sent, bytes_compl);
 831	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
 832		netif_wake_queue(ag->ndev);
 833
 834	if (!dma_stuck)
 835		cancel_delayed_work(&ag->restart_work);
 836
 837	return sent;
 838}
 839
 840static void ag71xx_dma_wait_stop(struct ag71xx *ag)
 841{
 842	struct net_device *ndev = ag->ndev;
 843	int i;
 844
 845	for (i = 0; i < AG71XX_DMA_RETRY; i++) {
 846		u32 rx, tx;
 847
 848		mdelay(AG71XX_DMA_DELAY);
 849
 850		rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
 851		tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
 852		if (!rx && !tx)
 853			return;
 854	}
 855
 856	netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
 857}
 858
 859static void ag71xx_dma_reset(struct ag71xx *ag)
 860{
 861	struct net_device *ndev = ag->ndev;
 862	u32 val;
 863	int i;
 864
 865	/* stop RX and TX */
 866	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
 867	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
 868
  869	/* give the hardware some time to really stop all rx/tx activity;
  870	 * clearing the descriptors too early causes random memory corruption
 871	 */
 872	ag71xx_dma_wait_stop(ag);
 873
 874	/* clear descriptor addresses */
 875	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
 876	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);
 877
 878	/* clear pending RX/TX interrupts */
 879	for (i = 0; i < 256; i++) {
 880		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
 881		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
 882	}
 883
 884	/* clear pending errors */
 885	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
 886	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);
 887
 888	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
 889	if (val)
 890		netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
 891			  val);
 892
 893	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
 894
 895	/* mask out reserved bits */
 896	val &= ~0xff000000;
 897
 898	if (val)
 899		netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
 900			  val);
 901}
 902
 903static void ag71xx_hw_setup(struct ag71xx *ag)
 904{
 905	u32 init = MAC_CFG1_INIT;
 906
 907	/* setup MAC configuration registers */
 908	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);
 909
 910	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
 911		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);
 912
 913	/* setup max frame length to zero */
 914	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);
 915
 916	/* setup FIFO configuration registers */
 917	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
 918	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
 919	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
 920	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
 921	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
 922}
 923
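     /* On-wire frame length for a given MTU, e.g. a 1500-byte MTU gives
      * 14 (Ethernet header) + 4 (VLAN tag) + 1500 + 4 (FCS) = 1522 bytes.
      */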
 924static unsigned int ag71xx_max_frame_len(unsigned int mtu)
 925{
 926	return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
 927}
 928
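     /* The MAC address is split across two registers: ADDR1 holds the low
      * four octets and ADDR2 the first two, so 00:11:22:33:44:55 is written
      * as ADDR1 = 0x55443322 and ADDR2 = 0x11000000.
      */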
 929static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
 930{
 931	u32 t;
 932
 933	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
 934	  | (((u32)mac[3]) << 8) | ((u32)mac[2]);
 935
 936	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);
 937
 938	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
 939	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
 940}
 941
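     /* Partial reset used on link changes: MII_CFG and the current RX
      * descriptor pointer are saved and restored around the MAC reset so
      * MDIO clocking and the RX ring position survive, while the TX ring
      * is flushed and restarted from scratch.
      */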
 942static void ag71xx_fast_reset(struct ag71xx *ag)
 943{
 944	struct net_device *dev = ag->ndev;
 945	u32 rx_ds;
 946	u32 mii_reg;
 947
 948	ag71xx_hw_stop(ag);
 949
 950	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
 951	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
 952
 953	ag71xx_tx_packets(ag, true, 0);
 954
 955	reset_control_assert(ag->mac_reset);
 956	usleep_range(10, 20);
 957	reset_control_deassert(ag->mac_reset);
 958	usleep_range(10, 20);
 959
 960	ag71xx_dma_reset(ag);
 961	ag71xx_hw_setup(ag);
 962	ag->tx_ring.curr = 0;
 963	ag->tx_ring.dirty = 0;
 964	netdev_reset_queue(ag->ndev);
 965
 966	/* setup max frame length */
 967	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
 968		  ag71xx_max_frame_len(ag->ndev->mtu));
 969
 970	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
 971	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
 972	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);
 973
 974	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
 975}
 976
 977static void ag71xx_hw_start(struct ag71xx *ag)
 978{
 979	/* start RX engine */
 980	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
 981
 982	/* enable interrupts */
 983	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
 984
 985	netif_wake_queue(ag->ndev);
 986}
 987
 988static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
 989			      const struct phylink_link_state *state)
 990{
 991	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
 992
 993	if (phylink_autoneg_inband(mode))
 994		return;
 995
 996	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
 997		ag71xx_fast_reset(ag);
 998
 999	if (ag->tx_ring.desc_split) {
1000		ag->fifodata[2] &= 0xffff;
1001		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
1002	}
1003
1004	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
1005}
1006
1007static void ag71xx_mac_link_down(struct phylink_config *config,
1008				 unsigned int mode, phy_interface_t interface)
1009{
1010	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1011
1012	ag71xx_hw_stop(ag);
1013}
1014
1015static void ag71xx_mac_link_up(struct phylink_config *config,
1016			       struct phy_device *phy,
1017			       unsigned int mode, phy_interface_t interface,
1018			       int speed, int duplex,
1019			       bool tx_pause, bool rx_pause)
1020{
1021	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1022	u32 cfg1, cfg2;
1023	u32 ifctl;
1024	u32 fifo5;
1025
1026	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
1027	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
1028	cfg2 |= duplex ? MAC_CFG2_FDX : 0;
1029
1030	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
1031	ifctl &= ~(MAC_IFCTL_SPEED);
1032
1033	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
1034	fifo5 &= ~FIFO_CFG5_BM;
1035
1036	switch (speed) {
1037	case SPEED_1000:
1038		cfg2 |= MAC_CFG2_IF_1000;
1039		fifo5 |= FIFO_CFG5_BM;
1040		break;
1041	case SPEED_100:
1042		cfg2 |= MAC_CFG2_IF_10_100;
1043		ifctl |= MAC_IFCTL_SPEED;
1044		break;
1045	case SPEED_10:
1046		cfg2 |= MAC_CFG2_IF_10_100;
1047		break;
1048	default:
1049		return;
1050	}
1051
1052	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
1053	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
1054	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
1055
1056	cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1);
1057	cfg1 &= ~(MAC_CFG1_TFC | MAC_CFG1_RFC);
1058	if (tx_pause)
1059		cfg1 |= MAC_CFG1_TFC;
1060
1061	if (rx_pause)
1062		cfg1 |= MAC_CFG1_RFC;
1063	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1);
1064
1065	ag71xx_hw_start(ag);
1066}
1067
1068static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
1069	.mac_config = ag71xx_mac_config,
1070	.mac_link_down = ag71xx_mac_link_down,
1071	.mac_link_up = ag71xx_mac_link_up,
1072};
1073
1074static int ag71xx_phylink_setup(struct ag71xx *ag)
1075{
1076	struct phylink *phylink;
1077
1078	ag->phylink_config.dev = &ag->ndev->dev;
1079	ag->phylink_config.type = PHYLINK_NETDEV;
1080	ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
1081		MAC_10 | MAC_100 | MAC_1000FD;
1082
1083	if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
1084	    ag71xx_is(ag, AR9340) ||
1085	    ag71xx_is(ag, QCA9530) ||
1086	    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
1087		__set_bit(PHY_INTERFACE_MODE_MII,
1088			  ag->phylink_config.supported_interfaces);
1089
1090	if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
1091	    (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
1092	    (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
1093		__set_bit(PHY_INTERFACE_MODE_GMII,
1094			  ag->phylink_config.supported_interfaces);
1095
1096	if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
1097		__set_bit(PHY_INTERFACE_MODE_SGMII,
1098			  ag->phylink_config.supported_interfaces);
1099
1100	if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
1101		__set_bit(PHY_INTERFACE_MODE_RMII,
1102			  ag->phylink_config.supported_interfaces);
1103
1104	if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
1105	    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
1106		__set_bit(PHY_INTERFACE_MODE_RGMII,
1107			  ag->phylink_config.supported_interfaces);
1108
1109	phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
1110				 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
1111	if (IS_ERR(phylink))
1112		return PTR_ERR(phylink);
1113
1114	ag->phylink = phylink;
1115	return 0;
1116}
1117
1118static void ag71xx_ring_tx_clean(struct ag71xx *ag)
1119{
1120	struct ag71xx_ring *ring = &ag->tx_ring;
1121	int ring_mask = BIT(ring->order) - 1;
1122	u32 bytes_compl = 0, pkts_compl = 0;
1123	struct net_device *ndev = ag->ndev;
1124
1125	while (ring->curr != ring->dirty) {
1126		struct ag71xx_desc *desc;
1127		u32 i = ring->dirty & ring_mask;
1128
1129		desc = ag71xx_ring_desc(ring, i);
1130		if (!ag71xx_desc_empty(desc)) {
1131			desc->ctrl = 0;
1132			ndev->stats.tx_errors++;
1133		}
1134
1135		if (ring->buf[i].tx.skb) {
1136			bytes_compl += ring->buf[i].tx.len;
1137			pkts_compl++;
1138			dev_kfree_skb_any(ring->buf[i].tx.skb);
1139		}
1140		ring->buf[i].tx.skb = NULL;
1141		ring->dirty++;
1142	}
1143
1144	/* flush descriptors */
1145	wmb();
1146
1147	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
1148}
1149
1150static void ag71xx_ring_tx_init(struct ag71xx *ag)
1151{
1152	struct ag71xx_ring *ring = &ag->tx_ring;
1153	int ring_size = BIT(ring->order);
1154	int ring_mask = ring_size - 1;
1155	int i;
1156
1157	for (i = 0; i < ring_size; i++) {
1158		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
1159
1160		desc->next = (u32)(ring->descs_dma +
1161			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
1162
1163		desc->ctrl = DESC_EMPTY;
1164		ring->buf[i].tx.skb = NULL;
1165	}
1166
1167	/* flush descriptors */
1168	wmb();
1169
1170	ring->curr = 0;
1171	ring->dirty = 0;
1172	netdev_reset_queue(ag->ndev);
1173}
1174
1175static void ag71xx_ring_rx_clean(struct ag71xx *ag)
1176{
1177	struct ag71xx_ring *ring = &ag->rx_ring;
1178	int ring_size = BIT(ring->order);
1179	int i;
1180
1181	if (!ring->buf)
1182		return;
1183
1184	for (i = 0; i < ring_size; i++)
1185		if (ring->buf[i].rx.rx_buf) {
1186			dma_unmap_single(&ag->pdev->dev,
1187					 ring->buf[i].rx.dma_addr,
1188					 ag->rx_buf_size, DMA_FROM_DEVICE);
1189			skb_free_frag(ring->buf[i].rx.rx_buf);
1190		}
1191}
1192
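     /* Total frag size handed to the page-frag allocators: the RX buffer
      * itself plus tail room for struct skb_shared_info, as needed by
      * napi_build_skb() later in the RX path.
      */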
1193static int ag71xx_buffer_size(struct ag71xx *ag)
1194{
1195	return ag->rx_buf_size +
1196	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1197}
1198
1199static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
1200			       int offset,
1201			       void *(*alloc)(unsigned int size))
1202{
1203	struct ag71xx_ring *ring = &ag->rx_ring;
1204	struct ag71xx_desc *desc;
1205	void *data;
1206
1207	desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
1208
1209	data = alloc(ag71xx_buffer_size(ag));
1210	if (!data)
1211		return false;
1212
1213	buf->rx.rx_buf = data;
1214	buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
1215					  DMA_FROM_DEVICE);
1216	desc->data = (u32)buf->rx.dma_addr + offset;
1217	return true;
1218}
1219
1220static int ag71xx_ring_rx_init(struct ag71xx *ag)
1221{
1222	struct ag71xx_ring *ring = &ag->rx_ring;
1223	struct net_device *ndev = ag->ndev;
1224	int ring_mask = BIT(ring->order) - 1;
1225	int ring_size = BIT(ring->order);
1226	unsigned int i;
1227	int ret;
1228
1229	ret = 0;
1230	for (i = 0; i < ring_size; i++) {
1231		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
1232
1233		desc->next = (u32)(ring->descs_dma +
1234			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
1235
1236		netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
1237			  desc, desc->next);
1238	}
1239
1240	for (i = 0; i < ring_size; i++) {
1241		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
1242
1243		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
1244					netdev_alloc_frag)) {
1245			ret = -ENOMEM;
1246			break;
1247		}
1248
1249		desc->ctrl = DESC_EMPTY;
1250	}
1251
1252	/* flush descriptors */
1253	wmb();
1254
1255	ring->curr = 0;
1256	ring->dirty = 0;
1257
1258	return ret;
1259}
1260
1261static int ag71xx_ring_rx_refill(struct ag71xx *ag)
1262{
1263	struct ag71xx_ring *ring = &ag->rx_ring;
1264	int ring_mask = BIT(ring->order) - 1;
1265	int offset = ag->rx_buf_offset;
1266	unsigned int count;
1267
1268	count = 0;
1269	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
1270		struct ag71xx_desc *desc;
1271		unsigned int i;
1272
1273		i = ring->dirty & ring_mask;
1274		desc = ag71xx_ring_desc(ring, i);
1275
1276		if (!ring->buf[i].rx.rx_buf &&
1277		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
1278					napi_alloc_frag))
1279			break;
1280
1281		desc->ctrl = DESC_EMPTY;
1282		count++;
1283	}
1284
1285	/* flush descriptors */
1286	wmb();
1287
1288	netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
1289		  count);
1290
1291	return count;
1292}
1293
1294static int ag71xx_rings_init(struct ag71xx *ag)
1295{
1296	struct ag71xx_ring *tx = &ag->tx_ring;
1297	struct ag71xx_ring *rx = &ag->rx_ring;
1298	int ring_size, tx_size;
1299
1300	ring_size = BIT(tx->order) + BIT(rx->order);
1301	tx_size = BIT(tx->order);
1302
1303	tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
1304	if (!tx->buf)
1305		return -ENOMEM;
1306
1307	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
1308					   ring_size * AG71XX_DESC_SIZE,
1309					   &tx->descs_dma, GFP_KERNEL);
1310	if (!tx->descs_cpu) {
1311		kfree(tx->buf);
1312		tx->buf = NULL;
1313		return -ENOMEM;
1314	}
1315
1316	rx->buf = &tx->buf[tx_size];
1317	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
1318	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;
1319
1320	ag71xx_ring_tx_init(ag);
1321	return ag71xx_ring_rx_init(ag);
1322}
1323
1324static void ag71xx_rings_free(struct ag71xx *ag)
1325{
1326	struct ag71xx_ring *tx = &ag->tx_ring;
1327	struct ag71xx_ring *rx = &ag->rx_ring;
1328	int ring_size;
1329
1330	ring_size = BIT(tx->order) + BIT(rx->order);
1331
1332	if (tx->descs_cpu)
1333		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
1334				  tx->descs_cpu, tx->descs_dma);
1335
1336	kfree(tx->buf);
1337
1338	tx->descs_cpu = NULL;
1339	rx->descs_cpu = NULL;
1340	tx->buf = NULL;
1341	rx->buf = NULL;
1342}
1343
1344static void ag71xx_rings_cleanup(struct ag71xx *ag)
1345{
1346	ag71xx_ring_rx_clean(ag);
1347	ag71xx_ring_tx_clean(ag);
1348	ag71xx_rings_free(ag);
1349
1350	netdev_reset_queue(ag->ndev);
1351}
1352
1353static void ag71xx_hw_init(struct ag71xx *ag)
1354{
1355	ag71xx_hw_stop(ag);
1356
1357	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
1358	usleep_range(20, 30);
1359
1360	reset_control_assert(ag->mac_reset);
1361	msleep(100);
1362	reset_control_deassert(ag->mac_reset);
1363	msleep(200);
1364
1365	ag71xx_hw_setup(ag);
1366
1367	ag71xx_dma_reset(ag);
1368}
1369
1370static int ag71xx_hw_enable(struct ag71xx *ag)
1371{
1372	int ret;
1373
1374	ret = ag71xx_rings_init(ag);
1375	if (ret)
1376		return ret;
1377
1378	napi_enable(&ag->napi);
1379	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
1380	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
1381	netif_start_queue(ag->ndev);
1382
1383	return 0;
1384}
1385
1386static void ag71xx_hw_disable(struct ag71xx *ag)
1387{
1388	netif_stop_queue(ag->ndev);
1389
1390	ag71xx_hw_stop(ag);
1391	ag71xx_dma_reset(ag);
1392
1393	napi_disable(&ag->napi);
1394	del_timer_sync(&ag->oom_timer);
1395
1396	ag71xx_rings_cleanup(ag);
1397}
1398
1399static int ag71xx_open(struct net_device *ndev)
1400{
1401	struct ag71xx *ag = netdev_priv(ndev);
1402	unsigned int max_frame_len;
1403	int ret;
1404
1405	ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
1406	if (ret) {
 1407		netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
1408			  ret);
1409		return ret;
1410	}
1411
1412	max_frame_len = ag71xx_max_frame_len(ndev->mtu);
1413	ag->rx_buf_size =
1414		SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);
1415
1416	/* setup max frame length */
1417	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
1418	ag71xx_hw_set_macaddr(ag, ndev->dev_addr);
1419
1420	ret = ag71xx_hw_enable(ag);
1421	if (ret)
1422		goto err;
1423
1424	phylink_start(ag->phylink);
1425
1426	return 0;
1427
1428err:
1429	ag71xx_rings_cleanup(ag);
1430	phylink_disconnect_phy(ag->phylink);
1431	return ret;
1432}
1433
1434static int ag71xx_stop(struct net_device *ndev)
1435{
1436	struct ag71xx *ag = netdev_priv(ndev);
1437
1438	phylink_stop(ag->phylink);
1439	phylink_disconnect_phy(ag->phylink);
1440	ag71xx_hw_disable(ag);
1441
1442	return 0;
1443}
1444
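     /* Map one frame onto one or more descriptors. With desc_split = 512
      * (AR7100), a 1540-byte frame becomes segments of 512, 512, 508 and
      * 8 bytes: the third segment is shortened by 4 bytes so that the last
      * one stays above the 4-byte minimum the DMA engine can transfer.
      */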
1445static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
1446{
1447	int i, ring_mask, ndesc, split;
1448	struct ag71xx_desc *desc;
1449
1450	ring_mask = BIT(ring->order) - 1;
1451	ndesc = 0;
1452	split = ring->desc_split;
1453
1454	if (!split)
1455		split = len;
1456
1457	while (len > 0) {
1458		unsigned int cur_len = len;
1459
1460		i = (ring->curr + ndesc) & ring_mask;
1461		desc = ag71xx_ring_desc(ring, i);
1462
1463		if (!ag71xx_desc_empty(desc))
1464			return -1;
1465
1466		if (cur_len > split) {
1467			cur_len = split;
1468
 1469			/* TX will hang if DMA transfers <= 4 bytes,
1470			 * make sure next segment is more than 4 bytes long.
1471			 */
1472			if (len <= split + 4)
1473				cur_len -= 4;
1474		}
1475
1476		desc->data = addr;
1477		addr += cur_len;
1478		len -= cur_len;
1479
1480		if (len > 0)
1481			cur_len |= DESC_MORE;
1482
1483		/* prevent early tx attempt of this descriptor */
1484		if (!ndesc)
1485			cur_len |= DESC_EMPTY;
1486
1487		desc->ctrl = cur_len;
1488		ndesc++;
1489	}
1490
1491	return ndesc;
1492}
1493
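     /* Queue one frame for transmission. DESC_EMPTY on the first
      * descriptor is cleared only after every segment has been set up, so
      * the hardware cannot start on a half-built chain; the queue is
      * stopped early enough to leave room for one more worst-case packet.
      */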
1494static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
1495					  struct net_device *ndev)
1496{
1497	int i, n, ring_min, ring_mask, ring_size;
1498	struct ag71xx *ag = netdev_priv(ndev);
1499	struct ag71xx_ring *ring;
1500	struct ag71xx_desc *desc;
1501	dma_addr_t dma_addr;
1502
1503	ring = &ag->tx_ring;
1504	ring_mask = BIT(ring->order) - 1;
1505	ring_size = BIT(ring->order);
1506
1507	if (skb->len <= 4) {
1508		netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
1509		goto err_drop;
1510	}
1511
1512	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
1513				  DMA_TO_DEVICE);
1514
1515	i = ring->curr & ring_mask;
1516	desc = ag71xx_ring_desc(ring, i);
1517
1518	/* setup descriptor fields */
1519	n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
1520				 skb->len & ag->dcfg->desc_pktlen_mask);
1521	if (n < 0)
1522		goto err_drop_unmap;
1523
1524	i = (ring->curr + n - 1) & ring_mask;
1525	ring->buf[i].tx.len = skb->len;
1526	ring->buf[i].tx.skb = skb;
1527
1528	netdev_sent_queue(ndev, skb->len);
1529
1530	skb_tx_timestamp(skb);
1531
1532	desc->ctrl &= ~DESC_EMPTY;
1533	ring->curr += n;
1534
1535	/* flush descriptor */
1536	wmb();
1537
1538	ring_min = 2;
1539	if (ring->desc_split)
1540		ring_min *= AG71XX_TX_RING_DS_PER_PKT;
1541
1542	if (ring->curr - ring->dirty >= ring_size - ring_min) {
1543		netif_dbg(ag, tx_err, ndev, "tx queue full\n");
1544		netif_stop_queue(ndev);
1545	}
1546
1547	netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");
1548
1549	/* enable TX engine */
1550	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);
1551
1552	return NETDEV_TX_OK;
1553
1554err_drop_unmap:
1555	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);
1556
1557err_drop:
1558	ndev->stats.tx_dropped++;
1559
1560	dev_kfree_skb(skb);
1561	return NETDEV_TX_OK;
1562}
1563
1564static void ag71xx_oom_timer_handler(struct timer_list *t)
1565{
1566	struct ag71xx *ag = from_timer(ag, t, oom_timer);
1567
1568	napi_schedule(&ag->napi);
1569}
1570
1571static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1572{
1573	struct ag71xx *ag = netdev_priv(ndev);
1574
1575	netif_err(ag, tx_err, ndev, "tx timeout\n");
1576
1577	schedule_delayed_work(&ag->restart_work, 1);
1578}
1579
1580static void ag71xx_restart_work_func(struct work_struct *work)
1581{
1582	struct ag71xx *ag = container_of(work, struct ag71xx,
1583					 restart_work.work);
1584
1585	rtnl_lock();
1586	ag71xx_hw_disable(ag);
1587	ag71xx_hw_enable(ag);
1588
1589	phylink_stop(ag->phylink);
1590	phylink_start(ag->phylink);
1591
1592	rtnl_unlock();
1593}
1594
1595static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
1596{
1597	struct net_device *ndev = ag->ndev;
1598	int ring_mask, ring_size, done = 0;
1599	unsigned int pktlen_mask, offset;
1600	struct ag71xx_ring *ring;
1601	struct sk_buff *skb;
1602	LIST_HEAD(rx_list);
1603
1604	ring = &ag->rx_ring;
1605	pktlen_mask = ag->dcfg->desc_pktlen_mask;
1606	offset = ag->rx_buf_offset;
1607	ring_mask = BIT(ring->order) - 1;
1608	ring_size = BIT(ring->order);
1609
1610	netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
1611		  limit, ring->curr, ring->dirty);
1612
1613	while (done < limit) {
1614		unsigned int i = ring->curr & ring_mask;
1615		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
1616		int pktlen;
1617
1618		if (ag71xx_desc_empty(desc))
1619			break;
1620
1621		if ((ring->dirty + ring_size) == ring->curr) {
1622			WARN_ONCE(1, "RX out of ring");
1623			break;
1624		}
1625
1626		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
1627
1628		pktlen = desc->ctrl & pktlen_mask;
1629		pktlen -= ETH_FCS_LEN;
1630
1631		dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
1632				 ag->rx_buf_size, DMA_FROM_DEVICE);
1633
1634		ndev->stats.rx_packets++;
1635		ndev->stats.rx_bytes += pktlen;
1636
1637		skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
1638		if (!skb) {
1639			ndev->stats.rx_errors++;
1640			skb_free_frag(ring->buf[i].rx.rx_buf);
1641			goto next;
1642		}
1643
1644		skb_reserve(skb, offset);
1645		skb_put(skb, pktlen);
1646
1647		skb->dev = ndev;
1648		skb->ip_summed = CHECKSUM_NONE;
1649		skb->protocol = eth_type_trans(skb, ndev);
1650		list_add_tail(&skb->list, &rx_list);
1651
1652next:
1653		ring->buf[i].rx.rx_buf = NULL;
1654		done++;
1655
1656		ring->curr++;
1657	}
1658
1659	ag71xx_ring_rx_refill(ag);
1660
1661	netif_receive_skb_list(&rx_list);
1662
1663	netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
1664		  ring->curr, ring->dirty, done);
1665
1666	return done;
1667}
1668
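     /* NAPI poll: reap TX completions first, then receive up to the
      * budget. If an RX buffer could not be refilled, polling is abandoned
      * and the OOM timer retries later; otherwise interrupts are
      * re-enabled once a round finishes under budget with no work pending.
      */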
1669static int ag71xx_poll(struct napi_struct *napi, int limit)
1670{
1671	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
1672	struct ag71xx_ring *rx_ring = &ag->rx_ring;
1673	int rx_ring_size = BIT(rx_ring->order);
1674	struct net_device *ndev = ag->ndev;
1675	int tx_done, rx_done;
1676	u32 status;
1677
1678	tx_done = ag71xx_tx_packets(ag, false, limit);
1679
1680	netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
1681	rx_done = ag71xx_rx_packets(ag, limit);
1682
1683	if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
1684		goto oom;
1685
1686	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
1687	if (unlikely(status & RX_STATUS_OF)) {
1688		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
1689		ndev->stats.rx_fifo_errors++;
1690
1691		/* restart RX */
1692		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
1693	}
1694
1695	if (rx_done < limit) {
1696		if (status & RX_STATUS_PR)
1697			goto more;
1698
1699		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
1700		if (status & TX_STATUS_PS)
1701			goto more;
1702
 1703		netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n",
1704			  rx_done, tx_done, limit);
1705
1706		napi_complete(napi);
1707
1708		/* enable interrupts */
1709		ag71xx_int_enable(ag, AG71XX_INT_POLL);
1710		return rx_done;
1711	}
1712
1713more:
1714	netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
1715		  rx_done, tx_done, limit);
1716	return limit;
1717
1718oom:
1719	netif_err(ag, rx_err, ndev, "out of memory\n");
1720
1721	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
1722	napi_complete(napi);
1723	return 0;
1724}
1725
1726static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
1727{
1728	struct net_device *ndev = dev_id;
1729	struct ag71xx *ag;
1730	u32 status;
1731
1732	ag = netdev_priv(ndev);
1733	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
1734
1735	if (unlikely(!status))
1736		return IRQ_NONE;
1737
1738	if (unlikely(status & AG71XX_INT_ERR)) {
1739		if (status & AG71XX_INT_TX_BE) {
1740			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
1741			netif_err(ag, intr, ndev, "TX BUS error\n");
1742		}
1743		if (status & AG71XX_INT_RX_BE) {
1744			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
1745			netif_err(ag, intr, ndev, "RX BUS error\n");
1746		}
1747	}
1748
1749	if (likely(status & AG71XX_INT_POLL)) {
1750		ag71xx_int_disable(ag, AG71XX_INT_POLL);
1751		netif_dbg(ag, intr, ndev, "enable polling mode\n");
1752		napi_schedule(&ag->napi);
1753	}
1754
1755	return IRQ_HANDLED;
1756}
1757
1758static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
1759{
1760	struct ag71xx *ag = netdev_priv(ndev);
1761
1762	WRITE_ONCE(ndev->mtu, new_mtu);
1763	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
1764		  ag71xx_max_frame_len(ndev->mtu));
1765
1766	return 0;
1767}
1768
1769static const struct net_device_ops ag71xx_netdev_ops = {
1770	.ndo_open		= ag71xx_open,
1771	.ndo_stop		= ag71xx_stop,
1772	.ndo_start_xmit		= ag71xx_hard_start_xmit,
1773	.ndo_eth_ioctl		= ag71xx_do_ioctl,
1774	.ndo_tx_timeout		= ag71xx_tx_timeout,
1775	.ndo_change_mtu		= ag71xx_change_mtu,
1776	.ndo_set_mac_address	= eth_mac_addr,
1777	.ndo_validate_addr	= eth_validate_addr,
1778};
1779
1780static const u32 ar71xx_addr_ar7100[] = {
1781	0x19000000, 0x1a000000,
1782};
1783
1784static int ag71xx_probe(struct platform_device *pdev)
1785{
1786	struct device_node *np = pdev->dev.of_node;
1787	const struct ag71xx_dcfg *dcfg;
1788	struct net_device *ndev;
1789	struct resource *res;
1790	struct clk *clk_eth;
1791	int tx_size, err, i;
1792	struct ag71xx *ag;
1793
1794	if (!np)
1795		return -ENODEV;
1796
1797	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
1798	if (!ndev)
1799		return -ENOMEM;
1800
1801	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1802	if (!res)
1803		return -EINVAL;
1804
1805	dcfg = of_device_get_match_data(&pdev->dev);
1806	if (!dcfg)
1807		return -EINVAL;
1808
1809	ag = netdev_priv(ndev);
1810	ag->mac_idx = -1;
1811	for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
1812		if (ar71xx_addr_ar7100[i] == res->start)
1813			ag->mac_idx = i;
1814	}
1815
1816	if (ag->mac_idx < 0) {
1817		netif_err(ag, probe, ndev, "unknown mac idx\n");
1818		return -EINVAL;
1819	}
1820
1821	clk_eth = devm_clk_get_enabled(&pdev->dev, "eth");
1822	if (IS_ERR(clk_eth))
1823		return dev_err_probe(&pdev->dev, PTR_ERR(clk_eth),
1824				     "Failed to get eth clk.");
1825
1826	SET_NETDEV_DEV(ndev, &pdev->dev);
1827
1828	ag->pdev = pdev;
1829	ag->ndev = ndev;
1830	ag->dcfg = dcfg;
1831	ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
1832	memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));
1833
1834	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
1835	if (IS_ERR(ag->mac_reset))
1836		return dev_err_probe(&pdev->dev, PTR_ERR(ag->mac_reset),
1837				     "missing mac reset");
1838
1839	ag->mac_base = devm_ioremap_resource(&pdev->dev, res);
1840	if (IS_ERR(ag->mac_base))
1841		return PTR_ERR(ag->mac_base);
1842
1843	/* ensure that HW is in manual polling mode before interrupts are
1844	 * activated. Otherwise ag71xx_interrupt might call napi_schedule
1845	 * before it is initialized by netif_napi_add.
1846	 */
1847	ag71xx_int_disable(ag, AG71XX_INT_POLL);
1848
1849	ndev->irq = platform_get_irq(pdev, 0);
1850	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
1851			       0x0, dev_name(&pdev->dev), ndev);
1852	if (err) {
1853		netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
1854			  ndev->irq);
1855		return err;
1856	}
1857
1858	ndev->netdev_ops = &ag71xx_netdev_ops;
1859	ndev->ethtool_ops = &ag71xx_ethtool_ops;
1860
1861	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
1862	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);
1863
1864	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
1865	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
1866
1867	ndev->min_mtu = 68;
1868	ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);
1869
1870	ag->rx_buf_offset = NET_SKB_PAD;
1871	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
1872		ag->rx_buf_offset += NET_IP_ALIGN;
1873
1874	if (ag71xx_is(ag, AR7100)) {
1875		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
1876		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
1877	}
1878	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
1879
1880	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
1881					    sizeof(struct ag71xx_desc),
1882					    &ag->stop_desc_dma, GFP_KERNEL);
1883	if (!ag->stop_desc)
1884		return -ENOMEM;
1885
1886	ag->stop_desc->data = 0;
1887	ag->stop_desc->ctrl = 0;
1888	ag->stop_desc->next = (u32)ag->stop_desc_dma;
1889
1890	err = of_get_ethdev_address(np, ndev);
1891	if (err == -EPROBE_DEFER)
1892		return err;
1893	if (err) {
1894		netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
1895		eth_hw_addr_random(ndev);
1896	}
1897
1898	err = of_get_phy_mode(np, &ag->phy_if_mode);
1899	if (err) {
1900		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
1901		return err;
1902	}
1903
1904	netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
1905			      AG71XX_NAPI_WEIGHT);
1906
1907	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
1908
1909	ag71xx_hw_init(ag);
1910
1911	err = ag71xx_mdio_probe(ag);
1912	if (err)
1913		return err;
1914
1915	err = ag71xx_phylink_setup(ag);
1916	if (err)
1917		return dev_err_probe(&pdev->dev, err,
1918				     "failed to setup phylink");
1919
1920	err = devm_register_netdev(&pdev->dev, ndev);
1921	if (err) {
1922		netif_err(ag, probe, ndev, "unable to register net device\n");
1923		return err;
1924	}
1925
1926	netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
1927		   (unsigned long)ag->mac_base, ndev->irq,
1928		   phy_modes(ag->phy_if_mode));
1929
1930	return 0;
1931}
1932
1933static const u32 ar71xx_fifo_ar7100[] = {
1934	0x0fff0000, 0x00001fff, 0x00780fff,
1935};
1936
1937static const u32 ar71xx_fifo_ar9130[] = {
1938	0x0fff0000, 0x00001fff, 0x008001ff,
1939};
1940
1941static const u32 ar71xx_fifo_ar9330[] = {
1942	0x0010ffff, 0x015500aa, 0x01f00140,
1943};
1944
1945static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
1946	.type = AR7100,
1947	.fifodata = ar71xx_fifo_ar7100,
1948	.max_frame_len = 1540,
1949	.desc_pktlen_mask = SZ_4K - 1,
1950	.tx_hang_workaround = false,
1951};
1952
1953static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
1954	.type = AR7240,
1955	.fifodata = ar71xx_fifo_ar7100,
1956	.max_frame_len = 1540,
1957	.desc_pktlen_mask = SZ_4K - 1,
1958	.tx_hang_workaround = true,
1959};
1960
1961static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
1962	.type = AR9130,
1963	.fifodata = ar71xx_fifo_ar9130,
1964	.max_frame_len = 1540,
1965	.desc_pktlen_mask = SZ_4K - 1,
1966	.tx_hang_workaround = false,
1967};
1968
1969static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
1970	.type = AR9330,
1971	.fifodata = ar71xx_fifo_ar9330,
1972	.max_frame_len = 1540,
1973	.desc_pktlen_mask = SZ_4K - 1,
1974	.tx_hang_workaround = true,
1975};
1976
1977static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
1978	.type = AR9340,
1979	.fifodata = ar71xx_fifo_ar9330,
1980	.max_frame_len = SZ_16K - 1,
1981	.desc_pktlen_mask = SZ_16K - 1,
1982	.tx_hang_workaround = true,
1983};
1984
1985static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
1986	.type = QCA9530,
1987	.fifodata = ar71xx_fifo_ar9330,
1988	.max_frame_len = SZ_16K - 1,
1989	.desc_pktlen_mask = SZ_16K - 1,
1990	.tx_hang_workaround = true,
1991};
1992
1993static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
1994	.type = QCA9550,
1995	.fifodata = ar71xx_fifo_ar9330,
1996	.max_frame_len = 1540,
1997	.desc_pktlen_mask = SZ_16K - 1,
1998	.tx_hang_workaround = true,
1999};
2000
2001static const struct of_device_id ag71xx_match[] = {
2002	{ .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
2003	{ .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
2004	{ .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
2005	{ .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
2006	{ .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
2007	{ .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
2008	{ .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
2009	{ .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
2010	{ .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
2011	{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
2012	{}
2013};
2014MODULE_DEVICE_TABLE(of, ag71xx_match);
2015
2016static struct platform_driver ag71xx_driver = {
2017	.probe		= ag71xx_probe,
2018	.driver = {
2019		.name	= "ag71xx",
2020		.of_match_table = ag71xx_match,
2021	}
2022};
2023
2024module_platform_driver(ag71xx_driver);
2025MODULE_DESCRIPTION("Atheros AR71xx built-in ethernet mac driver");
2026MODULE_LICENSE("GPL v2");