// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver
 *
 * Copyright (C) 2014-2019 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
 *
 * Based on the SuperH Ethernet driver
 */

#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/reset.h>
#include <linux/math64.h>
#include <net/ip.h>
#include <net/page_pool/helpers.h>

#include "ravb.h"

#define RAVB_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	  | \
		 NETIF_MSG_TIMER  | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)

void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
		 u32 set)
{
	ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
}

int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
	int i;

	for (i = 0; i < 10000; i++) {
		if ((ravb_read(ndev, reg) & mask) == value)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}

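/* A note on the mode-change handshake below: the requested operating mode is
 * written to CCC.OPC, and ravb_wait() then polls CSR.OPS until the hardware
 * reports the matching state -- for at most 10000 * 10 us = 100 ms in total.
 */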
static int ravb_set_opmode(struct net_device *ndev, u32 opmode)
{
	u32 csr_ops = 1U << (opmode & CCC_OPC);
	u32 ccc_mask = CCC_OPC;
	int error;

	/* If gPTP active in config mode is supported it needs to be configured
	 * along with CSEL and operating mode in the same access. This is a
	 * hardware limitation.
	 */
	if (opmode & CCC_GAC)
		ccc_mask |= CCC_GAC | CCC_CSEL;

	/* Set operating mode */
	ravb_modify(ndev, CCC, ccc_mask, opmode);
	/* Check if the operating mode is changed to the requested one */
	error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
	if (error) {
		netdev_err(ndev, "failed to switch device to requested mode (%u)\n",
			   opmode & CCC_OPC);
	}

	return error;
}

static void ravb_set_rate_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 10:		/* 10BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
		break;
	case 100:		/* 100BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
		break;
	case 1000:		/* 1000BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
		break;
	}
}

static void ravb_set_rate_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 100:		/* 100BASE */
		ravb_write(ndev, GECMR_SPEED_100, GECMR);
		break;
	case 1000:		/* 1000BASE */
		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
		break;
	}
}

/* Get MAC address from the MAC address registers
 *
 * Ethernet AVB device doesn't have ROM for MAC address.
 * This function gets the MAC address that was used by a bootloader.
 */
static void ravb_read_mac_address(struct device_node *np,
				  struct net_device *ndev)
{
	int ret;

	ret = of_get_ethdev_address(np, ndev);
	if (ret) {
		u32 mahr = ravb_read(ndev, MAHR);
		u32 malr = ravb_read(ndev, MALR);
		u8 addr[ETH_ALEN];

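		/* MAHR holds the first four bytes of the MAC address (most
		 * significant byte in bits 31:24); MALR holds the remaining
		 * two bytes.
		 */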
		addr[0] = (mahr >> 24) & 0xFF;
		addr[1] = (mahr >> 16) & 0xFF;
		addr[2] = (mahr >>  8) & 0xFF;
		addr[3] = (mahr >>  0) & 0xFF;
		addr[4] = (malr >>  8) & 0xFF;
		addr[5] = (malr >>  0) & 0xFF;
		eth_hw_addr_set(ndev, addr);
	}
}

static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
}

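/* The helpers below bit-bang the MDIO bus through the PIR register; the
 * mdiobb framework calls them to drive MDC, set the MDIO pin direction,
 * and read or write the data bit.
 */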
/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

/* Set data bit */
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}

/* Get data bit */
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}

/* MDIO bus control struct */
static const struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = ravb_set_mdc,
	.set_mdio_dir = ravb_set_mdio_dir,
	.set_mdio_data = ravb_set_mdio_data,
	.get_mdio_data = ravb_get_mdio_data,
};

static struct ravb_rx_desc *
ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
		 unsigned int i)
{
	return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
}

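/* When num_tx_desc > 1, each skb occupies num_tx_desc consecutive
 * descriptors (a DPTR-aligned header part plus the remaining payload), so
 * the tx_skb slot for a descriptor is found at entry / num_tx_desc and the
 * skb is only freed once the last descriptor of the set is reclaimed.
 */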
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats[q];
	unsigned int num_tx_desc = priv->num_tx_desc;
	struct ravb_tx_desc *desc;
	unsigned int entry;
	int free_num = 0;
	u32 size;

	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
		bool txed;

		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
					     num_tx_desc);
		desc = &priv->tx_ring[q][entry];
		txed = desc->die_dt == DT_FEMPTY;
		if (free_txed_only && !txed)
			break;
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
		/* Free the original skb. */
		if (priv->tx_skb[q][entry / num_tx_desc]) {
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			/* Last packet descriptor? */
			if (entry % num_tx_desc == num_tx_desc - 1) {
				entry /= num_tx_desc;
				dev_kfree_skb_any(priv->tx_skb[q][entry]);
				priv->tx_skb[q][entry] = NULL;
				if (txed)
					stats->tx_packets++;
			}
			free_num++;
		}
		if (txed)
			stats->tx_bytes += size;
		desc->die_dt = DT_EEMPTY;
	}
	return free_num;
}

static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;

	if (!priv->rx_ring[q].raw)
		return;

	ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
			  priv->rx_desc_dma[q]);
	priv->rx_ring[q].raw = NULL;
}

/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int num_tx_desc = priv->num_tx_desc;
	unsigned int ring_size;
	unsigned int i;

	ravb_rx_ring_free(ndev, q);

	if (priv->tx_ring[q]) {
		ravb_tx_free(ndev, q, false);

		ring_size = sizeof(struct ravb_tx_desc) *
			    (priv->num_tx_ring[q] * num_tx_desc + 1);
		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
				  priv->tx_desc_dma[q]);
		priv->tx_ring[q] = NULL;
	}

	/* Free RX buffers */
	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		if (priv->rx_buffers[q][i].page)
			page_pool_put_page(priv->rx_pool[q],
					   priv->rx_buffers[q][i].page,
					   0, true);
	}
	kfree(priv->rx_buffers[q]);
	priv->rx_buffers[q] = NULL;
	page_pool_destroy(priv->rx_pool[q]);

	/* Free aligned TX buffers */
	kfree(priv->tx_align[q]);
	priv->tx_align[q] = NULL;

	/* Free TX skb ringbuffer.
	 * SKBs are freed by ravb_tx_free() call above.
	 */
	kfree(priv->tx_skb[q]);
	priv->tx_skb[q] = NULL;
}

static int
ravb_alloc_rx_buffer(struct net_device *ndev, int q, u32 entry, gfp_t gfp_mask,
		     struct ravb_rx_desc *rx_desc)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct ravb_rx_buffer *rx_buff;
	dma_addr_t dma_addr;
	unsigned int size;

	rx_buff = &priv->rx_buffers[q][entry];
	size = info->rx_buffer_size;
	rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset,
					&size, gfp_mask);
	if (unlikely(!rx_buff->page)) {
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
		rx_desc->ds_cc = cpu_to_le16(0);
		return -ENOMEM;
	}

	dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset;
	dma_sync_single_for_device(ndev->dev.parent, dma_addr,
				   info->rx_buffer_size, DMA_FROM_DEVICE);
	rx_desc->dptr = cpu_to_le32(dma_addr);

	/* The end of the RX buffer is used to store skb shared data, so we need
	 * to ensure that the hardware leaves enough space for this.
	 */
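	/* The descriptor data size is therefore the buffer size, less the
	 * shared info tailroom and the FCS, plus 2 bytes -- presumably so
	 * that a checksum value appended by the hardware (see
	 * ravb_rx_csum()) still fits within the buffer.
	 */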
	rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size -
				     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -
				     ETH_FCS_LEN + sizeof(__sum16));
	return 0;
}

static u32
ravb_rx_ring_refill(struct net_device *ndev, int q, u32 count, gfp_t gfp_mask)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_rx_desc *rx_desc;
	u32 i, entry;

	for (i = 0; i < count; i++) {
		entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q];
		rx_desc = ravb_rx_get_desc(priv, q, entry);

		if (!priv->rx_buffers[q][entry].page) {
			if (unlikely(ravb_alloc_rx_buffer(ndev, q, entry,
							  gfp_mask, rx_desc)))
				break;
		}
		/* Descriptor type must be set after all the above writes */
		dma_wmb();
		rx_desc->die_dt = DT_FEMPTY;
	}

	return i;
}

/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int num_tx_desc = priv->num_tx_desc;
	struct ravb_rx_desc *rx_desc;
	struct ravb_tx_desc *tx_desc;
	struct ravb_desc *desc;
	unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
				    num_tx_desc;
	unsigned int i;

	priv->cur_rx[q] = 0;
	priv->cur_tx[q] = 0;
	priv->dirty_rx[q] = 0;
	priv->dirty_tx[q] = 0;

	/* Regular RX descriptors have already been initialized by
	 * ravb_rx_ring_refill(), we just need to initialize the final link
	 * descriptor.
	 */
	rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]);
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	rx_desc->die_dt = DT_LINKFIX; /* type */
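	/* A DT_LINKFIX descriptor makes the DMAC jump back to the ring base
	 * address, closing the descriptor list into a ring (the same trick
	 * is used for the TX ring below).
	 */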

	memset(priv->tx_ring[q], 0, tx_ring_size);
	/* Build TX ring buffer */
	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
	     i++, tx_desc++) {
		tx_desc->die_dt = DT_EEMPTY;
		if (num_tx_desc > 1) {
			tx_desc++;
			tx_desc->die_dt = DT_EEMPTY;
		}
	}
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
	tx_desc->die_dt = DT_LINKFIX; /* type */

	/* RX descriptor base address for best effort */
	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

	/* TX descriptor base address for best effort */
	desc = &priv->desc_bat[q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}

static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;

	ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);

	priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
						  &priv->rx_desc_dma[q],
						  GFP_KERNEL);

	return priv->rx_ring[q].raw;
}

/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int num_tx_desc = priv->num_tx_desc;
	struct page_pool_params params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP,
		.pool_size = priv->num_rx_ring[q],
		.nid = NUMA_NO_NODE,
		.dev = ndev->dev.parent,
		.dma_dir = DMA_FROM_DEVICE,
	};
	unsigned int ring_size;
	u32 num_filled;

	/* Allocate RX page pool and buffers */
	priv->rx_pool[q] = page_pool_create(&params);
	if (IS_ERR(priv->rx_pool[q]))
		goto error;

	/* Allocate RX buffers */
	priv->rx_buffers[q] = kcalloc(priv->num_rx_ring[q],
				      sizeof(*priv->rx_buffers[q]), GFP_KERNEL);
	if (!priv->rx_buffers[q])
		goto error;

	/* Allocate TX skb rings */
	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
	if (!priv->tx_skb[q])
		goto error;

	/* Allocate all RX descriptors. */
	if (!ravb_alloc_rx_desc(ndev, q))
		goto error;

	/* Populate RX ring buffer. */
	priv->dirty_rx[q] = 0;
	ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
	memset(priv->rx_ring[q].raw, 0, ring_size);
	num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q],
					 GFP_KERNEL);
	if (num_filled != priv->num_rx_ring[q])
		goto error;

	if (num_tx_desc > 1) {
		/* Allocate rings for the aligned buffers */
		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
					    DPTR_ALIGN - 1, GFP_KERNEL);
		if (!priv->tx_align[q])
			goto error;
	}

	/* Allocate all TX descriptors. */
	ring_size = sizeof(struct ravb_tx_desc) *
		    (priv->num_tx_ring[q] * num_tx_desc + 1);
	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->tx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->tx_ring[q])
		goto error;

	return 0;

error:
	ravb_ring_free(ndev, q);

	return -ENOMEM;
}

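/* Checksum offload on GbEth must be configured with both the TX and RX
 * processing engines halted: clear CSR0.TPE/RPE, wait for the hardware to
 * confirm, program CSR1/CSR2, then re-enable both engines.
 */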
static void ravb_csum_init_gbeth(struct net_device *ndev)
{
	bool tx_enable = ndev->features & NETIF_F_HW_CSUM;
	bool rx_enable = ndev->features & NETIF_F_RXCSUM;

	if (!(tx_enable || rx_enable))
		goto done;

	ravb_write(ndev, 0, CSR0);
	if (ravb_wait(ndev, CSR0, CSR0_TPE | CSR0_RPE, 0)) {
		netdev_err(ndev, "Timeout enabling hardware checksum\n");

		if (tx_enable)
			ndev->features &= ~NETIF_F_HW_CSUM;

		if (rx_enable)
			ndev->features &= ~NETIF_F_RXCSUM;
	} else {
		if (tx_enable)
			ravb_write(ndev, CSR1_CSUM_ENABLE, CSR1);

		if (rx_enable)
			ravb_write(ndev, CSR2_CSUM_ENABLE, CSR2);
	}

done:
	ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
}

static void ravb_emac_init_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
	} else {
		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
			    CXR31_SEL_LINK0);
	}

	/* Receive frame limit set register */
	ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);

	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
	ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
			 ECMR_TE | ECMR_RE | ECMR_RCPT |
			 ECMR_TXF | ECMR_RXF, ECMR);

	ravb_set_rate_gbeth(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev, (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);

	ravb_csum_init_gbeth(ndev);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
}

static void ravb_emac_init_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	/* Set receive frame length
	 *
	 * The length set here describes the frame from the destination address
	 * up to and including the CRC data. However only the frame data,
	 * excluding the CRC, are transferred to memory. To allow for the
	 * largest frames add the CRC length to the maximum Rx descriptor size.
	 */
	ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);

	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
	ravb_write(ndev, ECMR_ZPF | ECMR_DM |
		   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
		   ECMR_TE | ECMR_RE, ECMR);

	ravb_set_rate_rcar(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev,
		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}

static void ravb_emac_init_rcar_gen4(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII;

	ravb_modify(ndev, APSR, APSR_MIISELECT, mii ? APSR_MIISELECT : 0);

	ravb_emac_init_rcar(ndev);
}

/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	info->emac_init(ndev);
}

static int ravb_dmac_init_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);

	/* Set DMAC RX */
	ravb_write(ndev, 0x60000000, RCR);

	/* Set Max Frame Length (RTC) */
	ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC);

	/* Set FIFO size */
	ravb_write(ndev, 0x00222200, TGC);

	ravb_write(ndev, 0, TCCR);

	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0, RIC0);
	/* Disable FIFO full warning */
	ravb_write(ndev, 0x0, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);

	ravb_write(ndev, TIC_FTE0, TIC);

	return 0;
}

static int ravb_dmac_init_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;
	error = ravb_ring_init(ndev, RAVB_NC);
	if (error) {
		ravb_ring_free(ndev, RAVB_BE);
		return error;
	}

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);
	ravb_ring_format(ndev, RAVB_NC);

	/* Set AVB RX */
	ravb_write(ndev,
		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);

	/* Set FIFO size */
	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);

	/* Timestamp enable */
	ravb_write(ndev, TCCR_TFEN, TCCR);

	/* Interrupt init: */
	if (info->multi_irqs) {
		/* Clear DIL.DPLx */
		ravb_write(ndev, 0, DIL);
		/* Set queue specific interrupt */
		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
	}
	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
	/* Disable FIFO full warning */
	ravb_write(ndev, 0, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
	/* Frame transmitted, timestamp FIFO updated */
	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

	return 0;
}

/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	/* Set CONFIG mode */
	error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
	if (error)
		return error;

	error = info->dmac_init(ndev);
	if (error)
		return error;

	/* Setting the control will start the AVB-DMAC process. */
	return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
}

static void ravb_get_tx_tstamp(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb;
	struct timespec64 ts;
	u16 tag, tfa_tag;
	int count;
	u32 tfa2;

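	/* TSR.TFFL reports how many entries are queued in the TX timestamp
	 * FIFO; each TFA0/TFA1/TFA2 read plus a TCCR.TFR acknowledge pops
	 * one entry, which is matched to a waiting skb by its tag.
	 */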
	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
	while (count--) {
		tfa2 = ravb_read(ndev, TFA2);
		tfa_tag = (tfa2 & TFA2_TST) >> 16;
		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
			    ravb_read(ndev, TFA1);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
					 list) {
			skb = ts_skb->skb;
			tag = ts_skb->tag;
			list_del(&ts_skb->list);
			kfree(ts_skb);
			if (tag == tfa_tag) {
				skb_tstamp_tx(skb, &shhwtstamps);
				dev_consume_skb_any(skb);
				break;
			} else {
				dev_kfree_skb_any(skb);
			}
		}
		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
	}
}

static void ravb_rx_csum_gbeth(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	size_t csum_len;
	u16 *hw_csum;

	/* The hardware checksum status is contained in 4 bytes appended to
	 * packet data.
	 *
	 * For IPv4, the first 2 bytes are the IP header checksum status. We can
	 * ignore this as it will always be re-checked in inet_gro_receive().
	 *
	 * The last 2 bytes are the protocol checksum status which will be zero
	 * if the checksum has been validated.
	 */
	csum_len = sizeof(*hw_csum) * 2;
	if (unlikely(skb->len < csum_len))
		return;

	if (skb_is_nonlinear(skb)) {
		skb_frag_t *last_frag = &shinfo->frags[shinfo->nr_frags - 1];

		hw_csum = (u16 *)(skb_frag_address(last_frag) +
				  skb_frag_size(last_frag));
		skb_frag_size_sub(last_frag, csum_len);
	} else {
		hw_csum = (u16 *)skb_tail_pointer(skb);
		skb_trim(skb, skb->len - csum_len);
	}

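	/* hw_csum points just past the 4-byte trailer here, so the
	 * pre-decrement addresses its last two bytes: the protocol
	 * checksum status.
	 */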
	if (!get_unaligned(--hw_csum))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static void ravb_rx_csum(struct sk_buff *skb)
{
	u8 *hw_csum;

	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
	 * appended to packet data
	 */
	if (unlikely(skb->len < sizeof(__sum16)))
		return;
	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - sizeof(__sum16));
}

/* Packet receive function for Gigabit Ethernet */
static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct net_device_stats *stats;
	struct ravb_rx_desc *desc;
	struct sk_buff *skb;
	int rx_packets = 0;
	u8  desc_status;
	u16 desc_len;
	u8  die_dt;
	int entry;
	int limit;
	int i;

	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
	stats = &priv->stats[q];

	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q].desc[entry];
		if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
			break;

		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!desc_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			struct ravb_rx_buffer *rx_buff;
			void *rx_addr;

			rx_buff = &priv->rx_buffers[q][entry];
			rx_addr = page_address(rx_buff->page) + rx_buff->offset;
			die_dt = desc->die_dt & 0xF0;
			dma_sync_single_for_cpu(ndev->dev.parent,
						le32_to_cpu(desc->dptr),
						desc_len, DMA_FROM_DEVICE);

			switch (die_dt) {
			case DT_FSINGLE:
			case DT_FSTART:
				/* Start of packet: Set initial data length. */
				skb = napi_build_skb(rx_addr,
						     info->rx_buffer_size);
				if (unlikely(!skb)) {
					stats->rx_errors++;
					page_pool_put_page(priv->rx_pool[q],
							   rx_buff->page, 0,
							   true);
					goto refill;
				}
				skb_mark_for_recycle(skb);
				skb_put(skb, desc_len);

				/* Save this skb if the packet spans multiple
				 * descriptors.
				 */
				if (die_dt == DT_FSTART)
					priv->rx_1st_skb = skb;
				break;

			case DT_FMID:
			case DT_FEND:
				/* Continuing a packet: Add this buffer as an RX
				 * frag.
				 */

				/* rx_1st_skb will be NULL if napi_build_skb()
				 * failed for the first descriptor of a
				 * multi-descriptor packet.
				 */
				if (unlikely(!priv->rx_1st_skb)) {
					stats->rx_errors++;
					page_pool_put_page(priv->rx_pool[q],
							   rx_buff->page, 0,
							   true);

					/* We may find a DT_FSINGLE or DT_FSTART
					 * descriptor in the queue which we can
					 * process, so don't give up yet.
					 */
					continue;
				}
				skb_add_rx_frag(priv->rx_1st_skb,
						skb_shinfo(priv->rx_1st_skb)->nr_frags,
						rx_buff->page, rx_buff->offset,
						desc_len, info->rx_buffer_size);

				/* Set skb to point at the whole packet so that
				 * we only need one code path for finishing a
				 * packet.
				 */
				skb = priv->rx_1st_skb;
			}

			switch (die_dt) {
			case DT_FSINGLE:
			case DT_FEND:
				/* Finishing a packet: Determine protocol &
				 * checksum, hand off to NAPI and update our
				 * stats.
				 */
				skb->protocol = eth_type_trans(skb, ndev);
				if (ndev->features & NETIF_F_RXCSUM)
					ravb_rx_csum_gbeth(skb);
				stats->rx_bytes += skb->len;
				napi_gro_receive(&priv->napi[q], skb);
				rx_packets++;

				/* Clear rx_1st_skb so that it will only be
				 * non-NULL when valid.
				 */
				priv->rx_1st_skb = NULL;
			}

			/* Mark this RX buffer as consumed. */
			rx_buff->page = NULL;
		}
	}

refill:
	/* Refill the RX ring buffers. */
	priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
						 priv->cur_rx[q] - priv->dirty_rx[q],
						 GFP_ATOMIC);

	stats->rx_packets += rx_packets;
	return rx_packets;
}

/* Packet receive function for Ethernet AVB */
static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_ex_rx_desc *desc;
	unsigned int limit, i;
	struct sk_buff *skb;
	struct timespec64 ts;
	int rx_packets = 0;
	u8  desc_status;
	u16 pkt_len;
	int entry;

	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q].ex_desc[entry];
		if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
			break;

		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!pkt_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
				   MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
			struct ravb_rx_buffer *rx_buff;
			void *rx_addr;

			rx_buff = &priv->rx_buffers[q][entry];
			rx_addr = page_address(rx_buff->page) + rx_buff->offset;
			dma_sync_single_for_cpu(ndev->dev.parent,
						le32_to_cpu(desc->dptr),
						pkt_len, DMA_FROM_DEVICE);

			skb = napi_build_skb(rx_addr, info->rx_buffer_size);
			if (unlikely(!skb)) {
				stats->rx_errors++;
				page_pool_put_page(priv->rx_pool[q],
						   rx_buff->page, 0, true);
				break;
			}
			skb_mark_for_recycle(skb);
			get_ts &= (q == RAVB_NC) ?
					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
			if (get_ts) {
				struct skb_shared_hwtstamps *shhwtstamps;

				shhwtstamps = skb_hwtstamps(skb);
				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
					     32) | le32_to_cpu(desc->ts_sl);
				ts.tv_nsec = le32_to_cpu(desc->ts_n);
				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
			}

			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			if (ndev->features & NETIF_F_RXCSUM)
				ravb_rx_csum(skb);
			napi_gro_receive(&priv->napi[q], skb);
			rx_packets++;
			stats->rx_bytes += pkt_len;

			/* Mark this RX buffer as consumed. */
			rx_buff->page = NULL;
		}
	}

	/* Refill the RX ring buffers. */
	priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
						 priv->cur_rx[q] - priv->dirty_rx[q],
						 GFP_ATOMIC);

	stats->rx_packets += rx_packets;
	return rx_packets;
}

/* Packet receive function for Ethernet AVB */
static int ravb_rx(struct net_device *ndev, int budget, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	return info->receive(ndev, budget, q);
}

static void ravb_rcv_snd_disable(struct net_device *ndev)
{
	/* Disable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
	/* Enable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}

/* Wait for the DMA process to finish */
static int ravb_stop_dma(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	/* Wait for stopping the hardware TX process */
	error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);

	if (error)
		return error;

	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
			  0);
	if (error)
		return error;

	/* Stop the E-MAC's RX/TX processes. */
	ravb_rcv_snd_disable(ndev);

	/* Wait for stopping the RX DMA process */
	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
	if (error)
		return error;

	/* Stop AVB-DMAC process */
	return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
}

/* E-MAC interrupt handler */
static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecsr, psr;

	ecsr = ravb_read(ndev, ECSR);
	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */

	if (ecsr & ECSR_MPD)
		pm_wakeup_event(&priv->pdev->dev, 0);
	if (ecsr & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (ecsr & ECSR_LCHNG) {
		/* Link changed */
		if (priv->no_avb_link)
			return;
		psr = ravb_read(ndev, PSR);
		if (priv->avb_link_active_low)
			psr ^= PSR_LMON;
		if (!(psr & PSR_LMON)) {
			/* Disable RX and TX */
			ravb_rcv_snd_disable(ndev);
		} else {
			/* Enable RX and TX */
			ravb_rcv_snd_enable(ndev);
		}
	}
}

static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_HANDLED;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev))) {
		result = IRQ_NONE;
		goto out_rpm_put;
	}

	spin_lock(&priv->lock);
	ravb_emac_interrupt_unlocked(ndev);
	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 eis, ris2;

	eis = ravb_read(ndev, EIS);
	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
	if (eis & EIS_QFS) {
		ris2 = ravb_read(ndev, RIS2);
		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
			   RIS2);

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF0)
			priv->stats[RAVB_BE].rx_over_errors++;

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF1)
			priv->stats[RAVB_NC].rx_over_errors++;

		/* Receive FIFO Overflow int */
		if (ris2 & RIS2_RFFF)
			priv->rx_fifo_errors++;
	}
}

static bool ravb_queue_interrupt(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	u32 ris0 = ravb_read(ndev, RIS0);
	u32 ric0 = ravb_read(ndev, RIC0);
	u32 tis  = ravb_read(ndev, TIS);
	u32 tic  = ravb_read(ndev, TIC);

	if (((ris0 & ric0) & BIT(q)) || ((tis  & tic)  & BIT(q))) {
		if (napi_schedule_prep(&priv->napi[q])) {
			/* Mask RX and TX interrupts */
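			/* Devices with irq_en_dis have separate interrupt
			 * enable and disable registers, so masking a queue
			 * is a single write to RID0/TID; otherwise the
			 * enable bits in RIC0/TIC are cleared in place.
			 */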
			if (!info->irq_en_dis) {
				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
				ravb_write(ndev, tic & ~BIT(q), TIC);
			} else {
				ravb_write(ndev, BIT(q), RID0);
				ravb_write(ndev, BIT(q), TID);
			}
			__napi_schedule(&priv->napi[q]);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
				    ris0, ric0);
			netdev_warn(ndev,
				    "                    tx status 0x%08x, tx mask 0x%08x.\n",
				    tis, tic);
		}
		return true;
	}
	return false;
}

static bool ravb_timestamp_interrupt(struct net_device *ndev)
{
	u32 tis = ravb_read(ndev, TIS);

	if (tis & TIS_TFUF) {
		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
		ravb_get_tx_tstamp(ndev);
		return true;
	}
	return false;
}

static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev)))
		goto out_rpm_put;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Received and transmitted interrupts */
	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
		int q;

		/* Timestamp updated */
		if (ravb_timestamp_interrupt(ndev))
			result = IRQ_HANDLED;

		/* Network control and best effort queue RX/TX */
		if (info->nc_queues) {
			for (q = RAVB_NC; q >= RAVB_BE; q--) {
				if (ravb_queue_interrupt(ndev, q))
					result = IRQ_HANDLED;
			}
		} else {
			if (ravb_queue_interrupt(ndev, RAVB_BE))
				result = IRQ_HANDLED;
		}
	}

	/* E-MAC status summary */
	if (iss & ISS_MS) {
		ravb_emac_interrupt_unlocked(ndev);
		result = IRQ_HANDLED;
	}

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

/* Timestamp/Error/gPTP interrupt handler */
static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev)))
		goto out_rpm_put;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Timestamp updated */
	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
		result = IRQ_HANDLED;

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_NONE;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev)))
		goto out_rpm_put;

	spin_lock(&priv->lock);

	/* Network control/Best effort queue RX/TX */
	if (ravb_queue_interrupt(ndev, q))
		result = IRQ_HANDLED;

	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
}

static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
}

static int ravb_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	unsigned long flags;
	int q = napi - priv->napi;
	int mask = BIT(q);
	int work_done;

	/* Processing RX Descriptor Ring */
	/* Clear RX interrupt */
	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
	work_done = ravb_rx(ndev, budget, q);

	/* Processing TX Descriptor Ring */
	spin_lock_irqsave(&priv->lock, flags);
	/* Clear TX interrupt */
	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
	ravb_tx_free(ndev, q, true);
	netif_wake_subqueue(ndev, q);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Receive error message handling */
	priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
	if (info->nc_queues)
		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
		ndev->stats.rx_over_errors = priv->rx_over_errors;
	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;

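	/* If the budget was not exhausted the queues are quiescent:
	 * complete NAPI and unmask the interrupts that were masked in
	 * ravb_queue_interrupt().
	 */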
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* Re-enable RX/TX interrupts */
		spin_lock_irqsave(&priv->lock, flags);
		if (!info->irq_en_dis) {
			ravb_modify(ndev, RIC0, mask, mask);
			ravb_modify(ndev, TIC,  mask, mask);
		} else {
			ravb_write(ndev, mask, RIE0);
			ravb_write(ndev, mask, TIE);
		}
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return work_done;
}

static void ravb_set_duplex_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
}

/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct phy_device *phydev = ndev->phydev;
	bool new_state = false;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* If the E-MAC link change is ignored, disable TX and RX right away */
	if (priv->no_avb_link)
		ravb_rcv_snd_disable(ndev);

	if (phydev->link) {
		if (info->half_duplex && phydev->duplex != priv->duplex) {
			new_state = true;
			priv->duplex = phydev->duplex;
			ravb_set_duplex_gbeth(ndev);
		}

		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
			info->set_rate(ndev);
		}
		if (!priv->link) {
			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
			new_state = true;
			priv->link = phydev->link;
		}
	} else if (priv->link) {
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
		if (info->half_duplex)
			priv->duplex = -1;
	}

	/* If the E-MAC link change is ignored, enable TX and RX right away */
	if (priv->no_avb_link && phydev->link)
		ravb_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}

/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct phy_device *phydev;
	struct device_node *pn;
	phy_interface_t iface;
	int err;

	priv->link = 0;
	priv->speed = 0;
	priv->duplex = -1;

	/* Try connecting to PHY */
	pn = of_parse_phandle(np, "phy-handle", 0);
	if (!pn) {
		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		if (of_phy_is_fixed_link(np)) {
			err = of_phy_register_fixed_link(np);
			if (err)
				return err;
		}
		pn = of_node_get(np);
	}

	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
				     : priv->phy_interface;
	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
	of_node_put(pn);
	if (!phydev) {
		netdev_err(ndev, "failed to connect PHY\n");
		err = -ENOENT;
		goto err_deregister_fixed_link;
	}

	if (!info->half_duplex) {
		/* 10BASE, Pause and Asym Pause are not supported */
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

		/* Half Duplex is not supported */
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	}

	phy_attached_info(phydev);

	return 0;

err_deregister_fixed_link:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);

	return err;
}

/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
	int error;

	error = ravb_phy_init(ndev);
	if (error)
		return error;

	phy_start(ndev->phydev);

	return 0;
}

static u32 ravb_get_msglevel(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ravb_private *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_csum_offload_errors",
	"rx_queue_0_over_errors",
};

static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_missed_errors",
	"rx_queue_0_over_errors",

	"rx_queue_1_current",
	"tx_queue_1_current",
	"rx_queue_1_dirty",
	"tx_queue_1_dirty",
	"rx_queue_1_packets",
	"tx_queue_1_packets",
	"rx_queue_1_bytes",
	"tx_queue_1_bytes",
	"rx_queue_1_mcast_packets",
	"rx_queue_1_errors",
	"rx_queue_1_crc_errors",
	"rx_queue_1_frame_errors",
	"rx_queue_1_length_errors",
	"rx_queue_1_missed_errors",
	"rx_queue_1_over_errors",
};

static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
	struct ravb_private *priv = netdev_priv(netdev);
	const struct ravb_hw_info *info = priv->info;

	switch (sset) {
	case ETH_SS_STATS:
		return info->stats_len;
	default:
		return -EOPNOTSUPP;
	}
}

static void ravb_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *estats, u64 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int num_rx_q;
	int i = 0;
	int q;

	num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
	/* Device-specific stats */
	for (q = RAVB_BE; q < num_rx_q; q++) {
		struct net_device_stats *stats = &priv->stats[q];

		data[i++] = priv->cur_rx[q];
		data[i++] = priv->cur_tx[q];
		data[i++] = priv->dirty_rx[q];
		data[i++] = priv->dirty_tx[q];
		data[i++] = stats->rx_packets;
		data[i++] = stats->tx_packets;
		data[i++] = stats->rx_bytes;
		data[i++] = stats->tx_bytes;
		data[i++] = stats->multicast;
		data[i++] = stats->rx_errors;
		data[i++] = stats->rx_crc_errors;
		data[i++] = stats->rx_frame_errors;
		data[i++] = stats->rx_length_errors;
		data[i++] = stats->rx_missed_errors;
		data[i++] = stats->rx_over_errors;
	}
}

static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, info->gstrings_stats, info->gstrings_size);
		break;
	}
}

static void ravb_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ring->rx_max_pending = BE_RX_RING_MAX;
	ring->tx_max_pending = BE_TX_RING_MAX;
	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
}

static int ravb_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	if (ring->tx_pending > BE_TX_RING_MAX ||
	    ring->rx_pending > BE_RX_RING_MAX ||
	    ring->tx_pending < BE_TX_RING_MIN ||
	    ring->rx_pending < BE_RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		/* Stop PTP Clock driver */
		if (info->gptp)
			ravb_ptp_stop(ndev);
		/* Wait for DMA stopping */
		error = ravb_stop_dma(ndev);
		if (error) {
			netdev_err(ndev,
				   "cannot set ringparam! Any AVB processes are still running?\n");
			return error;
		}
		synchronize_irq(ndev->irq);

		/* Free all the skb's in the RX queue and the DMA buffers. */
		ravb_ring_free(ndev, RAVB_BE);
		if (info->nc_queues)
			ravb_ring_free(ndev, RAVB_NC);
	}

	/* Set new parameters */
	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;

	if (netif_running(ndev)) {
		error = ravb_dmac_init(ndev);
		if (error) {
			netdev_err(ndev,
				   "%s: ravb_dmac_init() failed, error %d\n",
				   __func__, error);
			return error;
		}

		ravb_emac_init(ndev);

		/* Initialise PTP Clock driver */
		if (info->gptp)
			ravb_ptp_init(ndev, priv->pdev);

		netif_device_attach(ndev);
	}

	return 0;
}

static int ravb_get_ts_info(struct net_device *ndev,
			    struct kernel_ethtool_ts_info *info)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *hw_info = priv->info;

	if (hw_info->gptp || hw_info->ccc_gac) {
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;
		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
		info->rx_filters =
			(1 << HWTSTAMP_FILTER_NONE) |
			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			(1 << HWTSTAMP_FILTER_ALL);
		info->phc_index = ptp_clock_index(priv->ptp.clock);
	}

	return 0;
}

static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct ravb_private *priv = netdev_priv(ndev);

	wol->supported = WAKE_MAGIC;
	wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
}

static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);

	device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);

	return 0;
}

static const struct ethtool_ops ravb_ethtool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_msglevel		= ravb_get_msglevel,
	.set_msglevel		= ravb_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ravb_get_strings,
	.get_ethtool_stats	= ravb_get_ethtool_stats,
	.get_sset_count		= ravb_get_sset_count,
	.get_ringparam		= ravb_get_ringparam,
	.set_ringparam		= ravb_set_ringparam,
	.get_ts_info		= ravb_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_wol		= ravb_get_wol,
	.set_wol		= ravb_set_wol,
};

static int ravb_set_config_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	if (info->gptp) {
		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
		if (error)
			return error;
		/* Set CSEL value */
		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
	} else if (info->ccc_gac) {
		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
	} else {
		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
	}

	return error;
}

static void ravb_set_gti(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	if (!(info->gptp || info->ccc_gac))
		return;

	ravb_write(ndev, priv->gti_tiv, GTI);

	/* Request GTI loading */
	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
}

static int ravb_compute_gti(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = ndev->dev.parent;
	unsigned long rate;
	u64 inc;

	if (!(info->gptp || info->ccc_gac))
		return 0;

	if (info->gptp_ref_clk)
		rate = clk_get_rate(priv->gptp_clk);
	else
		rate = clk_get_rate(priv->clk);
	if (!rate)
		return -EINVAL;

	inc = div64_ul(1000000000ULL << 20, rate);
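	/* GTI.TIV is the per-cycle timer increment in 2^-20 ns units. As an
	 * illustrative example, a 125 MHz gPTP reference clock would give
	 * inc = (10^9 << 20) / 125000000 = 8 << 20 = 0x800000.
	 */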
1859
1860	if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
1861		dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
1862			inc, GTI_TIV_MIN, GTI_TIV_MAX);
1863		return -EINVAL;
1864	}
1865	priv->gti_tiv = inc;
1866
1867	return 0;
1868}
1869
1870/* Set tx and rx clock internal delay modes */
1871static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
1872{
1873	struct ravb_private *priv = netdev_priv(ndev);
1874	bool explicit_delay = false;
1875	u32 delay;
1876
1877	if (!priv->info->internal_delay)
1878		return;
1879
1880	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
1881		/* Valid values are 0 and 1800, according to DT bindings */
1882		priv->rxcidm = !!delay;
1883		explicit_delay = true;
1884	}
1885	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
1886		/* Valid values are 0 and 2000, according to DT bindings */
1887		priv->txcidm = !!delay;
1888		explicit_delay = true;
1889	}
1890
1891	if (explicit_delay)
1892		return;
1893
1894	/* Fall back to legacy rgmii-*id behavior */
1895	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1896	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1897		priv->rxcidm = 1;
1898		priv->rgmii_override = 1;
1899	}
1900
1901	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1902	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1903		priv->txcidm = 1;
1904		priv->rgmii_override = 1;
1905	}
1906}
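
/* For reference, a device tree fragment exercising the explicit
 * per-direction properties parsed above could look like this sketch
 * (node label and values illustrative; see the etheravb DT bindings):
 *
 *   &avb {
 *           phy-mode = "rgmii";
 *           rx-internal-delay-ps = <1800>;
 *           tx-internal-delay-ps = <2000>;
 *   };
 *
 * Only when both properties are absent does the legacy rgmii-*id
 * phy-mode fallback above pick the delays.
 */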
1907
1908static void ravb_set_delay_mode(struct net_device *ndev)
1909{
1910	struct ravb_private *priv = netdev_priv(ndev);
1911	u32 set = 0;
1912
1913	if (!priv->info->internal_delay)
1914		return;
1915
1916	if (priv->rxcidm)
1917		set |= APSR_RDM;
1918	if (priv->txcidm)
1919		set |= APSR_TDM;
1920	ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
1921}
1922
1923/* Network device open function for Ethernet AVB */
1924static int ravb_open(struct net_device *ndev)
1925{
1926	struct ravb_private *priv = netdev_priv(ndev);
1927	const struct ravb_hw_info *info = priv->info;
1928	struct device *dev = &priv->pdev->dev;
1929	int error;
1930
1931	napi_enable(&priv->napi[RAVB_BE]);
1932	if (info->nc_queues)
1933		napi_enable(&priv->napi[RAVB_NC]);
1934
1935	error = pm_runtime_resume_and_get(dev);
1936	if (error < 0)
1937		goto out_napi_off;
1938
1939	/* Set AVB config mode */
1940	error = ravb_set_config_mode(ndev);
1941	if (error)
1942		goto out_rpm_put;
1943
1944	ravb_set_delay_mode(ndev);
1945	ravb_write(ndev, priv->desc_bat_dma, DBAT);
1946
1947	/* Device init */
1948	error = ravb_dmac_init(ndev);
1949	if (error)
1950		goto out_set_reset;
1951
1952	ravb_emac_init(ndev);
1953
1954	ravb_set_gti(ndev);
1955
1956	/* Initialise PTP Clock driver */
1957	if (info->gptp || info->ccc_gac)
1958		ravb_ptp_init(ndev, priv->pdev);
1959
1960	/* PHY control start */
1961	error = ravb_phy_start(ndev);
1962	if (error)
1963		goto out_ptp_stop;
1964
1965	netif_tx_start_all_queues(ndev);
1966
1967	return 0;
1968
1969out_ptp_stop:
1970	/* Stop PTP Clock driver */
1971	if (info->gptp || info->ccc_gac)
1972		ravb_ptp_stop(ndev);
1973	ravb_stop_dma(ndev);
1974out_set_reset:
1975	ravb_set_opmode(ndev, CCC_OPC_RESET);
1976out_rpm_put:
1977	pm_runtime_mark_last_busy(dev);
1978	pm_runtime_put_autosuspend(dev);
1979out_napi_off:
1980	if (info->nc_queues)
1981		napi_disable(&priv->napi[RAVB_NC]);
1982	napi_disable(&priv->napi[RAVB_BE]);
1983	return error;
1984}
1985
1986/* Timeout function for Ethernet AVB */
1987static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1988{
1989	struct ravb_private *priv = netdev_priv(ndev);
1990
1991	netif_err(priv, tx_err, ndev,
1992		  "transmit timed out, status %08x, resetting...\n",
1993		  ravb_read(ndev, ISS));
1994
1995	/* Bump the tx_errors counter */
1996	ndev->stats.tx_errors++;
1997
1998	schedule_work(&priv->work);
1999}
2000
2001static void ravb_tx_timeout_work(struct work_struct *work)
2002{
2003	struct ravb_private *priv = container_of(work, struct ravb_private,
2004						 work);
2005	const struct ravb_hw_info *info = priv->info;
2006	struct net_device *ndev = priv->ndev;
2007	int error;
2008
2009	if (!rtnl_trylock()) {
2010		usleep_range(1000, 2000);
2011		schedule_work(&priv->work);
2012		return;
2013	}
2014
2015	netif_tx_stop_all_queues(ndev);
2016
2017	/* Stop PTP Clock driver */
2018	if (info->gptp)
2019		ravb_ptp_stop(ndev);
2020
2021	/* Wait for DMA stopping */
2022	if (ravb_stop_dma(ndev)) {
2023		/* If ravb_stop_dma() fails, the hardware is still operating
2024		 * for TX and/or RX, so the re-initialization below must not
2025		 * be attempted, because ravb_dmac_init() could fail as well.
2026		 * Nor should ravb_stop_dma() be retried in a loop here, since
2027		 * that could end up waiting forever. Instead, simply re-enable
2028		 * TX and RX and skip the following re-initialization
2029		 * procedure.
2030		 */
2031		ravb_rcv_snd_enable(ndev);
2032		goto out;
2033	}
2034
2035	ravb_ring_free(ndev, RAVB_BE);
2036	if (info->nc_queues)
2037		ravb_ring_free(ndev, RAVB_NC);
2038
2039	/* Device init */
2040	error = ravb_dmac_init(ndev);
2041	if (error) {
2042		/* If ravb_dmac_init() fails, the descriptors have already been
2043		 * freed, so return here instead of re-enabling TX and RX via
2044		 * ravb_emac_init().
2045		 */
2046		netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
2047			   __func__, error);
2048		goto out_unlock;
2049	}
2050	ravb_emac_init(ndev);
2051
2052out:
2053	/* Initialise PTP Clock driver */
2054	if (info->gptp)
2055		ravb_ptp_init(ndev, priv->pdev);
2056
2057	netif_tx_start_all_queues(ndev);
2058
2059out_unlock:
2060	rtnl_unlock();
2061}
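
/* The rtnl_trylock()/reschedule dance at the top of this handler matters:
 * the reset work must run under RTNL because it re-initializes the device,
 * but taking the lock unconditionally could deadlock with ravb_close(),
 * which holds RTNL while waiting in cancel_work_sync(). Backing off and
 * rescheduling lets the close path win.
 */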
2062
2063static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
2064{
2065	u16 net_protocol = ntohs(skb->protocol);
2066	u8 inner_protocol;
2067
2068	/* GbEth IP can calculate the checksum if:
2069	 * - there are zero or one VLAN headers with TPID=0x8100
2070	 * - the network protocol is IPv4 or IPv6
2071	 * - the transport protocol is TCP or UDP (ICMP is not offloaded)
2072	 * - the packet is not fragmented
2073	 */
2074
2075	if (net_protocol == ETH_P_8021Q) {
2076		struct vlan_hdr vhdr, *vh;
2077
2078		vh = skb_header_pointer(skb, ETH_HLEN, sizeof(vhdr), &vhdr);
2079		if (!vh)
2080			return false;
2081
2082		net_protocol = ntohs(vh->h_vlan_encapsulated_proto);
2083	}
2084
2085	switch (net_protocol) {
2086	case ETH_P_IP:
2087		inner_protocol = ip_hdr(skb)->protocol;
2088		break;
2089	case ETH_P_IPV6:
2090		inner_protocol = ipv6_hdr(skb)->nexthdr;
2091		break;
2092	default:
2093		return false;
2094	}
2095
2096	switch (inner_protocol) {
2097	case IPPROTO_TCP:
2098	case IPPROTO_UDP:
2099		return true;
2100	default:
2101		return false;
2102	}
2103}
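
/* Note that only a single 802.1Q tag (TPID 0x8100) is recognized above;
 * double-tagged (QinQ, TPID 0x88a8) frames take the default branch and the
 * caller falls back to software checksumming via skb_checksum_help().
 */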
2104
2105/* Packet transmit function for Ethernet AVB */
2106static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2107{
2108	struct ravb_private *priv = netdev_priv(ndev);
2109	const struct ravb_hw_info *info = priv->info;
2110	unsigned int num_tx_desc = priv->num_tx_desc;
2111	u16 q = skb_get_queue_mapping(skb);
2112	struct ravb_tstamp_skb *ts_skb;
2113	struct ravb_tx_desc *desc;
2114	unsigned long flags;
2115	dma_addr_t dma_addr;
2116	void *buffer;
2117	u32 entry;
2118	u32 len;
2119
2120	if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb))
2121		skb_checksum_help(skb);
2122
2123	spin_lock_irqsave(&priv->lock, flags);
2124	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
2125	    num_tx_desc) {
2126		netif_err(priv, tx_queued, ndev,
2127			  "still transmitting with the full ring!\n");
2128		netif_stop_subqueue(ndev, q);
2129		spin_unlock_irqrestore(&priv->lock, flags);
2130		return NETDEV_TX_BUSY;
2131	}
2132
2133	if (skb_put_padto(skb, ETH_ZLEN))
2134		goto exit;
2135
2136	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
2137	priv->tx_skb[q][entry / num_tx_desc] = skb;
2138
2139	if (num_tx_desc > 1) {
2140		buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
2141			 entry / num_tx_desc * DPTR_ALIGN;
2142		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
2143
2144		/* Zero length DMA descriptors are problematic as they seem
2145		 * to terminate DMA transfers. Avoid them by simply using a
2146		 * length of DPTR_ALIGN (4) when skb data is aligned to
2147		 * DPTR_ALIGN.
2148		 *
2149		 * As skb is guaranteed to have at least ETH_ZLEN (60)
2150		 * bytes of data by the call to skb_put_padto() above this
2151		 * is safe with respect to both the length of the first DMA
2152		 * descriptor (len) overflowing the available data and the
2153		 * length of the second DMA descriptor (skb->len - len)
2154		 * being negative.
2155		 */
2156		if (len == 0)
2157			len = DPTR_ALIGN;
2158
2159		memcpy(buffer, skb->data, len);
2160		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
2161					  DMA_TO_DEVICE);
2162		if (dma_mapping_error(ndev->dev.parent, dma_addr))
2163			goto drop;
2164
2165		desc = &priv->tx_ring[q][entry];
2166		desc->ds_tagl = cpu_to_le16(len);
2167		desc->dptr = cpu_to_le32(dma_addr);
2168
2169		buffer = skb->data + len;
2170		len = skb->len - len;
2171		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
2172					  DMA_TO_DEVICE);
2173		if (dma_mapping_error(ndev->dev.parent, dma_addr))
2174			goto unmap;
2175
2176		desc++;
2177	} else {
2178		desc = &priv->tx_ring[q][entry];
2179		len = skb->len;
2180		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
2181					  DMA_TO_DEVICE);
2182		if (dma_mapping_error(ndev->dev.parent, dma_addr))
2183			goto drop;
2184	}
2185	desc->ds_tagl = cpu_to_le16(len);
2186	desc->dptr = cpu_to_le32(dma_addr);
2187
2188	/* TX timestamp required */
2189	if (info->gptp || info->ccc_gac) {
2190		if (q == RAVB_NC) {
2191			ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
2192			if (!ts_skb) {
2193				if (num_tx_desc > 1) {
2194					desc--;
2195					dma_unmap_single(ndev->dev.parent, dma_addr,
2196							 len, DMA_TO_DEVICE);
2197				}
2198				goto unmap;
2199			}
2200			ts_skb->skb = skb_get(skb);
2201			ts_skb->tag = priv->ts_skb_tag++;
2202			priv->ts_skb_tag &= 0x3ff;
2203			list_add_tail(&ts_skb->list, &priv->ts_skb_list);
2204
2205			/* TAG and timestamp required flag */
2206			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2207			desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
2208			desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
2209		}
2210
2211		skb_tx_timestamp(skb);
2212	}
2213	/* Descriptor type must be set after all the above writes */
2214	dma_wmb();
2215	if (num_tx_desc > 1) {
2216		desc->die_dt = DT_FEND;
2217		desc--;
2218		desc->die_dt = DT_FSTART;
2219	} else {
2220		desc->die_dt = DT_FSINGLE;
2221	}
2222	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
2223
2224	priv->cur_tx[q] += num_tx_desc;
2225	if (priv->cur_tx[q] - priv->dirty_tx[q] >
2226	    (priv->num_tx_ring[q] - 1) * num_tx_desc &&
2227	    !ravb_tx_free(ndev, q, true))
2228		netif_stop_subqueue(ndev, q);
2229
2230exit:
2231	spin_unlock_irqrestore(&priv->lock, flags);
2232	return NETDEV_TX_OK;
2233
2234unmap:
2235	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
2236			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
2237drop:
2238	dev_kfree_skb_any(skb);
2239	priv->tx_skb[q][entry / num_tx_desc] = NULL;
2240	goto exit;
2241}
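
/* Sketch of the two-descriptor TX layout used above when num_tx_desc == 2
 * (the aligned_tx case), with DPTR_ALIGN == 4. Here "head" is the distance
 * from skb->data to the next DPTR_ALIGN boundary (or DPTR_ALIGN itself if
 * the data is already aligned):
 *
 *   skb->data:  | head (copied into aligned bounce buffer) | tail ...   |
 *   desc[0]:    DT_FSTART, dptr -> bounce buffer,    ds_tagl = head
 *   desc[1]:    DT_FEND,   dptr -> skb->data + head, ds_tagl = skb->len - head
 *
 * The descriptor types are written only after the dma_wmb(), so the DMAC
 * can never observe a half-initialized frame.
 */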
2242
2243static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
2244			     struct net_device *sb_dev)
2245{
2246	/* If skb needs TX timestamp, it is handled in network control queue */
2247	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
2248							       RAVB_BE;
2249}
2251
2252static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
2253{
2254	struct ravb_private *priv = netdev_priv(ndev);
2255	const struct ravb_hw_info *info = priv->info;
2256	struct net_device_stats *nstats, *stats0, *stats1;
2257	struct device *dev = &priv->pdev->dev;
2258
2259	nstats = &ndev->stats;
2260
2261	pm_runtime_get_noresume(dev);
2262
2263	if (!pm_runtime_active(dev))
2264		goto out_rpm_put;
2265
2266	stats0 = &priv->stats[RAVB_BE];
2267
2268	if (info->tx_counters) {
2269		nstats->tx_dropped += ravb_read(ndev, TROCR);
2270		ravb_write(ndev, 0, TROCR);	/* (write clear) */
2271	}
2272
2273	if (info->carrier_counters) {
2274		nstats->collisions += ravb_read(ndev, CXR41);
2275		ravb_write(ndev, 0, CXR41);	/* (write clear) */
2276		nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
2277		ravb_write(ndev, 0, CXR42);	/* (write clear) */
2278	}
2279
2280	nstats->rx_packets = stats0->rx_packets;
2281	nstats->tx_packets = stats0->tx_packets;
2282	nstats->rx_bytes = stats0->rx_bytes;
2283	nstats->tx_bytes = stats0->tx_bytes;
2284	nstats->multicast = stats0->multicast;
2285	nstats->rx_errors = stats0->rx_errors;
2286	nstats->rx_crc_errors = stats0->rx_crc_errors;
2287	nstats->rx_frame_errors = stats0->rx_frame_errors;
2288	nstats->rx_length_errors = stats0->rx_length_errors;
2289	nstats->rx_missed_errors = stats0->rx_missed_errors;
2290	nstats->rx_over_errors = stats0->rx_over_errors;
2291	if (info->nc_queues) {
2292		stats1 = &priv->stats[RAVB_NC];
2293
2294		nstats->rx_packets += stats1->rx_packets;
2295		nstats->tx_packets += stats1->tx_packets;
2296		nstats->rx_bytes += stats1->rx_bytes;
2297		nstats->tx_bytes += stats1->tx_bytes;
2298		nstats->multicast += stats1->multicast;
2299		nstats->rx_errors += stats1->rx_errors;
2300		nstats->rx_crc_errors += stats1->rx_crc_errors;
2301		nstats->rx_frame_errors += stats1->rx_frame_errors;
2302		nstats->rx_length_errors += stats1->rx_length_errors;
2303		nstats->rx_missed_errors += stats1->rx_missed_errors;
2304		nstats->rx_over_errors += stats1->rx_over_errors;
2305	}
2306
2307out_rpm_put:
2308	pm_runtime_put_noidle(dev);
2309	return nstats;
2310}
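
/* The TROCR/CXR41/CXR42 accesses above implement read-and-clear counters:
 * each register is read, accumulated into net_device_stats and then
 * written back to zero, so every call only picks up the delta since the
 * previous one. The pm_runtime_get_noresume()/pm_runtime_active() pair
 * guards against touching registers while the controller is
 * runtime-suspended.
 */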
2311
2312/* Update promiscuous bit */
2313static void ravb_set_rx_mode(struct net_device *ndev)
2314{
2315	struct ravb_private *priv = netdev_priv(ndev);
2316	unsigned long flags;
2317
2318	spin_lock_irqsave(&priv->lock, flags);
2319	ravb_modify(ndev, ECMR, ECMR_PRM,
2320		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
2321	spin_unlock_irqrestore(&priv->lock, flags);
2322}
2323
2324/* Device close function for Ethernet AVB */
2325static int ravb_close(struct net_device *ndev)
2326{
2327	struct device_node *np = ndev->dev.parent->of_node;
2328	struct ravb_private *priv = netdev_priv(ndev);
2329	const struct ravb_hw_info *info = priv->info;
2330	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
2331	struct device *dev = &priv->pdev->dev;
2332	int error;
2333
2334	netif_tx_stop_all_queues(ndev);
2335
2336	/* Disable interrupts by clearing the interrupt masks. */
2337	ravb_write(ndev, 0, RIC0);
2338	ravb_write(ndev, 0, RIC2);
2339	ravb_write(ndev, 0, TIC);
2340
2341	/* PHY disconnect */
2342	if (ndev->phydev) {
2343		phy_stop(ndev->phydev);
2344		phy_disconnect(ndev->phydev);
2345		if (of_phy_is_fixed_link(np))
2346			of_phy_deregister_fixed_link(np);
2347	}
2348
2349	/* Stop PTP Clock driver */
2350	if (info->gptp || info->ccc_gac)
2351		ravb_ptp_stop(ndev);
2352
2353	/* Set the config mode to stop the AVB-DMAC's processes */
2354	if (ravb_stop_dma(ndev) < 0)
2355		netdev_err(ndev,
2356			   "device will be stopped after h/w processes are done.\n");
2357
2358	/* Clear the timestamp list */
2359	if (info->gptp || info->ccc_gac) {
2360		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
2361			list_del(&ts_skb->list);
2362			kfree_skb(ts_skb->skb);
2363			kfree(ts_skb);
2364		}
2365	}
2366
2367	cancel_work_sync(&priv->work);
2368
2369	if (info->nc_queues)
2370		napi_disable(&priv->napi[RAVB_NC]);
2371	napi_disable(&priv->napi[RAVB_BE]);
2372
2373	/* Free all the skbs in the RX queue and the DMA buffers. */
2374	ravb_ring_free(ndev, RAVB_BE);
2375	if (info->nc_queues)
2376		ravb_ring_free(ndev, RAVB_NC);
2377
2378	/* Update statistics. */
2379	ravb_get_stats(ndev);
2380
2381	/* Set reset mode. */
2382	error = ravb_set_opmode(ndev, CCC_OPC_RESET);
2383	if (error)
2384		return error;
2385
2386	pm_runtime_mark_last_busy(dev);
2387	pm_runtime_put_autosuspend(dev);
2388
2389	return 0;
2390}
2391
2392static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
2393{
2394	struct ravb_private *priv = netdev_priv(ndev);
2395	struct hwtstamp_config config;
2396
2397	config.flags = 0;
2398	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
2399						HWTSTAMP_TX_OFF;
2400	switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
2401	case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
2402		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
2403		break;
2404	case RAVB_RXTSTAMP_TYPE_ALL:
2405		config.rx_filter = HWTSTAMP_FILTER_ALL;
2406		break;
2407	default:
2408		config.rx_filter = HWTSTAMP_FILTER_NONE;
2409	}
2410
2411	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
2412		-EFAULT : 0;
2413}
2414
2415/* Control hardware time stamping */
2416static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
2417{
2418	struct ravb_private *priv = netdev_priv(ndev);
2419	struct hwtstamp_config config;
2420	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
2421	u32 tstamp_tx_ctrl;
2422
2423	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
2424		return -EFAULT;
2425
2426	switch (config.tx_type) {
2427	case HWTSTAMP_TX_OFF:
2428		tstamp_tx_ctrl = 0;
2429		break;
2430	case HWTSTAMP_TX_ON:
2431		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
2432		break;
2433	default:
2434		return -ERANGE;
2435	}
2436
2437	switch (config.rx_filter) {
2438	case HWTSTAMP_FILTER_NONE:
2439		tstamp_rx_ctrl = 0;
2440		break;
2441	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2442		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
2443		break;
2444	default:
2445		config.rx_filter = HWTSTAMP_FILTER_ALL;
2446		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
2447	}
2448
2449	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
2450	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
2451
2452	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
2453		-EFAULT : 0;
2454}
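
/* As is conventional for SIOCSHWTSTAMP, an unsupported rx_filter is not
 * rejected: the default case widens the request to HWTSTAMP_FILTER_ALL
 * and the filter actually applied is reported back to userspace through
 * the final copy_to_user().
 */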
2455
2456/* ioctl to device function */
2457static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
2458{
2459	struct phy_device *phydev = ndev->phydev;
2460
2461	if (!netif_running(ndev))
2462		return -EINVAL;
2463
2464	if (!phydev)
2465		return -ENODEV;
2466
2467	switch (cmd) {
2468	case SIOCGHWTSTAMP:
2469		return ravb_hwtstamp_get(ndev, req);
2470	case SIOCSHWTSTAMP:
2471		return ravb_hwtstamp_set(ndev, req);
2472	}
2473
2474	return phy_mii_ioctl(phydev, req, cmd);
2475}
2476
2477static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
2478{
2479	struct ravb_private *priv = netdev_priv(ndev);
2480
2481	WRITE_ONCE(ndev->mtu, new_mtu);
2482
2483	if (netif_running(ndev)) {
2484		synchronize_irq(priv->emac_irq);
2485		ravb_emac_init(ndev);
2486	}
2487
2488	netdev_update_features(ndev);
2489
2490	return 0;
2491}
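
/* No ring reallocation is needed on an MTU change: RX buffers are sized
 * up front from the per-SoC rx_max_frame_size, so on a running interface
 * it is apparently sufficient to quiesce the EMAC interrupt and rerun
 * ravb_emac_init() to reprogram the maximum frame length.
 */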
2492
2493static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
2494{
2495	struct ravb_private *priv = netdev_priv(ndev);
2496	unsigned long flags;
2497
2498	spin_lock_irqsave(&priv->lock, flags);
2499
2500	/* Disable TX and RX */
2501	ravb_rcv_snd_disable(ndev);
2502
2503	/* Modify RX Checksum setting */
2504	ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
2505
2506	/* Enable TX and RX */
2507	ravb_rcv_snd_enable(ndev);
2508
2509	spin_unlock_irqrestore(&priv->lock, flags);
2510}
2511
2512static int ravb_endisable_csum_gbeth(struct net_device *ndev, enum ravb_reg reg,
2513				     u32 val, u32 mask)
2514{
2515	u32 csr0 = CSR0_TPE | CSR0_RPE;
2516	int ret;
2517
2518	ravb_write(ndev, csr0 & ~mask, CSR0);
2519	ret = ravb_wait(ndev, CSR0, mask, 0);
2520	if (!ret)
2521		ravb_write(ndev, val, reg);
2522
2523	ravb_write(ndev, csr0, CSR0);
2524
2525	return ret;
2526}
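
/* Helper contract: the GbEth checksum configuration registers (CSR1 for
 * TX, CSR2 for RX) are only written while the corresponding datapath is
 * quiesced, so the helper clears the relevant CSR0 enable bit (TPE or
 * RPE), waits for the hardware to acknowledge the stop, performs the
 * write, and finally re-enables both paths via CSR0.
 */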
2527
2528static int ravb_set_features_gbeth(struct net_device *ndev,
2529				   netdev_features_t features)
2530{
2531	netdev_features_t changed = ndev->features ^ features;
2532	struct ravb_private *priv = netdev_priv(ndev);
2533	unsigned long flags;
2534	int ret = 0;
2535	u32 val;
2536
2537	spin_lock_irqsave(&priv->lock, flags);
2538	if (changed & NETIF_F_RXCSUM) {
2539		if (features & NETIF_F_RXCSUM)
2540			val = CSR2_CSUM_ENABLE;
2541		else
2542			val = 0;
2543
2544		ret = ravb_endisable_csum_gbeth(ndev, CSR2, val, CSR0_RPE);
2545		if (ret)
2546			goto done;
2547	}
2548
2549	if (changed & NETIF_F_HW_CSUM) {
2550		if (features & NETIF_F_HW_CSUM)
2551			val = CSR1_CSUM_ENABLE;
2552		else
2553			val = 0;
2554
2555		ret = ravb_endisable_csum_gbeth(ndev, CSR1, val, CSR0_TPE);
2556		if (ret)
2557			goto done;
2558	}
2559
2560done:
2561	spin_unlock_irqrestore(&priv->lock, flags);
2562
2563	return ret;
2564}
2565
2566static int ravb_set_features_rcar(struct net_device *ndev,
2567				  netdev_features_t features)
2568{
2569	netdev_features_t changed = ndev->features ^ features;
2570
2571	if (changed & NETIF_F_RXCSUM)
2572		ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
2573
2574	return 0;
2575}
2576
2577static int ravb_set_features(struct net_device *ndev,
2578			     netdev_features_t features)
2579{
2580	struct ravb_private *priv = netdev_priv(ndev);
2581	const struct ravb_hw_info *info = priv->info;
2582	struct device *dev = &priv->pdev->dev;
2583	int ret;
2584
2585	pm_runtime_get_noresume(dev);
2586
2587	if (pm_runtime_active(dev))
2588		ret = info->set_feature(ndev, features);
2589	else
2590		ret = 0;
2591
2592	pm_runtime_put_noidle(dev);
2593
2594	if (ret)
2595		return ret;
2596
2597	ndev->features = features;
2598
2599	return 0;
2600}
2601
2602static const struct net_device_ops ravb_netdev_ops = {
2603	.ndo_open		= ravb_open,
2604	.ndo_stop		= ravb_close,
2605	.ndo_start_xmit		= ravb_start_xmit,
2606	.ndo_select_queue	= ravb_select_queue,
2607	.ndo_get_stats		= ravb_get_stats,
2608	.ndo_set_rx_mode	= ravb_set_rx_mode,
2609	.ndo_tx_timeout		= ravb_tx_timeout,
2610	.ndo_eth_ioctl		= ravb_do_ioctl,
2611	.ndo_change_mtu		= ravb_change_mtu,
2612	.ndo_validate_addr	= eth_validate_addr,
2613	.ndo_set_mac_address	= eth_mac_addr,
2614	.ndo_set_features	= ravb_set_features,
2615};
2616
2617/* MDIO bus init function */
2618static int ravb_mdio_init(struct ravb_private *priv)
2619{
2620	struct platform_device *pdev = priv->pdev;
2621	struct device *dev = &pdev->dev;
2622	struct device_node *mdio_node;
2623	struct phy_device *phydev;
2624	struct device_node *pn;
2625	int error;
2626
2627	/* Bitbang init */
2628	priv->mdiobb.ops = &bb_ops;
2629
2630	/* MII controller setting */
2631	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
2632	if (!priv->mii_bus)
2633		return -ENOMEM;
2634
2635	/* Hook up MII support for ethtool */
2636	priv->mii_bus->name = "ravb_mii";
2637	priv->mii_bus->parent = dev;
2638	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2639		 pdev->name, pdev->id);
2640
2641	/* Register MDIO bus */
2642	mdio_node = of_get_child_by_name(dev->of_node, "mdio");
2643	if (!mdio_node) {
2644		/* Backwards compatibility for device trees lacking an "mdio" subnode */
2645		mdio_node = of_node_get(dev->of_node);
2646	}
2647	error = of_mdiobus_register(priv->mii_bus, mdio_node);
2648	of_node_put(mdio_node);
2649	if (error)
2650		goto out_free_bus;
2651
2652	pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
2653	phydev = of_phy_find_device(pn);
2654	if (phydev) {
2655		phydev->mac_managed_pm = true;
2656		put_device(&phydev->mdio.dev);
2657	}
2658	of_node_put(pn);
2659
2660	return 0;
2661
2662out_free_bus:
2663	free_mdio_bitbang(priv->mii_bus);
2664	return error;
2665}
2666
2667/* MDIO bus release function */
2668static int ravb_mdio_release(struct ravb_private *priv)
2669{
2670	/* Unregister mdio bus */
2671	mdiobus_unregister(priv->mii_bus);
2672
2673	/* Free bitbang info */
2674	free_mdio_bitbang(priv->mii_bus);
2675
2676	return 0;
2677}
2678
2679static const struct ravb_hw_info ravb_gen2_hw_info = {
2680	.receive = ravb_rx_rcar,
2681	.set_rate = ravb_set_rate_rcar,
2682	.set_feature = ravb_set_features_rcar,
2683	.dmac_init = ravb_dmac_init_rcar,
2684	.emac_init = ravb_emac_init_rcar,
2685	.gstrings_stats = ravb_gstrings_stats,
2686	.gstrings_size = sizeof(ravb_gstrings_stats),
2687	.net_hw_features = NETIF_F_RXCSUM,
2688	.net_features = NETIF_F_RXCSUM,
2689	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2690	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2691	.tx_max_frame_size = SZ_2K,
2692	.rx_max_frame_size = SZ_2K,
2693	.rx_buffer_size = SZ_2K +
2694			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
2695	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
2696	.aligned_tx = 1,
2697	.gptp = 1,
2698	.nc_queues = 1,
2699	.magic_pkt = 1,
2700};
2701
2702static const struct ravb_hw_info ravb_gen3_hw_info = {
2703	.receive = ravb_rx_rcar,
2704	.set_rate = ravb_set_rate_rcar,
2705	.set_feature = ravb_set_features_rcar,
2706	.dmac_init = ravb_dmac_init_rcar,
2707	.emac_init = ravb_emac_init_rcar,
2708	.gstrings_stats = ravb_gstrings_stats,
2709	.gstrings_size = sizeof(ravb_gstrings_stats),
2710	.net_hw_features = NETIF_F_RXCSUM,
2711	.net_features = NETIF_F_RXCSUM,
2712	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2713	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2714	.tx_max_frame_size = SZ_2K,
2715	.rx_max_frame_size = SZ_2K,
2716	.rx_buffer_size = SZ_2K +
2717			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
2718	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
2719	.internal_delay = 1,
2720	.tx_counters = 1,
2721	.multi_irqs = 1,
2722	.irq_en_dis = 1,
2723	.ccc_gac = 1,
2724	.nc_queues = 1,
2725	.magic_pkt = 1,
2726};
2727
2728static const struct ravb_hw_info ravb_gen4_hw_info = {
2729	.receive = ravb_rx_rcar,
2730	.set_rate = ravb_set_rate_rcar,
2731	.set_feature = ravb_set_features_rcar,
2732	.dmac_init = ravb_dmac_init_rcar,
2733	.emac_init = ravb_emac_init_rcar_gen4,
2734	.gstrings_stats = ravb_gstrings_stats,
2735	.gstrings_size = sizeof(ravb_gstrings_stats),
2736	.net_hw_features = NETIF_F_RXCSUM,
2737	.net_features = NETIF_F_RXCSUM,
2738	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2739	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2740	.tx_max_frame_size = SZ_2K,
2741	.rx_max_frame_size = SZ_2K,
2742	.rx_buffer_size = SZ_2K +
2743			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
2744	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
2745	.internal_delay = 1,
2746	.tx_counters = 1,
2747	.multi_irqs = 1,
2748	.irq_en_dis = 1,
2749	.ccc_gac = 1,
2750	.nc_queues = 1,
2751	.magic_pkt = 1,
2752};
2753
2754static const struct ravb_hw_info ravb_rzv2m_hw_info = {
2755	.receive = ravb_rx_rcar,
2756	.set_rate = ravb_set_rate_rcar,
2757	.set_feature = ravb_set_features_rcar,
2758	.dmac_init = ravb_dmac_init_rcar,
2759	.emac_init = ravb_emac_init_rcar,
2760	.gstrings_stats = ravb_gstrings_stats,
2761	.gstrings_size = sizeof(ravb_gstrings_stats),
2762	.net_hw_features = NETIF_F_RXCSUM,
2763	.net_features = NETIF_F_RXCSUM,
2764	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2765	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2766	.tx_max_frame_size = SZ_2K,
2767	.rx_max_frame_size = SZ_2K,
2768	.rx_buffer_size = SZ_2K +
2769			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
2770	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
2771	.multi_irqs = 1,
2772	.err_mgmt_irqs = 1,
2773	.gptp = 1,
2774	.gptp_ref_clk = 1,
2775	.nc_queues = 1,
2776	.magic_pkt = 1,
2777};
2778
2779static const struct ravb_hw_info gbeth_hw_info = {
2780	.receive = ravb_rx_gbeth,
2781	.set_rate = ravb_set_rate_gbeth,
2782	.set_feature = ravb_set_features_gbeth,
2783	.dmac_init = ravb_dmac_init_gbeth,
2784	.emac_init = ravb_emac_init_gbeth,
2785	.gstrings_stats = ravb_gstrings_stats_gbeth,
2786	.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
2787	.net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
2788	.net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
2789	.vlan_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
2790	.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
2791	.tccr_mask = TCCR_TSRQ0,
2792	.tx_max_frame_size = 1522,
2793	.rx_max_frame_size = SZ_8K,
2794	.rx_buffer_size = SZ_2K,
2795	.rx_desc_size = sizeof(struct ravb_rx_desc),
2796	.aligned_tx = 1,
2797	.coalesce_irqs = 1,
2798	.tx_counters = 1,
2799	.carrier_counters = 1,
2800	.half_duplex = 1,
2801};
2802
2803static const struct of_device_id ravb_match_table[] = {
2804	{ .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
2805	{ .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
2806	{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
2807	{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
2808	{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
2809	{ .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info },
2810	{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
2811	{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
2812	{ }
2813};
2814MODULE_DEVICE_TABLE(of, ravb_match_table);
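
/* Each compatible string above carries a pointer to its ravb_hw_info in
 * .data; ravb_probe() fetches it with of_device_get_match_data(), so all
 * per-SoC behaviour (receive handler, DMAC/EMAC init, feature flags,
 * frame size limits) is bound once at probe time instead of being
 * branched on in the hot paths.
 */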
2815
2816static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
2817			  const char *ch, int *irq, irq_handler_t handler)
2818{
2819	struct platform_device *pdev = priv->pdev;
2820	struct net_device *ndev = priv->ndev;
2821	struct device *dev = &pdev->dev;
2822	const char *devname = dev_name(dev);
2823	unsigned long flags;
2824	int error, irq_num;
2825
2826	if (irq_name) {
2827		devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
2828		if (!devname)
2829			return -ENOMEM;
2830
2831		irq_num = platform_get_irq_byname(pdev, irq_name);
2832		flags = 0;
2833	} else {
2834		irq_num = platform_get_irq(pdev, 0);
2835		flags = IRQF_SHARED;
2836	}
2837	if (irq_num < 0)
2838		return irq_num;
2839
2840	if (irq)
2841		*irq = irq_num;
2842
2843	error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);
2844	if (error)
2845		netdev_err(ndev, "cannot request IRQ %s\n", devname);
2846
2847	return error;
2848}
2849
2850static int ravb_setup_irqs(struct ravb_private *priv)
2851{
2852	const struct ravb_hw_info *info = priv->info;
2853	struct net_device *ndev = priv->ndev;
2854	const char *irq_name, *emac_irq_name;
2855	int error;
2856
2857	if (!info->multi_irqs)
2858		return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt);
2859
2860	if (info->err_mgmt_irqs) {
2861		irq_name = "dia";
2862		emac_irq_name = "line3";
2863	} else {
2864		irq_name = "ch22";
2865		emac_irq_name = "ch24";
2866	}
2867
2868	error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt);
2869	if (error)
2870		return error;
2871
2872	error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq,
2873			       ravb_emac_interrupt);
2874	if (error)
2875		return error;
2876
2877	if (info->err_mgmt_irqs) {
2878		error = ravb_setup_irq(priv, "err_a", "err_a", NULL, ravb_multi_interrupt);
2879		if (error)
2880			return error;
2881
2882		error = ravb_setup_irq(priv, "mgmt_a", "mgmt_a", NULL, ravb_multi_interrupt);
2883		if (error)
2884			return error;
2885	}
2886
2887	error = ravb_setup_irq(priv, "ch0", "ch0:rx_be", NULL, ravb_be_interrupt);
2888	if (error)
2889		return error;
2890
2891	error = ravb_setup_irq(priv, "ch1", "ch1:rx_nc", NULL, ravb_nc_interrupt);
2892	if (error)
2893		return error;
2894
2895	error = ravb_setup_irq(priv, "ch18", "ch18:tx_be", NULL, ravb_be_interrupt);
2896	if (error)
2897		return error;
2898
2899	return ravb_setup_irq(priv, "ch19", "ch19:tx_nc", NULL, ravb_nc_interrupt);
2900}
2901
2902static int ravb_probe(struct platform_device *pdev)
2903{
2904	struct device_node *np = pdev->dev.of_node;
2905	const struct ravb_hw_info *info;
2906	struct reset_control *rstc;
2907	struct ravb_private *priv;
2908	struct net_device *ndev;
2909	struct resource *res;
2910	int error, q;
2911
2912	if (!np) {
2913		dev_err(&pdev->dev,
2914			"this driver is required to be instantiated from device tree\n");
2915		return -EINVAL;
2916	}
2917
2918	rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
2919	if (IS_ERR(rstc))
2920		return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
2921				     "failed to get cpg reset\n");
2922
2923	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
2924				  NUM_TX_QUEUE, NUM_RX_QUEUE);
2925	if (!ndev)
2926		return -ENOMEM;
2927
2928	info = of_device_get_match_data(&pdev->dev);
2929
2930	ndev->features = info->net_features;
2931	ndev->hw_features = info->net_hw_features;
2932	ndev->vlan_features = info->vlan_features;
2933
2934	error = reset_control_deassert(rstc);
2935	if (error)
2936		goto out_free_netdev;
2937
2938	SET_NETDEV_DEV(ndev, &pdev->dev);
2939
2940	priv = netdev_priv(ndev);
2941	priv->info = info;
2942	priv->rstc = rstc;
2943	priv->ndev = ndev;
2944	priv->pdev = pdev;
2945	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
2946	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
2947	if (info->nc_queues) {
2948		priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
2949		priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
2950	}
2951
2952	error = ravb_setup_irqs(priv);
2953	if (error)
2954		goto out_reset_assert;
2955
2956	priv->clk = devm_clk_get(&pdev->dev, NULL);
2957	if (IS_ERR(priv->clk)) {
2958		error = PTR_ERR(priv->clk);
2959		goto out_reset_assert;
2960	}
2961
2962	if (info->gptp_ref_clk) {
2963		priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
2964		if (IS_ERR(priv->gptp_clk)) {
2965			error = PTR_ERR(priv->gptp_clk);
2966			goto out_reset_assert;
2967		}
2968	}
2969
2970	priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
2971	if (IS_ERR(priv->refclk)) {
2972		error = PTR_ERR(priv->refclk);
2973		goto out_reset_assert;
2974	}
2975	clk_prepare(priv->refclk);
2976
2977	platform_set_drvdata(pdev, ndev);
2978	pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
2979	pm_runtime_use_autosuspend(&pdev->dev);
2980	pm_runtime_enable(&pdev->dev);
2981	error = pm_runtime_resume_and_get(&pdev->dev);
2982	if (error < 0)
2983		goto out_rpm_disable;
2984
2985	priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2986	if (IS_ERR(priv->addr)) {
2987		error = PTR_ERR(priv->addr);
2988		goto out_rpm_put;
2989	}
2990
2991	/* The Ether-specific entries in the device structure. */
2992	ndev->base_addr = res->start;
2993
2994	spin_lock_init(&priv->lock);
2995	INIT_WORK(&priv->work, ravb_tx_timeout_work);
2996
2997	error = of_get_phy_mode(np, &priv->phy_interface);
2998	if (error && error != -ENODEV)
2999		goto out_rpm_put;
3000
3001	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
3002	priv->avb_link_active_low =
3003		of_property_read_bool(np, "renesas,ether-link-active-low");
3004
3005	ndev->max_mtu = info->tx_max_frame_size -
3006		(ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
3007	ndev->min_mtu = ETH_MIN_MTU;
3008
3009	/* FIXME: R-Car Gen2 has a 4-byte alignment restriction for the TX
3010	 * buffer. Use two descriptors to handle such a situation: the first
3011	 * covers the aligned portion of the data buffer and the second covers
3012	 * the remainder that spills over because of the alignment.
3013	 */
3014	priv->num_tx_desc = info->aligned_tx ? 2 : 1;
3015
3016	/* Set function */
3017	ndev->netdev_ops = &ravb_netdev_ops;
3018	ndev->ethtool_ops = &ravb_ethtool_ops;
3019
3020	error = ravb_compute_gti(ndev);
3021	if (error)
3022		goto out_rpm_put;
3023
3024	ravb_parse_delay_mode(np, ndev);
3025
3026	/* Allocate descriptor base address table */
3027	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
3028	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
3029					    &priv->desc_bat_dma, GFP_KERNEL);
3030	if (!priv->desc_bat) {
3031		dev_err(&pdev->dev,
3032			"Cannot allocate desc base address table (size %d bytes)\n",
3033			priv->desc_bat_size);
3034		error = -ENOMEM;
3035		goto out_rpm_put;
3036	}
3037	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
3038		priv->desc_bat[q].die_dt = DT_EOS;
3039
3040	/* Initialise HW timestamp list */
3041	INIT_LIST_HEAD(&priv->ts_skb_list);
3042
3043	/* Debug message level */
3044	priv->msg_enable = RAVB_DEF_MSG_ENABLE;
3045
3046	/* Set config mode as this is needed for PHY initialization. */
3047	error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
3048	if (error)
3049		goto out_rpm_put;
3050
3051	/* Read and set MAC address */
3052	ravb_read_mac_address(np, ndev);
3053	if (!is_valid_ether_addr(ndev->dev_addr)) {
3054		dev_warn(&pdev->dev,
3055			 "no valid MAC address supplied, using a random one\n");
3056		eth_hw_addr_random(ndev);
3057	}
3058
3059	/* MDIO bus init */
3060	error = ravb_mdio_init(priv);
3061	if (error) {
3062		dev_err(&pdev->dev, "failed to initialize MDIO\n");
3063		goto out_reset_mode;
3064	}
3065
3066	/* Undo previous switch to config opmode. */
3067	error = ravb_set_opmode(ndev, CCC_OPC_RESET);
3068	if (error)
3069		goto out_mdio_release;
3070
3071	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
3072	if (info->nc_queues)
3073		netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
3074
3075	if (info->coalesce_irqs) {
3076		netdev_sw_irq_coalesce_default_on(ndev);
3077		if (num_present_cpus() == 1)
3078			dev_set_threaded(ndev, true);
3079	}
3080
3081	/* Network device register */
3082	error = register_netdev(ndev);
3083	if (error)
3084		goto out_napi_del;
3085
3086	device_set_wakeup_capable(&pdev->dev, 1);
3087
3088	/* Print device information */
3089	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
3090		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
3091
3092	pm_runtime_mark_last_busy(&pdev->dev);
3093	pm_runtime_put_autosuspend(&pdev->dev);
3094
3095	return 0;
3096
3097out_napi_del:
3098	if (info->nc_queues)
3099		netif_napi_del(&priv->napi[RAVB_NC]);
3100
3101	netif_napi_del(&priv->napi[RAVB_BE]);
3102out_mdio_release:
3103	ravb_mdio_release(priv);
3104out_reset_mode:
3105	ravb_set_opmode(ndev, CCC_OPC_RESET);
3106	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
3107			  priv->desc_bat_dma);
3108out_rpm_put:
3109	pm_runtime_put(&pdev->dev);
3110out_rpm_disable:
3111	pm_runtime_disable(&pdev->dev);
3112	pm_runtime_dont_use_autosuspend(&pdev->dev);
3113	clk_unprepare(priv->refclk);
3114out_reset_assert:
3115	reset_control_assert(rstc);
3116out_free_netdev:
3117	free_netdev(ndev);
3118	return error;
3119}
3120
3121static void ravb_remove(struct platform_device *pdev)
3122{
3123	struct net_device *ndev = platform_get_drvdata(pdev);
3124	struct ravb_private *priv = netdev_priv(ndev);
3125	const struct ravb_hw_info *info = priv->info;
3126	struct device *dev = &priv->pdev->dev;
3127	int error;
3128
3129	error = pm_runtime_resume_and_get(dev);
3130	if (error < 0)
3131		return;
3132
3133	unregister_netdev(ndev);
3134	if (info->nc_queues)
3135		netif_napi_del(&priv->napi[RAVB_NC]);
3136	netif_napi_del(&priv->napi[RAVB_BE]);
3137
3138	ravb_mdio_release(priv);
3139
3140	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
3141			  priv->desc_bat_dma);
3142
3143	pm_runtime_put_sync_suspend(&pdev->dev);
3144	pm_runtime_disable(&pdev->dev);
3145	pm_runtime_dont_use_autosuspend(dev);
3146	clk_unprepare(priv->refclk);
3147	reset_control_assert(priv->rstc);
3148	free_netdev(ndev);
3149	platform_set_drvdata(pdev, NULL);
3150}
3151
3152static int ravb_wol_setup(struct net_device *ndev)
3153{
3154	struct ravb_private *priv = netdev_priv(ndev);
3155	const struct ravb_hw_info *info = priv->info;
3156
3157	/* Disable interrupts by clearing the interrupt masks. */
3158	ravb_write(ndev, 0, RIC0);
3159	ravb_write(ndev, 0, RIC2);
3160	ravb_write(ndev, 0, TIC);
3161
3162	/* Only allow ECI interrupts */
3163	synchronize_irq(priv->emac_irq);
3164	if (info->nc_queues)
3165		napi_disable(&priv->napi[RAVB_NC]);
3166	napi_disable(&priv->napi[RAVB_BE]);
3167	ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
3168
3169	/* Enable MagicPacket */
3170	ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
3171
3172	if (priv->info->ccc_gac)
3173		ravb_ptp_stop(ndev);
3174
3175	return enable_irq_wake(priv->emac_irq);
3176}
3177
3178static int ravb_wol_restore(struct net_device *ndev)
3179{
3180	struct ravb_private *priv = netdev_priv(ndev);
3181	const struct ravb_hw_info *info = priv->info;
3182	int error;
3183
3184	/* Set reset mode to rearm the WoL logic. */
3185	error = ravb_set_opmode(ndev, CCC_OPC_RESET);
3186	if (error)
3187		return error;
3188
3189	/* Set AVB config mode. */
3190	error = ravb_set_config_mode(ndev);
3191	if (error)
3192		return error;
3193
3194	if (priv->info->ccc_gac)
3195		ravb_ptp_init(ndev, priv->pdev);
3196
3197	if (info->nc_queues)
3198		napi_enable(&priv->napi[RAVB_NC]);
3199	napi_enable(&priv->napi[RAVB_BE]);
3200
3201	/* Disable MagicPacket */
3202	ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
3203
3204	ravb_close(ndev);
3205
3206	return disable_irq_wake(priv->emac_irq);
3207}
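
/* WoL round trip: ravb_wol_setup() masks every interrupt source except
 * MagicPacket detection (ECSIPR_MPDIP), enables detection with ECMR_MPDE
 * and arms the EMAC IRQ as a wake source; ravb_wol_restore() undoes this
 * by bouncing the MAC through reset back into config mode, which also
 * rearms the WoL logic for the next suspend cycle.
 */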
3208
3209static int ravb_suspend(struct device *dev)
3210{
3211	struct net_device *ndev = dev_get_drvdata(dev);
3212	struct ravb_private *priv = netdev_priv(ndev);
3213	int ret;
3214
3215	if (!netif_running(ndev))
3216		goto reset_assert;
3217
3218	netif_device_detach(ndev);
3219
3220	rtnl_lock();
3221	if (priv->wol_enabled) {
3222		ret = ravb_wol_setup(ndev);
3223		rtnl_unlock();
3224		return ret;
3225	}
3226
3227	ret = ravb_close(ndev);
3228	rtnl_unlock();
3229	if (ret)
3230		return ret;
3231
3232	ret = pm_runtime_force_suspend(&priv->pdev->dev);
3233	if (ret)
3234		return ret;
3235
3236reset_assert:
3237	return reset_control_assert(priv->rstc);
3238}
3239
3240static int ravb_resume(struct device *dev)
3241{
3242	struct net_device *ndev = dev_get_drvdata(dev);
3243	struct ravb_private *priv = netdev_priv(ndev);
3244	int ret;
3245
3246	ret = reset_control_deassert(priv->rstc);
3247	if (ret)
3248		return ret;
3249
3250	if (!netif_running(ndev))
3251		return 0;
3252
3253	rtnl_lock();
3254	/* If WoL is enabled, restore the interface. */
3255	if (priv->wol_enabled)
3256		ret = ravb_wol_restore(ndev);
3257	else
3258		ret = pm_runtime_force_resume(dev);
3259	if (ret) {
3260		rtnl_unlock();
3261		return ret;
3262	}
3263
3264	/* Reopening the interface will restore the device to the working state. */
3265	ret = ravb_open(ndev);
3266	rtnl_unlock();
3267	if (ret < 0)
3268		goto out_rpm_put;
3269
3270	ravb_set_rx_mode(ndev);
3271	netif_device_attach(ndev);
3272
3273	return 0;
3274
3275out_rpm_put:
3276	if (!priv->wol_enabled) {
3277		pm_runtime_mark_last_busy(dev);
3278		pm_runtime_put_autosuspend(dev);
3279	}
3280
3281	return ret;
3282}
3283
3284static int ravb_runtime_suspend(struct device *dev)
3285{
3286	struct net_device *ndev = dev_get_drvdata(dev);
3287	struct ravb_private *priv = netdev_priv(ndev);
3288
3289	clk_disable(priv->refclk);
3290
3291	return 0;
3292}
3293
3294static int ravb_runtime_resume(struct device *dev)
3295{
3296	struct net_device *ndev = dev_get_drvdata(dev);
3297	struct ravb_private *priv = netdev_priv(ndev);
3298
3299	return clk_enable(priv->refclk);
3300}
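
/* Runtime PM here only gates the optional external reference clock; the
 * heavier lifting (opmode changes, descriptor ring management) stays in
 * the system-sleep and open/close paths.
 */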
3301
3302static const struct dev_pm_ops ravb_dev_pm_ops = {
3303	SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
3304	RUNTIME_PM_OPS(ravb_runtime_suspend, ravb_runtime_resume, NULL)
3305};
3306
3307static struct platform_driver ravb_driver = {
3308	.probe		= ravb_probe,
3309	.remove		= ravb_remove,
3310	.driver = {
3311		.name	= "ravb",
3312		.pm	= pm_ptr(&ravb_dev_pm_ops),
3313		.of_match_table = ravb_match_table,
3314	},
3315};
3316
3317module_platform_driver(ravb_driver);
3318
3319MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
3320MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
3321MODULE_LICENSE("GPL v2");