   1// SPDX-License-Identifier: GPL-2.0
   2/* Renesas Ethernet AVB device driver
   3 *
   4 * Copyright (C) 2014-2019 Renesas Electronics Corporation
   5 * Copyright (C) 2015 Renesas Solutions Corp.
   6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
   7 *
   8 * Based on the SuperH Ethernet driver
   9 */
  10
  11#include <linux/cache.h>
  12#include <linux/clk.h>
  13#include <linux/delay.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/err.h>
  16#include <linux/etherdevice.h>
  17#include <linux/ethtool.h>
  18#include <linux/if_vlan.h>
  19#include <linux/kernel.h>
  20#include <linux/list.h>
  21#include <linux/module.h>
  22#include <linux/net_tstamp.h>
  23#include <linux/of.h>
  24#include <linux/of_mdio.h>
  25#include <linux/of_net.h>
  26#include <linux/platform_device.h>
  27#include <linux/pm_runtime.h>
  28#include <linux/slab.h>
  29#include <linux/spinlock.h>
  30#include <linux/reset.h>
  31#include <linux/math64.h>
  32
  33#include "ravb.h"
  34
  35#define RAVB_DEF_MSG_ENABLE \
  36		(NETIF_MSG_LINK	  | \
  37		 NETIF_MSG_TIMER  | \
  38		 NETIF_MSG_RX_ERR | \
  39		 NETIF_MSG_TX_ERR)
  40
  41static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
  42	"ch0", /* RAVB_BE */
  43	"ch1", /* RAVB_NC */
  44};
  45
  46static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
  47	"ch18", /* RAVB_BE */
  48	"ch19", /* RAVB_NC */
  49};
  50
  51void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
  52		 u32 set)
  53{
  54	ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
  55}
  56
  57int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
  58{
  59	int i;
  60
  61	for (i = 0; i < 10000; i++) {
  62		if ((ravb_read(ndev, reg) & mask) == value)
  63			return 0;
  64		udelay(10);
  65	}
  66	return -ETIMEDOUT;
  67}
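     /* Note: ravb_wait() busy-polls the register up to 10000 times with a
      * 10 us delay between reads, so callers see a worst-case wait of
      * roughly 100 ms before -ETIMEDOUT is returned.
      */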
  68
  69static int ravb_set_opmode(struct net_device *ndev, u32 opmode)
  70{
  71	u32 csr_ops = 1U << (opmode & CCC_OPC);
  72	u32 ccc_mask = CCC_OPC;
  73	int error;
  74
  75	/* If gPTP active in config mode is supported it needs to be configured
  76	 * along with CSEL and operating mode in the same access. This is a
  77	 * hardware limitation.
  78	 */
  79	if (opmode & CCC_GAC)
  80		ccc_mask |= CCC_GAC | CCC_CSEL;
  81
  82	/* Set operating mode */
  83	ravb_modify(ndev, CCC, ccc_mask, opmode);
  84	/* Check if the operating mode is changed to the requested one */
  85	error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
  86	if (error) {
  87		netdev_err(ndev, "failed to switch device to requested mode (%u)\n",
  88			   opmode & CCC_OPC);
  89	}
  90
  91	return error;
  92}
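     /* The AVB-DMAC reports its current state in CSR.OPS as a one-hot bit,
      * which is why the expected value is computed as 1U << (opmode & CCC_OPC):
      * writing the mode into CCC.OPC and then polling CSR for the matching
      * bit confirms that the transition actually completed.
      */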
  93
  94static void ravb_set_rate_gbeth(struct net_device *ndev)
  95{
  96	struct ravb_private *priv = netdev_priv(ndev);
  97
  98	switch (priv->speed) {
  99	case 10:                /* 10BASE */
 100		ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
 101		break;
 102	case 100:               /* 100BASE */
 103		ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
 104		break;
 105	case 1000:              /* 1000BASE */
 106		ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
 107		break;
 108	}
 109}
 110
 111static void ravb_set_rate_rcar(struct net_device *ndev)
 112{
 113	struct ravb_private *priv = netdev_priv(ndev);
 114
 115	switch (priv->speed) {
 116	case 100:		/* 100BASE */
 117		ravb_write(ndev, GECMR_SPEED_100, GECMR);
 118		break;
 119	case 1000:		/* 1000BASE */
 120		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
 121		break;
 122	}
 123}
 124
 125static void ravb_set_buffer_align(struct sk_buff *skb)
 126{
 127	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
 128
 129	if (reserve)
 130		skb_reserve(skb, RAVB_ALIGN - reserve);
 131}
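     /* Example: with RAVB_ALIGN == 128 (per ravb.h), an skb whose data
      * pointer ends in 0x60 gets skb_reserve(skb, 0x20), making the buffer
      * address the hardware sees 128-byte aligned; an already-aligned
      * buffer is left untouched.
      */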
 132
 133/* Get MAC address from the MAC address registers
 134 *
  135 * The Ethernet AVB device has no ROM for the MAC address.
  136 * This function gets the MAC address that was set by the bootloader.
 137 */
 138static void ravb_read_mac_address(struct device_node *np,
 139				  struct net_device *ndev)
 140{
 141	int ret;
 142
 143	ret = of_get_ethdev_address(np, ndev);
 144	if (ret) {
 145		u32 mahr = ravb_read(ndev, MAHR);
 146		u32 malr = ravb_read(ndev, MALR);
 147		u8 addr[ETH_ALEN];
 148
 149		addr[0] = (mahr >> 24) & 0xFF;
 150		addr[1] = (mahr >> 16) & 0xFF;
 151		addr[2] = (mahr >>  8) & 0xFF;
 152		addr[3] = (mahr >>  0) & 0xFF;
 153		addr[4] = (malr >>  8) & 0xFF;
 154		addr[5] = (malr >>  0) & 0xFF;
 155		eth_hw_addr_set(ndev, addr);
 156	}
 157}
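     /* The fallback above mirrors the register layout used when writing
      * the address in the ravb_emac_init_*() helpers below: MAHR holds the
      * first four address bytes (most significant in bits 31:24) and the
      * low 16 bits of MALR hold the remaining two.
      */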
 158
 159static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
 160{
 161	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
 162						 mdiobb);
 163
 164	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
 165}
 166
 167/* MDC pin control */
 168static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
 169{
 170	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
 171}
 172
 173/* Data I/O pin control */
 174static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
 175{
 176	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
 177}
 178
 179/* Set data bit */
 180static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
 181{
 182	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
 183}
 184
 185/* Get data bit */
 186static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
 187{
 188	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
 189						 mdiobb);
 190
 191	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
 192}
 193
 194/* MDIO bus control struct */
 195static const struct mdiobb_ops bb_ops = {
 196	.owner = THIS_MODULE,
 197	.set_mdc = ravb_set_mdc,
 198	.set_mdio_dir = ravb_set_mdio_dir,
 199	.set_mdio_data = ravb_set_mdio_data,
 200	.get_mdio_data = ravb_get_mdio_data,
 201};
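     /* These callbacks plug into the generic mdio-bitbang framework: the
      * MDIO bus is driven entirely in software by toggling the MDC, MMD
      * (direction) and MDO bits of the PIR register and sampling MDI, so
      * no dedicated MDIO controller block is required.
      */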
 202
 203/* Free TX skb function for AVB-IP */
 204static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 205{
 206	struct ravb_private *priv = netdev_priv(ndev);
 207	struct net_device_stats *stats = &priv->stats[q];
 208	unsigned int num_tx_desc = priv->num_tx_desc;
 209	struct ravb_tx_desc *desc;
 210	unsigned int entry;
 211	int free_num = 0;
 212	u32 size;
 213
 214	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
 215		bool txed;
 216
 217		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
 218					     num_tx_desc);
 219		desc = &priv->tx_ring[q][entry];
 220		txed = desc->die_dt == DT_FEMPTY;
 221		if (free_txed_only && !txed)
 222			break;
 223		/* Descriptor type must be checked before all other reads */
 224		dma_rmb();
 225		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 226		/* Free the original skb. */
 227		if (priv->tx_skb[q][entry / num_tx_desc]) {
 228			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 229					 size, DMA_TO_DEVICE);
 230			/* Last packet descriptor? */
 231			if (entry % num_tx_desc == num_tx_desc - 1) {
 232				entry /= num_tx_desc;
 233				dev_kfree_skb_any(priv->tx_skb[q][entry]);
 234				priv->tx_skb[q][entry] = NULL;
 235				if (txed)
 236					stats->tx_packets++;
 237			}
 238			free_num++;
 239		}
 240		if (txed)
 241			stats->tx_bytes += size;
 242		desc->die_dt = DT_EEMPTY;
 243	}
 244	return free_num;
 245}
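     /* cur_tx[q] counts descriptors handed to the hardware and dirty_tx[q]
      * counts those already reclaimed, so their difference is the number
      * still in flight. The DMAC rewrites a descriptor's type to DT_FEMPTY
      * once the frame has been sent, which is what "txed" tests before the
      * slot is recycled as DT_EEMPTY.
      */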
 246
 247static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
 248{
 249	struct ravb_private *priv = netdev_priv(ndev);
 250	unsigned int ring_size;
 251	unsigned int i;
 252
 253	if (!priv->gbeth_rx_ring)
 254		return;
 255
 256	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 257		struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
 258
 259		if (!dma_mapping_error(ndev->dev.parent,
 260				       le32_to_cpu(desc->dptr)))
 261			dma_unmap_single(ndev->dev.parent,
 262					 le32_to_cpu(desc->dptr),
 263					 GBETH_RX_BUFF_MAX,
 264					 DMA_FROM_DEVICE);
 265	}
 266	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
 267	dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
 268			  priv->rx_desc_dma[q]);
 269	priv->gbeth_rx_ring = NULL;
 270}
 271
 272static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
 273{
 274	struct ravb_private *priv = netdev_priv(ndev);
 275	unsigned int ring_size;
 276	unsigned int i;
 277
 278	if (!priv->rx_ring[q])
 279		return;
 280
 281	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 282		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
 283
 284		if (!dma_mapping_error(ndev->dev.parent,
 285				       le32_to_cpu(desc->dptr)))
 286			dma_unmap_single(ndev->dev.parent,
 287					 le32_to_cpu(desc->dptr),
 288					 RX_BUF_SZ,
 289					 DMA_FROM_DEVICE);
 290	}
 291	ring_size = sizeof(struct ravb_ex_rx_desc) *
 292		    (priv->num_rx_ring[q] + 1);
 293	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
 294			  priv->rx_desc_dma[q]);
 295	priv->rx_ring[q] = NULL;
 296}
 297
 298/* Free skb's and DMA buffers for Ethernet AVB */
 299static void ravb_ring_free(struct net_device *ndev, int q)
 300{
 301	struct ravb_private *priv = netdev_priv(ndev);
 302	const struct ravb_hw_info *info = priv->info;
 303	unsigned int num_tx_desc = priv->num_tx_desc;
 304	unsigned int ring_size;
 305	unsigned int i;
 306
 307	info->rx_ring_free(ndev, q);
 308
 309	if (priv->tx_ring[q]) {
 310		ravb_tx_free(ndev, q, false);
 311
 312		ring_size = sizeof(struct ravb_tx_desc) *
 313			    (priv->num_tx_ring[q] * num_tx_desc + 1);
 314		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 315				  priv->tx_desc_dma[q]);
 316		priv->tx_ring[q] = NULL;
 317	}
 318
 319	/* Free RX skb ringbuffer */
 320	if (priv->rx_skb[q]) {
 321		for (i = 0; i < priv->num_rx_ring[q]; i++)
 322			dev_kfree_skb(priv->rx_skb[q][i]);
 323	}
 324	kfree(priv->rx_skb[q]);
 325	priv->rx_skb[q] = NULL;
 326
 327	/* Free aligned TX buffers */
 328	kfree(priv->tx_align[q]);
 329	priv->tx_align[q] = NULL;
 330
 331	/* Free TX skb ringbuffer.
 332	 * SKBs are freed by ravb_tx_free() call above.
 333	 */
 334	kfree(priv->tx_skb[q]);
 335	priv->tx_skb[q] = NULL;
 336}
 337
 338static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
 339{
 340	struct ravb_private *priv = netdev_priv(ndev);
 341	struct ravb_rx_desc *rx_desc;
 342	unsigned int rx_ring_size;
 343	dma_addr_t dma_addr;
 344	unsigned int i;
 345
 346	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 347	memset(priv->gbeth_rx_ring, 0, rx_ring_size);
 348	/* Build RX ring buffer */
 349	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 350		/* RX descriptor */
 351		rx_desc = &priv->gbeth_rx_ring[i];
 352		rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 353		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 354					  GBETH_RX_BUFF_MAX,
 355					  DMA_FROM_DEVICE);
 356		/* We just set the data size to 0 for a failed mapping which
 357		 * should prevent DMA from happening...
 358		 */
 359		if (dma_mapping_error(ndev->dev.parent, dma_addr))
 360			rx_desc->ds_cc = cpu_to_le16(0);
 361		rx_desc->dptr = cpu_to_le32(dma_addr);
 362		rx_desc->die_dt = DT_FEMPTY;
 363	}
 364	rx_desc = &priv->gbeth_rx_ring[i];
 365	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 366	rx_desc->die_dt = DT_LINKFIX; /* type */
 367}
 368
 369static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
 370{
 371	struct ravb_private *priv = netdev_priv(ndev);
 372	struct ravb_ex_rx_desc *rx_desc;
 373	unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 374	dma_addr_t dma_addr;
 375	unsigned int i;
 376
 377	memset(priv->rx_ring[q], 0, rx_ring_size);
 378	/* Build RX ring buffer */
 379	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 380		/* RX descriptor */
 381		rx_desc = &priv->rx_ring[q][i];
 382		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 383		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 384					  RX_BUF_SZ,
 385					  DMA_FROM_DEVICE);
 386		/* We just set the data size to 0 for a failed mapping which
 387		 * should prevent DMA from happening...
 388		 */
 389		if (dma_mapping_error(ndev->dev.parent, dma_addr))
 390			rx_desc->ds_cc = cpu_to_le16(0);
 391		rx_desc->dptr = cpu_to_le32(dma_addr);
 392		rx_desc->die_dt = DT_FEMPTY;
 393	}
 394	rx_desc = &priv->rx_ring[q][i];
 395	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 396	rx_desc->die_dt = DT_LINKFIX; /* type */
 397}
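     /* Both ring-format helpers terminate the ring with one extra
      * DT_LINKFIX descriptor whose pointer holds the DMA address of the
      * first descriptor, turning the array into a circular list that the
      * DMAC follows by itself; this is also why the rings are allocated
      * with num_rx_ring[q] + 1 entries.
      */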
 398
 399/* Format skb and descriptor buffer for Ethernet AVB */
 400static void ravb_ring_format(struct net_device *ndev, int q)
 401{
 402	struct ravb_private *priv = netdev_priv(ndev);
 403	const struct ravb_hw_info *info = priv->info;
 404	unsigned int num_tx_desc = priv->num_tx_desc;
 405	struct ravb_tx_desc *tx_desc;
 406	struct ravb_desc *desc;
 407	unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
 408				    num_tx_desc;
 409	unsigned int i;
 410
 411	priv->cur_rx[q] = 0;
 412	priv->cur_tx[q] = 0;
 413	priv->dirty_rx[q] = 0;
 414	priv->dirty_tx[q] = 0;
 415
 416	info->rx_ring_format(ndev, q);
 417
 418	memset(priv->tx_ring[q], 0, tx_ring_size);
 419	/* Build TX ring buffer */
 420	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
 421	     i++, tx_desc++) {
 422		tx_desc->die_dt = DT_EEMPTY;
 423		if (num_tx_desc > 1) {
 424			tx_desc++;
 425			tx_desc->die_dt = DT_EEMPTY;
 426		}
 427	}
 428	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 429	tx_desc->die_dt = DT_LINKFIX; /* type */
 430
 431	/* RX descriptor base address for best effort */
 432	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
 433	desc->die_dt = DT_LINKFIX; /* type */
 434	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 435
 436	/* TX descriptor base address for best effort */
 437	desc = &priv->desc_bat[q];
 438	desc->die_dt = DT_LINKFIX; /* type */
 439	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 440}
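     /* desc_bat is the descriptor base address table through which the
      * hardware locates each queue's ring: the TX entry for queue q sits
      * at index q and the RX entry at RX_QUEUE_OFFSET + q, each entry
      * itself being a DT_LINKFIX descriptor pointing at the ring head.
      */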
 441
 442static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
 443{
 444	struct ravb_private *priv = netdev_priv(ndev);
 445	unsigned int ring_size;
 446
 447	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
 448
 449	priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
 450						 &priv->rx_desc_dma[q],
 451						 GFP_KERNEL);
 452	return priv->gbeth_rx_ring;
 453}
 454
 455static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
 456{
 457	struct ravb_private *priv = netdev_priv(ndev);
 458	unsigned int ring_size;
 459
 460	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 461
 462	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 463					      &priv->rx_desc_dma[q],
 464					      GFP_KERNEL);
 465	return priv->rx_ring[q];
 466}
 467
 468/* Init skb and descriptor buffer for Ethernet AVB */
 469static int ravb_ring_init(struct net_device *ndev, int q)
 470{
 471	struct ravb_private *priv = netdev_priv(ndev);
 472	const struct ravb_hw_info *info = priv->info;
 473	unsigned int num_tx_desc = priv->num_tx_desc;
 474	unsigned int ring_size;
 475	struct sk_buff *skb;
 476	unsigned int i;
 477
 478	/* Allocate RX and TX skb rings */
 479	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
 480				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
 481	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
 482				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
 483	if (!priv->rx_skb[q] || !priv->tx_skb[q])
 484		goto error;
 485
 486	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 487		skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
 488		if (!skb)
 489			goto error;
 490		ravb_set_buffer_align(skb);
 491		priv->rx_skb[q][i] = skb;
 492	}
 493
 494	if (num_tx_desc > 1) {
 495		/* Allocate rings for the aligned buffers */
 496		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
 497					    DPTR_ALIGN - 1, GFP_KERNEL);
 498		if (!priv->tx_align[q])
 499			goto error;
 500	}
 501
 502	/* Allocate all RX descriptors. */
 503	if (!info->alloc_rx_desc(ndev, q))
 504		goto error;
 505
 506	priv->dirty_rx[q] = 0;
 507
 508	/* Allocate all TX descriptors. */
 509	ring_size = sizeof(struct ravb_tx_desc) *
 510		    (priv->num_tx_ring[q] * num_tx_desc + 1);
 511	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 512					      &priv->tx_desc_dma[q],
 513					      GFP_KERNEL);
 514	if (!priv->tx_ring[q])
 515		goto error;
 516
 517	return 0;
 518
 519error:
 520	ravb_ring_free(ndev, q);
 521
 522	return -ENOMEM;
 523}
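     /* tx_align[q] is over-allocated by DPTR_ALIGN - 1 bytes on purpose so
      * that the transmit path can carve out a DPTR_ALIGN-aligned bounce
      * buffer per packet with PTR_ALIGN(); this only matters when
      * num_tx_desc > 1, i.e. when each frame is described by an aligned
      * header descriptor plus a second descriptor for the remainder.
      */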
 524
 525static void ravb_emac_init_gbeth(struct net_device *ndev)
 526{
 527	struct ravb_private *priv = netdev_priv(ndev);
 528
 529	if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
 530		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
 531		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
 532	} else {
 533		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
 534		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
 535			    CXR31_SEL_LINK0);
 536	}
 537
 538	/* Receive frame limit set register */
 539	ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
 540
 541	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
 542	ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
 543			 ECMR_TE | ECMR_RE | ECMR_RCPT |
 544			 ECMR_TXF | ECMR_RXF, ECMR);
 545
 546	ravb_set_rate_gbeth(ndev);
 547
 548	/* Set MAC address */
 549	ravb_write(ndev,
 550		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
 551		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
 552	ravb_write(ndev, (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);
 553
 554	/* E-MAC status register clear */
 555	ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
 556	ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
 557
 558	/* E-MAC interrupt enable register */
 559	ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
 560}
 561
 562static void ravb_emac_init_rcar(struct net_device *ndev)
 563{
 564	/* Receive frame limit set register */
 565	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
 566
 567	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
 568	ravb_write(ndev, ECMR_ZPF | ECMR_DM |
 569		   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
 570		   ECMR_TE | ECMR_RE, ECMR);
 571
 572	ravb_set_rate_rcar(ndev);
 573
 574	/* Set MAC address */
 575	ravb_write(ndev,
 576		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
 577		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
 578	ravb_write(ndev,
 579		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);
 580
 581	/* E-MAC status register clear */
 582	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
 583
 584	/* E-MAC interrupt enable register */
 585	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
 586}
 587
 588/* E-MAC init function */
 589static void ravb_emac_init(struct net_device *ndev)
 590{
 591	struct ravb_private *priv = netdev_priv(ndev);
 592	const struct ravb_hw_info *info = priv->info;
 593
 594	info->emac_init(ndev);
 595}
 596
 597static int ravb_dmac_init_gbeth(struct net_device *ndev)
 598{
 599	int error;
 600
 601	error = ravb_ring_init(ndev, RAVB_BE);
 602	if (error)
 603		return error;
 604
 605	/* Descriptor format */
 606	ravb_ring_format(ndev, RAVB_BE);
 607
 608	/* Set DMAC RX */
 609	ravb_write(ndev, 0x60000000, RCR);
 610
 611	/* Set Max Frame Length (RTC) */
 612	ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
 613
 614	/* Set FIFO size */
 615	ravb_write(ndev, 0x00222200, TGC);
 616
 617	ravb_write(ndev, 0, TCCR);
 618
 619	/* Frame receive */
 620	ravb_write(ndev, RIC0_FRE0, RIC0);
 621	/* Disable FIFO full warning */
 622	ravb_write(ndev, 0x0, RIC1);
 623	/* Receive FIFO full error, descriptor empty */
 624	ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);
 625
 626	ravb_write(ndev, TIC_FTE0, TIC);
 627
 628	return 0;
 629}
 630
 631static int ravb_dmac_init_rcar(struct net_device *ndev)
 632{
 633	struct ravb_private *priv = netdev_priv(ndev);
 634	const struct ravb_hw_info *info = priv->info;
 635	int error;
 636
 637	error = ravb_ring_init(ndev, RAVB_BE);
 638	if (error)
 639		return error;
 640	error = ravb_ring_init(ndev, RAVB_NC);
 641	if (error) {
 642		ravb_ring_free(ndev, RAVB_BE);
 643		return error;
 644	}
 645
 646	/* Descriptor format */
 647	ravb_ring_format(ndev, RAVB_BE);
 648	ravb_ring_format(ndev, RAVB_NC);
 649
 650	/* Set AVB RX */
 651	ravb_write(ndev,
 652		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
 653
 654	/* Set FIFO size */
 655	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
 656
 657	/* Timestamp enable */
 658	ravb_write(ndev, TCCR_TFEN, TCCR);
 659
 660	/* Interrupt init: */
 661	if (info->multi_irqs) {
 662		/* Clear DIL.DPLx */
 663		ravb_write(ndev, 0, DIL);
 664		/* Set queue specific interrupt */
 665		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
 666	}
 667	/* Frame receive */
 668	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
 669	/* Disable FIFO full warning */
 670	ravb_write(ndev, 0, RIC1);
 671	/* Receive FIFO full error, descriptor empty */
 672	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
 673	/* Frame transmitted, timestamp FIFO updated */
 674	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
 675
 676	return 0;
 677}
 678
 679/* Device init function for Ethernet AVB */
 680static int ravb_dmac_init(struct net_device *ndev)
 681{
 682	struct ravb_private *priv = netdev_priv(ndev);
 683	const struct ravb_hw_info *info = priv->info;
 684	int error;
 685
 686	/* Set CONFIG mode */
 687	error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
 688	if (error)
 689		return error;
 690
 691	error = info->dmac_init(ndev);
 692	if (error)
 693		return error;
 694
 695	/* Setting the control will start the AVB-DMAC process. */
 696	return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
 697}
 698
 699static void ravb_get_tx_tstamp(struct net_device *ndev)
 700{
 701	struct ravb_private *priv = netdev_priv(ndev);
 702	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
 703	struct skb_shared_hwtstamps shhwtstamps;
 704	struct sk_buff *skb;
 705	struct timespec64 ts;
 706	u16 tag, tfa_tag;
 707	int count;
 708	u32 tfa2;
 709
 710	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
 711	while (count--) {
 712		tfa2 = ravb_read(ndev, TFA2);
 713		tfa_tag = (tfa2 & TFA2_TST) >> 16;
 714		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
 715		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
 716			    ravb_read(ndev, TFA1);
 717		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 718		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
 719		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
 720					 list) {
 721			skb = ts_skb->skb;
 722			tag = ts_skb->tag;
 723			list_del(&ts_skb->list);
 724			kfree(ts_skb);
 725			if (tag == tfa_tag) {
 726				skb_tstamp_tx(skb, &shhwtstamps);
 727				dev_consume_skb_any(skb);
 728				break;
 729			} else {
 730				dev_kfree_skb_any(skb);
 731			}
 732		}
 733		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
 734	}
 735}
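     /* TSR_TFFL is the fill level of the TX timestamp FIFO. For each entry,
      * the tag latched in TFA2 is matched against the tags that the xmit
      * path queued on ts_skb_list; only the matching skb receives the
      * hardware timestamp, stale entries are simply freed, and TCCR_TFR
      * releases the FIFO slot that was just read out.
      */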
 736
 737static void ravb_rx_csum(struct sk_buff *skb)
 738{
 739	u8 *hw_csum;
 740
 741	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
 742	 * appended to packet data
 743	 */
 744	if (unlikely(skb->len < sizeof(__sum16)))
 745		return;
 746	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
 747	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
 748	skb->ip_summed = CHECKSUM_COMPLETE;
 749	skb_trim(skb, skb->len - sizeof(__sum16));
 750}
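     /* CHECKSUM_COMPLETE tells the stack that skb->csum already holds the
      * checksum computed over the received data, so any L4 checksum can be
      * validated by folding in the pseudo-header instead of recomputing it.
      * The two trailing hardware bytes are trimmed so the stack sees the
      * original frame length.
      */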
 751
 752static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
 753					  struct ravb_rx_desc *desc)
 754{
 755	struct ravb_private *priv = netdev_priv(ndev);
 756	struct sk_buff *skb;
 757
 758	skb = priv->rx_skb[RAVB_BE][entry];
 759	priv->rx_skb[RAVB_BE][entry] = NULL;
 760	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 761			 ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
 762
 763	return skb;
 764}
 765
 766/* Packet receive function for Gigabit Ethernet */
 767static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 768{
 769	struct ravb_private *priv = netdev_priv(ndev);
 770	const struct ravb_hw_info *info = priv->info;
 771	struct net_device_stats *stats;
 772	struct ravb_rx_desc *desc;
 773	struct sk_buff *skb;
 774	dma_addr_t dma_addr;
 775	int rx_packets = 0;
 776	u8  desc_status;
 777	u16 pkt_len;
 778	u8  die_dt;
 779	int entry;
 780	int limit;
 781	int i;
 782
 783	entry = priv->cur_rx[q] % priv->num_rx_ring[q];
 784	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
 785	stats = &priv->stats[q];
 786
 787	desc = &priv->gbeth_rx_ring[entry];
 788	for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
 789		/* Descriptor type must be checked before all other reads */
 790		dma_rmb();
 791		desc_status = desc->msc;
 792		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 793
 794		/* We use 0-byte descriptors to mark the DMA mapping errors */
 795		if (!pkt_len)
 796			continue;
 797
 798		if (desc_status & MSC_MC)
 799			stats->multicast++;
 800
 801		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
 802			stats->rx_errors++;
 803			if (desc_status & MSC_CRC)
 804				stats->rx_crc_errors++;
 805			if (desc_status & MSC_RFE)
 806				stats->rx_frame_errors++;
 807			if (desc_status & (MSC_RTLF | MSC_RTSF))
 808				stats->rx_length_errors++;
 809			if (desc_status & MSC_CEEF)
 810				stats->rx_missed_errors++;
 811		} else {
 812			die_dt = desc->die_dt & 0xF0;
 813			switch (die_dt) {
 814			case DT_FSINGLE:
 815				skb = ravb_get_skb_gbeth(ndev, entry, desc);
 816				skb_put(skb, pkt_len);
 817				skb->protocol = eth_type_trans(skb, ndev);
 818				napi_gro_receive(&priv->napi[q], skb);
 819				rx_packets++;
 820				stats->rx_bytes += pkt_len;
 821				break;
 822			case DT_FSTART:
 823				priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
 824				skb_put(priv->rx_1st_skb, pkt_len);
 825				break;
 826			case DT_FMID:
 827				skb = ravb_get_skb_gbeth(ndev, entry, desc);
 828				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
 829							       priv->rx_1st_skb->len,
 830							       skb->data,
 831							       pkt_len);
 832				skb_put(priv->rx_1st_skb, pkt_len);
 833				dev_kfree_skb(skb);
 834				break;
 835			case DT_FEND:
 836				skb = ravb_get_skb_gbeth(ndev, entry, desc);
 837				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
 838							       priv->rx_1st_skb->len,
 839							       skb->data,
 840							       pkt_len);
 841				skb_put(priv->rx_1st_skb, pkt_len);
 842				dev_kfree_skb(skb);
 843				priv->rx_1st_skb->protocol =
 844					eth_type_trans(priv->rx_1st_skb, ndev);
 845				napi_gro_receive(&priv->napi[q],
 846						 priv->rx_1st_skb);
 847				rx_packets++;
 848				stats->rx_bytes += pkt_len;
 849				break;
 850			}
 851		}
 852
 853		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
 854		desc = &priv->gbeth_rx_ring[entry];
 855	}
 856
 857	/* Refill the RX ring buffers. */
 858	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 859		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 860		desc = &priv->gbeth_rx_ring[entry];
 861		desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 862
 863		if (!priv->rx_skb[q][entry]) {
 864			skb = netdev_alloc_skb(ndev, info->max_rx_len);
 865			if (!skb)
 866				break;
 867			ravb_set_buffer_align(skb);
 868			dma_addr = dma_map_single(ndev->dev.parent,
 869						  skb->data,
 870						  GBETH_RX_BUFF_MAX,
 871						  DMA_FROM_DEVICE);
 872			skb_checksum_none_assert(skb);
 873			/* We just set the data size to 0 for a failed mapping
  874			 * which should prevent DMA from happening...
 875			 */
 876			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 877				desc->ds_cc = cpu_to_le16(0);
 878			desc->dptr = cpu_to_le32(dma_addr);
 879			priv->rx_skb[q][entry] = skb;
 880		}
 881		/* Descriptor type must be set after all the above writes */
 882		dma_wmb();
 883		desc->die_dt = DT_FEMPTY;
 884	}
 885
 886	stats->rx_packets += rx_packets;
 887	*quota -= rx_packets;
 888	return *quota == 0;
 889}
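     /* On GbEth, a frame larger than one RX buffer arrives as a descriptor
      * chain: DT_FSTART carries the head (stashed in priv->rx_1st_skb), any
      * DT_FMID fragments are copied onto the tail of that skb, and DT_FEND
      * completes and delivers it; DT_FSINGLE is the common single-descriptor
      * case.
      */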
 890
 891/* Packet receive function for Ethernet AVB */
 892static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 893{
 894	struct ravb_private *priv = netdev_priv(ndev);
 895	const struct ravb_hw_info *info = priv->info;
 896	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
 897	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
 898			priv->cur_rx[q];
 899	struct net_device_stats *stats = &priv->stats[q];
 900	struct ravb_ex_rx_desc *desc;
 901	struct sk_buff *skb;
 902	dma_addr_t dma_addr;
 903	struct timespec64 ts;
 904	u8  desc_status;
 905	u16 pkt_len;
 906	int limit;
 907
 908	boguscnt = min(boguscnt, *quota);
 909	limit = boguscnt;
 910	desc = &priv->rx_ring[q][entry];
 911	while (desc->die_dt != DT_FEMPTY) {
 912		/* Descriptor type must be checked before all other reads */
 913		dma_rmb();
 914		desc_status = desc->msc;
 915		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 916
 917		if (--boguscnt < 0)
 918			break;
 919
 920		/* We use 0-byte descriptors to mark the DMA mapping errors */
 921		if (!pkt_len)
 922			continue;
 923
 924		if (desc_status & MSC_MC)
 925			stats->multicast++;
 926
 927		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
 928				   MSC_CEEF)) {
 929			stats->rx_errors++;
 930			if (desc_status & MSC_CRC)
 931				stats->rx_crc_errors++;
 932			if (desc_status & MSC_RFE)
 933				stats->rx_frame_errors++;
 934			if (desc_status & (MSC_RTLF | MSC_RTSF))
 935				stats->rx_length_errors++;
 936			if (desc_status & MSC_CEEF)
 937				stats->rx_missed_errors++;
 938		} else {
 939			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
 940
 941			skb = priv->rx_skb[q][entry];
 942			priv->rx_skb[q][entry] = NULL;
 943			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 944					 RX_BUF_SZ,
 945					 DMA_FROM_DEVICE);
 946			get_ts &= (q == RAVB_NC) ?
 947					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
 948					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
 949			if (get_ts) {
 950				struct skb_shared_hwtstamps *shhwtstamps;
 951
 952				shhwtstamps = skb_hwtstamps(skb);
 953				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 954				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
 955					     32) | le32_to_cpu(desc->ts_sl);
 956				ts.tv_nsec = le32_to_cpu(desc->ts_n);
 957				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
 958			}
 959
 960			skb_put(skb, pkt_len);
 961			skb->protocol = eth_type_trans(skb, ndev);
 962			if (ndev->features & NETIF_F_RXCSUM)
 963				ravb_rx_csum(skb);
 964			napi_gro_receive(&priv->napi[q], skb);
 965			stats->rx_packets++;
 966			stats->rx_bytes += pkt_len;
 967		}
 968
 969		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
 970		desc = &priv->rx_ring[q][entry];
 971	}
 972
 973	/* Refill the RX ring buffers. */
 974	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 975		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 976		desc = &priv->rx_ring[q][entry];
 977		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 978
 979		if (!priv->rx_skb[q][entry]) {
 980			skb = netdev_alloc_skb(ndev, info->max_rx_len);
 981			if (!skb)
 982				break;	/* Better luck next round. */
 983			ravb_set_buffer_align(skb);
 984			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 985						  le16_to_cpu(desc->ds_cc),
 986						  DMA_FROM_DEVICE);
 987			skb_checksum_none_assert(skb);
 988			/* We just set the data size to 0 for a failed mapping
  989			 * which should prevent DMA from happening...
 990			 */
 991			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 992				desc->ds_cc = cpu_to_le16(0);
 993			desc->dptr = cpu_to_le32(dma_addr);
 994			priv->rx_skb[q][entry] = skb;
 995		}
 996		/* Descriptor type must be set after all the above writes */
 997		dma_wmb();
 998		desc->die_dt = DT_FEMPTY;
 999	}
1000
1001	*quota -= limit - (++boguscnt);
1002
1003	return boguscnt <= 0;
1004}
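     /* boguscnt starts as the smaller of the number of dirty ring entries
      * and the remaining NAPI quota, so one call can neither overrun the
      * ring nor exceed the budget; the arithmetic on exit converts the
      * descriptors actually processed back into quota consumed.
      */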
1005
1006/* Packet receive function for Ethernet AVB */
1007static bool ravb_rx(struct net_device *ndev, int *quota, int q)
1008{
1009	struct ravb_private *priv = netdev_priv(ndev);
1010	const struct ravb_hw_info *info = priv->info;
1011
1012	return info->receive(ndev, quota, q);
1013}
1014
1015static void ravb_rcv_snd_disable(struct net_device *ndev)
1016{
1017	/* Disable TX and RX */
1018	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
1019}
1020
1021static void ravb_rcv_snd_enable(struct net_device *ndev)
1022{
1023	/* Enable TX and RX */
1024	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
1025}
1026
 1027/* Function to wait until the DMA processes have finished */
1028static int ravb_stop_dma(struct net_device *ndev)
1029{
1030	struct ravb_private *priv = netdev_priv(ndev);
1031	const struct ravb_hw_info *info = priv->info;
1032	int error;
1033
1034	/* Wait for stopping the hardware TX process */
1035	error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);
1036
1037	if (error)
1038		return error;
1039
1040	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
1041			  0);
1042	if (error)
1043		return error;
1044
1045	/* Stop the E-MAC's RX/TX processes. */
1046	ravb_rcv_snd_disable(ndev);
1047
1048	/* Wait for stopping the RX DMA process */
1049	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
1050	if (error)
1051		return error;
1052
1053	/* Stop AVB-DMAC process */
1054	return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
1055}
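     /* Shutdown ordering matters here: first drain the hardware TX side
      * (TCCR transmit requests, then the per-queue TX processes in CSR),
      * then stop the E-MAC so no new frames are accepted, wait for the RX
      * DMA process to go idle, and only then drop the AVB-DMAC back into
      * CONFIG mode.
      */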
1056
1057/* E-MAC interrupt handler */
1058static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
1059{
1060	struct ravb_private *priv = netdev_priv(ndev);
1061	u32 ecsr, psr;
1062
1063	ecsr = ravb_read(ndev, ECSR);
1064	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */
1065
1066	if (ecsr & ECSR_MPD)
1067		pm_wakeup_event(&priv->pdev->dev, 0);
1068	if (ecsr & ECSR_ICD)
1069		ndev->stats.tx_carrier_errors++;
1070	if (ecsr & ECSR_LCHNG) {
1071		/* Link changed */
1072		if (priv->no_avb_link)
1073			return;
1074		psr = ravb_read(ndev, PSR);
1075		if (priv->avb_link_active_low)
1076			psr ^= PSR_LMON;
1077		if (!(psr & PSR_LMON)) {
 1078			/* Disable RX and TX */
1079			ravb_rcv_snd_disable(ndev);
1080		} else {
1081			/* Enable RX and TX */
1082			ravb_rcv_snd_enable(ndev);
1083		}
1084	}
1085}
1086
1087static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
1088{
1089	struct net_device *ndev = dev_id;
1090	struct ravb_private *priv = netdev_priv(ndev);
1091
1092	spin_lock(&priv->lock);
1093	ravb_emac_interrupt_unlocked(ndev);
1094	spin_unlock(&priv->lock);
1095	return IRQ_HANDLED;
1096}
1097
1098/* Error interrupt handler */
1099static void ravb_error_interrupt(struct net_device *ndev)
1100{
1101	struct ravb_private *priv = netdev_priv(ndev);
1102	u32 eis, ris2;
1103
1104	eis = ravb_read(ndev, EIS);
1105	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
1106	if (eis & EIS_QFS) {
1107		ris2 = ravb_read(ndev, RIS2);
1108		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
1109			   RIS2);
1110
1111		/* Receive Descriptor Empty int */
1112		if (ris2 & RIS2_QFF0)
1113			priv->stats[RAVB_BE].rx_over_errors++;
1114
1115		/* Receive Descriptor Empty int */
1116		if (ris2 & RIS2_QFF1)
1117			priv->stats[RAVB_NC].rx_over_errors++;
1118
1119		/* Receive FIFO Overflow int */
1120		if (ris2 & RIS2_RFFF)
1121			priv->rx_fifo_errors++;
1122	}
1123}
1124
1125static bool ravb_queue_interrupt(struct net_device *ndev, int q)
1126{
1127	struct ravb_private *priv = netdev_priv(ndev);
1128	const struct ravb_hw_info *info = priv->info;
1129	u32 ris0 = ravb_read(ndev, RIS0);
1130	u32 ric0 = ravb_read(ndev, RIC0);
1131	u32 tis  = ravb_read(ndev, TIS);
1132	u32 tic  = ravb_read(ndev, TIC);
1133
1134	if (((ris0 & ric0) & BIT(q)) || ((tis  & tic)  & BIT(q))) {
1135		if (napi_schedule_prep(&priv->napi[q])) {
1136			/* Mask RX and TX interrupts */
1137			if (!info->irq_en_dis) {
1138				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
1139				ravb_write(ndev, tic & ~BIT(q), TIC);
1140			} else {
1141				ravb_write(ndev, BIT(q), RID0);
1142				ravb_write(ndev, BIT(q), TID);
1143			}
1144			__napi_schedule(&priv->napi[q]);
1145		} else {
1146			netdev_warn(ndev,
1147				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
1148				    ris0, ric0);
1149			netdev_warn(ndev,
1150				    "                    tx status 0x%08x, tx mask 0x%08x.\n",
1151				    tis, tic);
1152		}
1153		return true;
1154	}
1155	return false;
1156}
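     /* Two masking schemes are handled above: older SoCs mask a queue by
      * clearing its bit in RIC0/TIC with a read-modify-write, while parts
      * with info->irq_en_dis set have dedicated disable registers (RID0,
      * TID) where writing a 1 masks the source without touching the enable
      * registers; ravb_poll() re-enables via RIE0/TIE accordingly.
      */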
1157
1158static bool ravb_timestamp_interrupt(struct net_device *ndev)
1159{
1160	u32 tis = ravb_read(ndev, TIS);
1161
1162	if (tis & TIS_TFUF) {
1163		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
1164		ravb_get_tx_tstamp(ndev);
1165		return true;
1166	}
1167	return false;
1168}
1169
1170static irqreturn_t ravb_interrupt(int irq, void *dev_id)
1171{
1172	struct net_device *ndev = dev_id;
1173	struct ravb_private *priv = netdev_priv(ndev);
1174	const struct ravb_hw_info *info = priv->info;
1175	irqreturn_t result = IRQ_NONE;
1176	u32 iss;
1177
1178	spin_lock(&priv->lock);
1179	/* Get interrupt status */
1180	iss = ravb_read(ndev, ISS);
1181
1182	/* Received and transmitted interrupts */
1183	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
1184		int q;
1185
1186		/* Timestamp updated */
1187		if (ravb_timestamp_interrupt(ndev))
1188			result = IRQ_HANDLED;
1189
1190		/* Network control and best effort queue RX/TX */
1191		if (info->nc_queues) {
1192			for (q = RAVB_NC; q >= RAVB_BE; q--) {
1193				if (ravb_queue_interrupt(ndev, q))
1194					result = IRQ_HANDLED;
1195			}
1196		} else {
1197			if (ravb_queue_interrupt(ndev, RAVB_BE))
1198				result = IRQ_HANDLED;
1199		}
1200	}
1201
1202	/* E-MAC status summary */
1203	if (iss & ISS_MS) {
1204		ravb_emac_interrupt_unlocked(ndev);
1205		result = IRQ_HANDLED;
1206	}
1207
1208	/* Error status summary */
1209	if (iss & ISS_ES) {
1210		ravb_error_interrupt(ndev);
1211		result = IRQ_HANDLED;
1212	}
1213
1214	/* gPTP interrupt status summary */
1215	if (iss & ISS_CGIS) {
1216		ravb_ptp_interrupt(ndev);
1217		result = IRQ_HANDLED;
1218	}
1219
1220	spin_unlock(&priv->lock);
1221	return result;
1222}
1223
1224/* Timestamp/Error/gPTP interrupt handler */
1225static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
1226{
1227	struct net_device *ndev = dev_id;
1228	struct ravb_private *priv = netdev_priv(ndev);
1229	irqreturn_t result = IRQ_NONE;
1230	u32 iss;
1231
1232	spin_lock(&priv->lock);
1233	/* Get interrupt status */
1234	iss = ravb_read(ndev, ISS);
1235
1236	/* Timestamp updated */
1237	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
1238		result = IRQ_HANDLED;
1239
1240	/* Error status summary */
1241	if (iss & ISS_ES) {
1242		ravb_error_interrupt(ndev);
1243		result = IRQ_HANDLED;
1244	}
1245
1246	/* gPTP interrupt status summary */
1247	if (iss & ISS_CGIS) {
1248		ravb_ptp_interrupt(ndev);
1249		result = IRQ_HANDLED;
1250	}
1251
1252	spin_unlock(&priv->lock);
1253	return result;
1254}
1255
1256static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
1257{
1258	struct net_device *ndev = dev_id;
1259	struct ravb_private *priv = netdev_priv(ndev);
1260	irqreturn_t result = IRQ_NONE;
1261
1262	spin_lock(&priv->lock);
1263
1264	/* Network control/Best effort queue RX/TX */
1265	if (ravb_queue_interrupt(ndev, q))
1266		result = IRQ_HANDLED;
1267
1268	spin_unlock(&priv->lock);
1269	return result;
1270}
1271
1272static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
1273{
1274	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
1275}
1276
1277static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
1278{
1279	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
1280}
1281
1282static int ravb_poll(struct napi_struct *napi, int budget)
1283{
1284	struct net_device *ndev = napi->dev;
1285	struct ravb_private *priv = netdev_priv(ndev);
1286	const struct ravb_hw_info *info = priv->info;
1287	bool gptp = info->gptp || info->ccc_gac;
1288	struct ravb_rx_desc *desc;
1289	unsigned long flags;
1290	int q = napi - priv->napi;
1291	int mask = BIT(q);
1292	int quota = budget;
1293	unsigned int entry;
1294
1295	if (!gptp) {
1296		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
1297		desc = &priv->gbeth_rx_ring[entry];
1298	}
1299	/* Processing RX Descriptor Ring */
1300	/* Clear RX interrupt */
1301	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
1302	if (gptp || desc->die_dt != DT_FEMPTY) {
1303		if (ravb_rx(ndev, &quota, q))
1304			goto out;
1305	}
1306
1307	/* Processing TX Descriptor Ring */
1308	spin_lock_irqsave(&priv->lock, flags);
1309	/* Clear TX interrupt */
1310	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
1311	ravb_tx_free(ndev, q, true);
1312	netif_wake_subqueue(ndev, q);
1313	spin_unlock_irqrestore(&priv->lock, flags);
1314
1315	napi_complete(napi);
1316
1317	/* Re-enable RX/TX interrupts */
1318	spin_lock_irqsave(&priv->lock, flags);
1319	if (!info->irq_en_dis) {
1320		ravb_modify(ndev, RIC0, mask, mask);
1321		ravb_modify(ndev, TIC,  mask, mask);
1322	} else {
1323		ravb_write(ndev, mask, RIE0);
1324		ravb_write(ndev, mask, TIE);
1325	}
1326	spin_unlock_irqrestore(&priv->lock, flags);
1327
1328	/* Receive error message handling */
1329	priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
1330	if (info->nc_queues)
1331		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
1332	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
1333		ndev->stats.rx_over_errors = priv->rx_over_errors;
1334	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
1335		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
1336out:
1337	return budget - quota;
1338}
1339
1340static void ravb_set_duplex_gbeth(struct net_device *ndev)
1341{
1342	struct ravb_private *priv = netdev_priv(ndev);
1343
1344	ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
1345}
1346
1347/* PHY state control function */
1348static void ravb_adjust_link(struct net_device *ndev)
1349{
1350	struct ravb_private *priv = netdev_priv(ndev);
1351	const struct ravb_hw_info *info = priv->info;
1352	struct phy_device *phydev = ndev->phydev;
1353	bool new_state = false;
1354	unsigned long flags;
1355
1356	spin_lock_irqsave(&priv->lock, flags);
1357
1358	/* Disable TX and RX right over here, if E-MAC change is ignored */
1359	if (priv->no_avb_link)
1360		ravb_rcv_snd_disable(ndev);
1361
1362	if (phydev->link) {
1363		if (info->half_duplex && phydev->duplex != priv->duplex) {
1364			new_state = true;
1365			priv->duplex = phydev->duplex;
1366			ravb_set_duplex_gbeth(ndev);
1367		}
1368
1369		if (phydev->speed != priv->speed) {
1370			new_state = true;
1371			priv->speed = phydev->speed;
1372			info->set_rate(ndev);
1373		}
1374		if (!priv->link) {
1375			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
1376			new_state = true;
1377			priv->link = phydev->link;
1378		}
1379	} else if (priv->link) {
1380		new_state = true;
1381		priv->link = 0;
1382		priv->speed = 0;
1383		if (info->half_duplex)
1384			priv->duplex = -1;
1385	}
1386
1387	/* Enable TX and RX right over here, if E-MAC change is ignored */
1388	if (priv->no_avb_link && phydev->link)
1389		ravb_rcv_snd_enable(ndev);
1390
1391	spin_unlock_irqrestore(&priv->lock, flags);
1392
1393	if (new_state && netif_msg_link(priv))
1394		phy_print_status(phydev);
1395}
1396
1397/* PHY init function */
1398static int ravb_phy_init(struct net_device *ndev)
1399{
1400	struct device_node *np = ndev->dev.parent->of_node;
1401	struct ravb_private *priv = netdev_priv(ndev);
1402	const struct ravb_hw_info *info = priv->info;
1403	struct phy_device *phydev;
1404	struct device_node *pn;
1405	phy_interface_t iface;
1406	int err;
1407
1408	priv->link = 0;
1409	priv->speed = 0;
1410	priv->duplex = -1;
1411
1412	/* Try connecting to PHY */
1413	pn = of_parse_phandle(np, "phy-handle", 0);
1414	if (!pn) {
1415		/* In the case of a fixed PHY, the DT node associated
1416		 * to the PHY is the Ethernet MAC DT node.
1417		 */
1418		if (of_phy_is_fixed_link(np)) {
1419			err = of_phy_register_fixed_link(np);
1420			if (err)
1421				return err;
1422		}
1423		pn = of_node_get(np);
1424	}
1425
1426	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
1427				     : priv->phy_interface;
1428	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
1429	of_node_put(pn);
1430	if (!phydev) {
1431		netdev_err(ndev, "failed to connect PHY\n");
1432		err = -ENOENT;
1433		goto err_deregister_fixed_link;
1434	}
1435
1436	if (!info->half_duplex) {
1437		/* 10BASE, Pause and Asym Pause is not supported */
1438		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1439		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
1440		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
1441		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
1442
1443		/* Half Duplex is not supported */
1444		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1445		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1446	}
1447
1448	phy_attached_info(phydev);
1449
1450	return 0;
1451
1452err_deregister_fixed_link:
1453	if (of_phy_is_fixed_link(np))
1454		of_phy_deregister_fixed_link(np);
1455
1456	return err;
1457}
1458
1459/* PHY control start function */
1460static int ravb_phy_start(struct net_device *ndev)
1461{
1462	int error;
1463
1464	error = ravb_phy_init(ndev);
1465	if (error)
1466		return error;
1467
1468	phy_start(ndev->phydev);
1469
1470	return 0;
1471}
1472
1473static u32 ravb_get_msglevel(struct net_device *ndev)
1474{
1475	struct ravb_private *priv = netdev_priv(ndev);
1476
1477	return priv->msg_enable;
1478}
1479
1480static void ravb_set_msglevel(struct net_device *ndev, u32 value)
1481{
1482	struct ravb_private *priv = netdev_priv(ndev);
1483
1484	priv->msg_enable = value;
1485}
1486
1487static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
1488	"rx_queue_0_current",
1489	"tx_queue_0_current",
1490	"rx_queue_0_dirty",
1491	"tx_queue_0_dirty",
1492	"rx_queue_0_packets",
1493	"tx_queue_0_packets",
1494	"rx_queue_0_bytes",
1495	"tx_queue_0_bytes",
1496	"rx_queue_0_mcast_packets",
1497	"rx_queue_0_errors",
1498	"rx_queue_0_crc_errors",
1499	"rx_queue_0_frame_errors",
1500	"rx_queue_0_length_errors",
1501	"rx_queue_0_csum_offload_errors",
1502	"rx_queue_0_over_errors",
1503};
1504
1505static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1506	"rx_queue_0_current",
1507	"tx_queue_0_current",
1508	"rx_queue_0_dirty",
1509	"tx_queue_0_dirty",
1510	"rx_queue_0_packets",
1511	"tx_queue_0_packets",
1512	"rx_queue_0_bytes",
1513	"tx_queue_0_bytes",
1514	"rx_queue_0_mcast_packets",
1515	"rx_queue_0_errors",
1516	"rx_queue_0_crc_errors",
1517	"rx_queue_0_frame_errors",
1518	"rx_queue_0_length_errors",
1519	"rx_queue_0_missed_errors",
1520	"rx_queue_0_over_errors",
1521
1522	"rx_queue_1_current",
1523	"tx_queue_1_current",
1524	"rx_queue_1_dirty",
1525	"tx_queue_1_dirty",
1526	"rx_queue_1_packets",
1527	"tx_queue_1_packets",
1528	"rx_queue_1_bytes",
1529	"tx_queue_1_bytes",
1530	"rx_queue_1_mcast_packets",
1531	"rx_queue_1_errors",
1532	"rx_queue_1_crc_errors",
1533	"rx_queue_1_frame_errors",
1534	"rx_queue_1_length_errors",
1535	"rx_queue_1_missed_errors",
1536	"rx_queue_1_over_errors",
1537};
1538
1539static int ravb_get_sset_count(struct net_device *netdev, int sset)
1540{
1541	struct ravb_private *priv = netdev_priv(netdev);
1542	const struct ravb_hw_info *info = priv->info;
1543
1544	switch (sset) {
1545	case ETH_SS_STATS:
1546		return info->stats_len;
1547	default:
1548		return -EOPNOTSUPP;
1549	}
1550}
1551
1552static void ravb_get_ethtool_stats(struct net_device *ndev,
1553				   struct ethtool_stats *estats, u64 *data)
1554{
1555	struct ravb_private *priv = netdev_priv(ndev);
1556	const struct ravb_hw_info *info = priv->info;
1557	int num_rx_q;
1558	int i = 0;
1559	int q;
1560
1561	num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
1562	/* Device-specific stats */
1563	for (q = RAVB_BE; q < num_rx_q; q++) {
1564		struct net_device_stats *stats = &priv->stats[q];
1565
1566		data[i++] = priv->cur_rx[q];
1567		data[i++] = priv->cur_tx[q];
1568		data[i++] = priv->dirty_rx[q];
1569		data[i++] = priv->dirty_tx[q];
1570		data[i++] = stats->rx_packets;
1571		data[i++] = stats->tx_packets;
1572		data[i++] = stats->rx_bytes;
1573		data[i++] = stats->tx_bytes;
1574		data[i++] = stats->multicast;
1575		data[i++] = stats->rx_errors;
1576		data[i++] = stats->rx_crc_errors;
1577		data[i++] = stats->rx_frame_errors;
1578		data[i++] = stats->rx_length_errors;
1579		data[i++] = stats->rx_missed_errors;
1580		data[i++] = stats->rx_over_errors;
1581	}
1582}
1583
1584static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1585{
1586	struct ravb_private *priv = netdev_priv(ndev);
1587	const struct ravb_hw_info *info = priv->info;
1588
1589	switch (stringset) {
1590	case ETH_SS_STATS:
1591		memcpy(data, info->gstrings_stats, info->gstrings_size);
1592		break;
1593	}
1594}
1595
1596static void ravb_get_ringparam(struct net_device *ndev,
1597			       struct ethtool_ringparam *ring,
1598			       struct kernel_ethtool_ringparam *kernel_ring,
1599			       struct netlink_ext_ack *extack)
1600{
1601	struct ravb_private *priv = netdev_priv(ndev);
1602
1603	ring->rx_max_pending = BE_RX_RING_MAX;
1604	ring->tx_max_pending = BE_TX_RING_MAX;
1605	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
1606	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
1607}
1608
1609static int ravb_set_ringparam(struct net_device *ndev,
1610			      struct ethtool_ringparam *ring,
1611			      struct kernel_ethtool_ringparam *kernel_ring,
1612			      struct netlink_ext_ack *extack)
1613{
1614	struct ravb_private *priv = netdev_priv(ndev);
1615	const struct ravb_hw_info *info = priv->info;
1616	int error;
1617
1618	if (ring->tx_pending > BE_TX_RING_MAX ||
1619	    ring->rx_pending > BE_RX_RING_MAX ||
1620	    ring->tx_pending < BE_TX_RING_MIN ||
1621	    ring->rx_pending < BE_RX_RING_MIN)
1622		return -EINVAL;
1623	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1624		return -EINVAL;
1625
1626	if (netif_running(ndev)) {
1627		netif_device_detach(ndev);
1628		/* Stop PTP Clock driver */
1629		if (info->gptp)
1630			ravb_ptp_stop(ndev);
1631		/* Wait for DMA stopping */
1632		error = ravb_stop_dma(ndev);
1633		if (error) {
1634			netdev_err(ndev,
1635				   "cannot set ringparam! Any AVB processes are still running?\n");
1636			return error;
1637		}
1638		synchronize_irq(ndev->irq);
1639
1640		/* Free all the skb's in the RX queue and the DMA buffers. */
1641		ravb_ring_free(ndev, RAVB_BE);
1642		if (info->nc_queues)
1643			ravb_ring_free(ndev, RAVB_NC);
1644	}
1645
1646	/* Set new parameters */
1647	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
1648	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
1649
1650	if (netif_running(ndev)) {
1651		error = ravb_dmac_init(ndev);
1652		if (error) {
1653			netdev_err(ndev,
1654				   "%s: ravb_dmac_init() failed, error %d\n",
1655				   __func__, error);
1656			return error;
1657		}
1658
1659		ravb_emac_init(ndev);
1660
1661		/* Initialise PTP Clock driver */
1662		if (info->gptp)
1663			ravb_ptp_init(ndev, priv->pdev);
1664
1665		netif_device_attach(ndev);
1666	}
1667
1668	return 0;
1669}
1670
1671static int ravb_get_ts_info(struct net_device *ndev,
1672			    struct ethtool_ts_info *info)
1673{
1674	struct ravb_private *priv = netdev_priv(ndev);
1675	const struct ravb_hw_info *hw_info = priv->info;
1676
1677	info->so_timestamping =
1678		SOF_TIMESTAMPING_TX_SOFTWARE |
1679		SOF_TIMESTAMPING_RX_SOFTWARE |
1680		SOF_TIMESTAMPING_SOFTWARE |
1681		SOF_TIMESTAMPING_TX_HARDWARE |
1682		SOF_TIMESTAMPING_RX_HARDWARE |
1683		SOF_TIMESTAMPING_RAW_HARDWARE;
1684	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1685	info->rx_filters =
1686		(1 << HWTSTAMP_FILTER_NONE) |
1687		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1688		(1 << HWTSTAMP_FILTER_ALL);
1689	if (hw_info->gptp || hw_info->ccc_gac)
1690		info->phc_index = ptp_clock_index(priv->ptp.clock);
1691
1692	return 0;
1693}
1694
1695static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1696{
1697	struct ravb_private *priv = netdev_priv(ndev);
1698
1699	wol->supported = WAKE_MAGIC;
1700	wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
1701}
1702
1703static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1704{
1705	struct ravb_private *priv = netdev_priv(ndev);
1706	const struct ravb_hw_info *info = priv->info;
1707
1708	if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
1709		return -EOPNOTSUPP;
1710
1711	priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
1712
1713	device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);
1714
1715	return 0;
1716}
1717
1718static const struct ethtool_ops ravb_ethtool_ops = {
1719	.nway_reset		= phy_ethtool_nway_reset,
1720	.get_msglevel		= ravb_get_msglevel,
1721	.set_msglevel		= ravb_set_msglevel,
1722	.get_link		= ethtool_op_get_link,
1723	.get_strings		= ravb_get_strings,
1724	.get_ethtool_stats	= ravb_get_ethtool_stats,
1725	.get_sset_count		= ravb_get_sset_count,
1726	.get_ringparam		= ravb_get_ringparam,
1727	.set_ringparam		= ravb_set_ringparam,
1728	.get_ts_info		= ravb_get_ts_info,
1729	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
1730	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
1731	.get_wol		= ravb_get_wol,
1732	.set_wol		= ravb_set_wol,
1733};
1734
1735static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
1736				struct net_device *ndev, struct device *dev,
1737				const char *ch)
1738{
1739	char *name;
1740	int error;
1741
1742	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
1743	if (!name)
1744		return -ENOMEM;
1745	error = request_irq(irq, handler, 0, name, ndev);
1746	if (error)
1747		netdev_err(ndev, "cannot request IRQ %s\n", name);
1748
1749	return error;
1750}
1751
1752/* Network device open function for Ethernet AVB */
1753static int ravb_open(struct net_device *ndev)
1754{
1755	struct ravb_private *priv = netdev_priv(ndev);
1756	const struct ravb_hw_info *info = priv->info;
1757	struct platform_device *pdev = priv->pdev;
1758	struct device *dev = &pdev->dev;
1759	int error;
1760
1761	napi_enable(&priv->napi[RAVB_BE]);
1762	if (info->nc_queues)
1763		napi_enable(&priv->napi[RAVB_NC]);
1764
1765	if (!info->multi_irqs) {
1766		error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
1767				    ndev->name, ndev);
1768		if (error) {
1769			netdev_err(ndev, "cannot request IRQ\n");
1770			goto out_napi_off;
1771		}
1772	} else {
1773		error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
1774				      dev, "ch22:multi");
1775		if (error)
1776			goto out_napi_off;
1777		error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
1778				      dev, "ch24:emac");
1779		if (error)
1780			goto out_free_irq;
1781		error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
1782				      ndev, dev, "ch0:rx_be");
1783		if (error)
1784			goto out_free_irq_emac;
1785		error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
1786				      ndev, dev, "ch18:tx_be");
1787		if (error)
1788			goto out_free_irq_be_rx;
1789		error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
1790				      ndev, dev, "ch1:rx_nc");
1791		if (error)
1792			goto out_free_irq_be_tx;
1793		error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
1794				      ndev, dev, "ch19:tx_nc");
1795		if (error)
1796			goto out_free_irq_nc_rx;
1797
1798		if (info->err_mgmt_irqs) {
1799			error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
1800					      ndev, dev, "err_a");
1801			if (error)
1802				goto out_free_irq_nc_tx;
1803			error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
1804					      ndev, dev, "mgmt_a");
1805			if (error)
1806				goto out_free_irq_erra;
1807		}
1808	}
1809
1810	/* Device init */
1811	error = ravb_dmac_init(ndev);
1812	if (error)
1813		goto out_free_irq_mgmta;
1814	ravb_emac_init(ndev);
1815
1816	/* Initialise PTP Clock driver */
1817	if (info->gptp)
1818		ravb_ptp_init(ndev, priv->pdev);
1819
1820	/* PHY control start */
1821	error = ravb_phy_start(ndev);
1822	if (error)
1823		goto out_ptp_stop;
1824
1825	netif_tx_start_all_queues(ndev);
1826
1827	return 0;
1828
1829out_ptp_stop:
1830	/* Stop PTP Clock driver */
1831	if (info->gptp)
1832		ravb_ptp_stop(ndev);
1833	ravb_stop_dma(ndev);
1834out_free_irq_mgmta:
1835	if (!info->multi_irqs)
1836		goto out_free_irq;
1837	if (info->err_mgmt_irqs)
1838		free_irq(priv->mgmta_irq, ndev);
1839out_free_irq_erra:
1840	if (info->err_mgmt_irqs)
1841		free_irq(priv->erra_irq, ndev);
1842out_free_irq_nc_tx:
1843	free_irq(priv->tx_irqs[RAVB_NC], ndev);
1844out_free_irq_nc_rx:
1845	free_irq(priv->rx_irqs[RAVB_NC], ndev);
1846out_free_irq_be_tx:
1847	free_irq(priv->tx_irqs[RAVB_BE], ndev);
1848out_free_irq_be_rx:
1849	free_irq(priv->rx_irqs[RAVB_BE], ndev);
1850out_free_irq_emac:
1851	free_irq(priv->emac_irq, ndev);
1852out_free_irq:
1853	free_irq(ndev->irq, ndev);
1854out_napi_off:
1855	if (info->nc_queues)
1856		napi_disable(&priv->napi[RAVB_NC]);
1857	napi_disable(&priv->napi[RAVB_BE]);
1858	return error;
1859}
1860
1861/* Timeout function for Ethernet AVB */
1862static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1863{
1864	struct ravb_private *priv = netdev_priv(ndev);
1865
1866	netif_err(priv, tx_err, ndev,
1867		  "transmit timed out, status %08x, resetting...\n",
1868		  ravb_read(ndev, ISS));
1869
1870	/* Bump the tx_errors counter */
1871	ndev->stats.tx_errors++;
1872
1873	schedule_work(&priv->work);
1874}
1875
1876static void ravb_tx_timeout_work(struct work_struct *work)
1877{
1878	struct ravb_private *priv = container_of(work, struct ravb_private,
1879						 work);
1880	const struct ravb_hw_info *info = priv->info;
1881	struct net_device *ndev = priv->ndev;
1882	int error;
1883
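	/* Take RTNL without blocking and reschedule on contention: the
	 * close path runs cancel_work_sync() while holding RTNL, so
	 * sleeping on the lock here could deadlock.
	 */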
1884	if (!rtnl_trylock()) {
1885		usleep_range(1000, 2000);
1886		schedule_work(&priv->work);
1887		return;
1888	}
1889
1890	netif_tx_stop_all_queues(ndev);
1891
1892	/* Stop PTP Clock driver */
1893	if (info->gptp)
1894		ravb_ptp_stop(ndev);
1895
1896	/* Wait for DMA stopping */
1897	if (ravb_stop_dma(ndev)) {
1898		/* If ravb_stop_dma() fails, the hardware is still operating
1899		 * for TX and/or RX, so the functions below must not be
1900		 * called, as ravb_dmac_init() could fail as well. Retrying
1901		 * ravb_stop_dma() here is not an option either, since that
1902		 * could end up waiting forever. So just re-enable TX and
1903		 * RX here and skip the following re-initialization
1904		 * procedure.
1905		 */
1906		ravb_rcv_snd_enable(ndev);
1907		goto out;
1908	}
1909
1910	ravb_ring_free(ndev, RAVB_BE);
1911	if (info->nc_queues)
1912		ravb_ring_free(ndev, RAVB_NC);
1913
1914	/* Device init */
1915	error = ravb_dmac_init(ndev);
1916	if (error) {
1917		/* If ravb_dmac_init() fails, descriptors are freed. So, this
1918		 * should return here to avoid re-enabling the TX and RX in
1919		 * ravb_emac_init().
1920		 */
1921		netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
1922			   __func__, error);
1923		goto out_unlock;
1924	}
1925	ravb_emac_init(ndev);
1926
1927out:
1928	/* Initialise PTP Clock driver */
1929	if (info->gptp)
1930		ravb_ptp_init(ndev, priv->pdev);
1931
1932	netif_tx_start_all_queues(ndev);
1933
1934out_unlock:
1935	rtnl_unlock();
1936}
1937
1938/* Packet transmit function for Ethernet AVB */
1939static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1940{
1941	struct ravb_private *priv = netdev_priv(ndev);
1942	const struct ravb_hw_info *info = priv->info;
1943	unsigned int num_tx_desc = priv->num_tx_desc;
1944	u16 q = skb_get_queue_mapping(skb);
1945	struct ravb_tstamp_skb *ts_skb;
1946	struct ravb_tx_desc *desc;
1947	unsigned long flags;
1948	dma_addr_t dma_addr;
1949	void *buffer;
1950	u32 entry;
1951	u32 len;
1952
1953	spin_lock_irqsave(&priv->lock, flags);
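	/* One slot of num_tx_desc descriptors is kept unused so that a
	 * full ring can be distinguished from an empty one
	 * (cur_tx == dirty_tx).
	 */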
1954	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
1955	    num_tx_desc) {
1956		netif_err(priv, tx_queued, ndev,
1957			  "still transmitting with the full ring!\n");
1958		netif_stop_subqueue(ndev, q);
1959		spin_unlock_irqrestore(&priv->lock, flags);
1960		return NETDEV_TX_BUSY;
1961	}
1962
1963	if (skb_put_padto(skb, ETH_ZLEN))
1964		goto exit;
1965
1966	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
1967	priv->tx_skb[q][entry / num_tx_desc] = skb;
1968
1969	if (num_tx_desc > 1) {
1970		buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
1971			 entry / num_tx_desc * DPTR_ALIGN;
1972		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
1973
1974		/* Zero length DMA descriptors are problematic as they seem
1975		 * to terminate DMA transfers. Avoid them by simply using a
1976		 * length of DPTR_ALIGN (4) when skb data is aligned to
1977		 * DPTR_ALIGN.
1978		 *
1979		 * As skb is guaranteed to have at least ETH_ZLEN (60)
1980		 * bytes of data by the call to skb_put_padto() above this
1981		 * is safe with respect to both the length of the first DMA
1982		 * descriptor (len) overflowing the available data and the
1983		 * length of the second DMA descriptor (skb->len - len)
1984		 * being negative.
1985		 */
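		/* Example: with skb->data already 4-byte aligned, len would
		 * be 0; bumping it to DPTR_ALIGN (4) keeps the first
		 * descriptor non-empty while the second one still covers the
		 * remaining skb->len - 4 bytes.
		 */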
1986		if (len == 0)
1987			len = DPTR_ALIGN;
1988
1989		memcpy(buffer, skb->data, len);
1990		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1991					  DMA_TO_DEVICE);
1992		if (dma_mapping_error(ndev->dev.parent, dma_addr))
1993			goto drop;
1994
1995		desc = &priv->tx_ring[q][entry];
1996		desc->ds_tagl = cpu_to_le16(len);
1997		desc->dptr = cpu_to_le32(dma_addr);
1998
1999		buffer = skb->data + len;
2000		len = skb->len - len;
2001		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
2002					  DMA_TO_DEVICE);
2003		if (dma_mapping_error(ndev->dev.parent, dma_addr))
2004			goto unmap;
2005
2006		desc++;
2007	} else {
2008		desc = &priv->tx_ring[q][entry];
2009		len = skb->len;
2010		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
2011					  DMA_TO_DEVICE);
2012		if (dma_mapping_error(ndev->dev.parent, dma_addr))
2013			goto drop;
2014	}
2015	desc->ds_tagl = cpu_to_le16(len);
2016	desc->dptr = cpu_to_le32(dma_addr);
2017
2018	/* TX timestamp required */
2019	if (info->gptp || info->ccc_gac) {
2020		if (q == RAVB_NC) {
2021			ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
2022			if (!ts_skb) {
2023				if (num_tx_desc > 1) {
2024					desc--;
2025					dma_unmap_single(ndev->dev.parent, dma_addr,
2026							 len, DMA_TO_DEVICE);
2027				}
2028				goto unmap;
2029			}
2030			ts_skb->skb = skb_get(skb);
2031			ts_skb->tag = priv->ts_skb_tag++;
2032			priv->ts_skb_tag &= 0x3ff;
2033			list_add_tail(&ts_skb->list, &priv->ts_skb_list);
2034
2035			/* TAG and timestamp required flag */
2036			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2037			desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
2038			desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
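			/* The 10-bit tag is thus split: bits [9:4] go into
			 * tagh_tsr, bits [3:0] into the top nibble of
			 * ds_tagl.
			 */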
2039		}
2040
2041		skb_tx_timestamp(skb);
2042	}
2043	/* Descriptor type must be set after all the above writes */
2044	dma_wmb();
2045	if (num_tx_desc > 1) {
2046		desc->die_dt = DT_FEND;
2047		desc--;
2048		desc->die_dt = DT_FSTART;
2049	} else {
2050		desc->die_dt = DT_FSINGLE;
2051	}
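	/* In the two-descriptor case above, FEND is made valid before
	 * FSTART so the DMAC never observes a frame start without its
	 * matching end. The write below then requests transmission on
	 * queue q.
	 */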
2052	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
2053
2054	priv->cur_tx[q] += num_tx_desc;
2055	if (priv->cur_tx[q] - priv->dirty_tx[q] >
2056	    (priv->num_tx_ring[q] - 1) * num_tx_desc &&
2057	    !ravb_tx_free(ndev, q, true))
2058		netif_stop_subqueue(ndev, q);
2059
2060exit:
2061	spin_unlock_irqrestore(&priv->lock, flags);
2062	return NETDEV_TX_OK;
2063
2064unmap:
2065	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
2066			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
2067drop:
2068	dev_kfree_skb_any(skb);
2069	priv->tx_skb[q][entry / num_tx_desc] = NULL;
2070	goto exit;
2071}
2072
2073static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
2074			     struct net_device *sb_dev)
2075{
2076	/* If skb needs TX timestamp, it is handled in network control queue */
2077	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
2078							       RAVB_BE;
2080}
2081
2082static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
2083{
2084	struct ravb_private *priv = netdev_priv(ndev);
2085	const struct ravb_hw_info *info = priv->info;
2086	struct net_device_stats *nstats, *stats0, *stats1;
2087
2088	nstats = &ndev->stats;
2089	stats0 = &priv->stats[RAVB_BE];
2090
2091	if (info->tx_counters) {
2092		nstats->tx_dropped += ravb_read(ndev, TROCR);
2093		ravb_write(ndev, 0, TROCR);	/* (write clear) */
2094	}
2095
2096	if (info->carrier_counters) {
2097		nstats->collisions += ravb_read(ndev, CXR41);
2098		ravb_write(ndev, 0, CXR41);	/* (write clear) */
2099		nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
2100		ravb_write(ndev, 0, CXR42);	/* (write clear) */
2101	}
2102
2103	nstats->rx_packets = stats0->rx_packets;
2104	nstats->tx_packets = stats0->tx_packets;
2105	nstats->rx_bytes = stats0->rx_bytes;
2106	nstats->tx_bytes = stats0->tx_bytes;
2107	nstats->multicast = stats0->multicast;
2108	nstats->rx_errors = stats0->rx_errors;
2109	nstats->rx_crc_errors = stats0->rx_crc_errors;
2110	nstats->rx_frame_errors = stats0->rx_frame_errors;
2111	nstats->rx_length_errors = stats0->rx_length_errors;
2112	nstats->rx_missed_errors = stats0->rx_missed_errors;
2113	nstats->rx_over_errors = stats0->rx_over_errors;
2114	if (info->nc_queues) {
2115		stats1 = &priv->stats[RAVB_NC];
2116
2117		nstats->rx_packets += stats1->rx_packets;
2118		nstats->tx_packets += stats1->tx_packets;
2119		nstats->rx_bytes += stats1->rx_bytes;
2120		nstats->tx_bytes += stats1->tx_bytes;
2121		nstats->multicast += stats1->multicast;
2122		nstats->rx_errors += stats1->rx_errors;
2123		nstats->rx_crc_errors += stats1->rx_crc_errors;
2124		nstats->rx_frame_errors += stats1->rx_frame_errors;
2125		nstats->rx_length_errors += stats1->rx_length_errors;
2126		nstats->rx_missed_errors += stats1->rx_missed_errors;
2127		nstats->rx_over_errors += stats1->rx_over_errors;
2128	}
2129
2130	return nstats;
2131}
2132
2133/* Update promiscuous bit */
2134static void ravb_set_rx_mode(struct net_device *ndev)
2135{
2136	struct ravb_private *priv = netdev_priv(ndev);
2137	unsigned long flags;
2138
2139	spin_lock_irqsave(&priv->lock, flags);
2140	ravb_modify(ndev, ECMR, ECMR_PRM,
2141		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
2142	spin_unlock_irqrestore(&priv->lock, flags);
2143}
2144
2145/* Device close function for Ethernet AVB */
2146static int ravb_close(struct net_device *ndev)
2147{
2148	struct device_node *np = ndev->dev.parent->of_node;
2149	struct ravb_private *priv = netdev_priv(ndev);
2150	const struct ravb_hw_info *info = priv->info;
2151	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
2152
2153	netif_tx_stop_all_queues(ndev);
2154
2155	/* Disable interrupts by clearing the interrupt masks. */
2156	ravb_write(ndev, 0, RIC0);
2157	ravb_write(ndev, 0, RIC2);
2158	ravb_write(ndev, 0, TIC);
2159
2160	/* Stop PTP Clock driver */
2161	if (info->gptp)
2162		ravb_ptp_stop(ndev);
2163
2164	/* Set the config mode to stop the AVB-DMAC's processes */
2165	if (ravb_stop_dma(ndev) < 0)
2166		netdev_err(ndev,
2167			   "device will be stopped after h/w processes are done.\n");
2168
2169	/* Clear the timestamp list */
2170	if (info->gptp || info->ccc_gac) {
2171		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
2172			list_del(&ts_skb->list);
2173			kfree_skb(ts_skb->skb);
2174			kfree(ts_skb);
2175		}
2176	}
2177
2178	/* PHY disconnect */
2179	if (ndev->phydev) {
2180		phy_stop(ndev->phydev);
2181		phy_disconnect(ndev->phydev);
2182		if (of_phy_is_fixed_link(np))
2183			of_phy_deregister_fixed_link(np);
2184	}
2185
2186	cancel_work_sync(&priv->work);
2187
2188	if (info->multi_irqs) {
2189		free_irq(priv->tx_irqs[RAVB_NC], ndev);
2190		free_irq(priv->rx_irqs[RAVB_NC], ndev);
2191		free_irq(priv->tx_irqs[RAVB_BE], ndev);
2192		free_irq(priv->rx_irqs[RAVB_BE], ndev);
2193		free_irq(priv->emac_irq, ndev);
2194		if (info->err_mgmt_irqs) {
2195			free_irq(priv->erra_irq, ndev);
2196			free_irq(priv->mgmta_irq, ndev);
2197		}
2198	}
2199	free_irq(ndev->irq, ndev);
2200
2201	if (info->nc_queues)
2202		napi_disable(&priv->napi[RAVB_NC]);
2203	napi_disable(&priv->napi[RAVB_BE]);
2204
2205	/* Free all the skb's in the RX queue and the DMA buffers. */
2206	ravb_ring_free(ndev, RAVB_BE);
2207	if (info->nc_queues)
2208		ravb_ring_free(ndev, RAVB_NC);
2209
2210	return 0;
2211}
2212
2213static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
2214{
2215	struct ravb_private *priv = netdev_priv(ndev);
2216	struct hwtstamp_config config;
2217
2218	config.flags = 0;
2219	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
2220						HWTSTAMP_TX_OFF;
2221	switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
2222	case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
2223		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
2224		break;
2225	case RAVB_RXTSTAMP_TYPE_ALL:
2226		config.rx_filter = HWTSTAMP_FILTER_ALL;
2227		break;
2228	default:
2229		config.rx_filter = HWTSTAMP_FILTER_NONE;
2230	}
2231
2232	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
2233		-EFAULT : 0;
2234}
2235
2236/* Control hardware time stamping */
2237static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
2238{
2239	struct ravb_private *priv = netdev_priv(ndev);
2240	struct hwtstamp_config config;
2241	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
2242	u32 tstamp_tx_ctrl;
2243
2244	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
2245		return -EFAULT;
2246
2247	switch (config.tx_type) {
2248	case HWTSTAMP_TX_OFF:
2249		tstamp_tx_ctrl = 0;
2250		break;
2251	case HWTSTAMP_TX_ON:
2252		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
2253		break;
2254	default:
2255		return -ERANGE;
2256	}
2257
2258	switch (config.rx_filter) {
2259	case HWTSTAMP_FILTER_NONE:
2260		tstamp_rx_ctrl = 0;
2261		break;
2262	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2263		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
2264		break;
2265	default:
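		/* Any other filter is coerced to "timestamp all"; the
		 * coerced value is reported back to user space by the
		 * copy_to_user() below.
		 */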
2266		config.rx_filter = HWTSTAMP_FILTER_ALL;
2267		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
2268	}
2269
2270	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
2271	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
2272
2273	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
2274		-EFAULT : 0;
2275}
2276
2277/* ioctl to device function */
2278static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
2279{
2280	struct phy_device *phydev = ndev->phydev;
2281
2282	if (!netif_running(ndev))
2283		return -EINVAL;
2284
2285	if (!phydev)
2286		return -ENODEV;
2287
2288	switch (cmd) {
2289	case SIOCGHWTSTAMP:
2290		return ravb_hwtstamp_get(ndev, req);
2291	case SIOCSHWTSTAMP:
2292		return ravb_hwtstamp_set(ndev, req);
2293	}
2294
2295	return phy_mii_ioctl(phydev, req, cmd);
2296}
2297
2298static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
2299{
2300	struct ravb_private *priv = netdev_priv(ndev);
2301
2302	ndev->mtu = new_mtu;
2303
2304	if (netif_running(ndev)) {
2305		synchronize_irq(priv->emac_irq);
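		/* Re-program the E-MAC so the new MTU is reflected in the
		 * receive frame limit (RFLR).
		 */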
2306		ravb_emac_init(ndev);
2307	}
2308
2309	netdev_update_features(ndev);
2310
2311	return 0;
2312}
2313
2314static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
2315{
2316	struct ravb_private *priv = netdev_priv(ndev);
2317	unsigned long flags;
2318
2319	spin_lock_irqsave(&priv->lock, flags);
2320
2321	/* Disable TX and RX */
2322	ravb_rcv_snd_disable(ndev);
2323
2324	/* Modify RX Checksum setting */
2325	ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
2326
2327	/* Enable TX and RX */
2328	ravb_rcv_snd_enable(ndev);
2329
2330	spin_unlock_irqrestore(&priv->lock, flags);
2331}
2332
2333static int ravb_set_features_gbeth(struct net_device *ndev,
2334				   netdev_features_t features)
2335{
2336	/* Placeholder */
2337	return 0;
2338}
2339
2340static int ravb_set_features_rcar(struct net_device *ndev,
2341				  netdev_features_t features)
2342{
2343	netdev_features_t changed = ndev->features ^ features;
2344
2345	if (changed & NETIF_F_RXCSUM)
2346		ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
2347
2348	ndev->features = features;
2349
2350	return 0;
2351}
2352
2353static int ravb_set_features(struct net_device *ndev,
2354			     netdev_features_t features)
2355{
2356	struct ravb_private *priv = netdev_priv(ndev);
2357	const struct ravb_hw_info *info = priv->info;
2358
2359	return info->set_feature(ndev, features);
2360}
2361
2362static const struct net_device_ops ravb_netdev_ops = {
2363	.ndo_open		= ravb_open,
2364	.ndo_stop		= ravb_close,
2365	.ndo_start_xmit		= ravb_start_xmit,
2366	.ndo_select_queue	= ravb_select_queue,
2367	.ndo_get_stats		= ravb_get_stats,
2368	.ndo_set_rx_mode	= ravb_set_rx_mode,
2369	.ndo_tx_timeout		= ravb_tx_timeout,
2370	.ndo_eth_ioctl		= ravb_do_ioctl,
2371	.ndo_change_mtu		= ravb_change_mtu,
2372	.ndo_validate_addr	= eth_validate_addr,
2373	.ndo_set_mac_address	= eth_mac_addr,
2374	.ndo_set_features	= ravb_set_features,
2375};
2376
2377/* MDIO bus init function */
2378static int ravb_mdio_init(struct ravb_private *priv)
2379{
2380	struct platform_device *pdev = priv->pdev;
2381	struct device *dev = &pdev->dev;
2382	struct phy_device *phydev;
2383	struct device_node *pn;
2384	int error;
2385
2386	/* Bitbang init */
2387	priv->mdiobb.ops = &bb_ops;
2388
2389	/* MII controller setting */
2390	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
2391	if (!priv->mii_bus)
2392		return -ENOMEM;
2393
2394	/* Hook up MII support for ethtool */
2395	priv->mii_bus->name = "ravb_mii";
2396	priv->mii_bus->parent = dev;
2397	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2398		 pdev->name, pdev->id);
2399
2400	/* Register MDIO bus */
2401	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
2402	if (error)
2403		goto out_free_bus;
2404
2405	pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
2406	phydev = of_phy_find_device(pn);
2407	if (phydev) {
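		/* This driver resumes the PHY itself on system resume, so
		 * keep the PHY core from power-managing it separately.
		 */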
2408		phydev->mac_managed_pm = true;
2409		put_device(&phydev->mdio.dev);
2410	}
2411	of_node_put(pn);
2412
2413	return 0;
2414
2415out_free_bus:
2416	free_mdio_bitbang(priv->mii_bus);
2417	return error;
2418}
2419
2420/* MDIO bus release function */
2421static int ravb_mdio_release(struct ravb_private *priv)
2422{
2423	/* Unregister mdio bus */
2424	mdiobus_unregister(priv->mii_bus);
2425
2426	/* Free bitbang info */
2427	free_mdio_bitbang(priv->mii_bus);
2428
2429	return 0;
2430}
2431
2432static const struct ravb_hw_info ravb_gen3_hw_info = {
2433	.rx_ring_free = ravb_rx_ring_free_rcar,
2434	.rx_ring_format = ravb_rx_ring_format_rcar,
2435	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
2436	.receive = ravb_rx_rcar,
2437	.set_rate = ravb_set_rate_rcar,
2438	.set_feature = ravb_set_features_rcar,
2439	.dmac_init = ravb_dmac_init_rcar,
2440	.emac_init = ravb_emac_init_rcar,
2441	.gstrings_stats = ravb_gstrings_stats,
2442	.gstrings_size = sizeof(ravb_gstrings_stats),
2443	.net_hw_features = NETIF_F_RXCSUM,
2444	.net_features = NETIF_F_RXCSUM,
2445	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2446	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2447	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2448	.rx_max_buf_size = SZ_2K,
2449	.internal_delay = 1,
2450	.tx_counters = 1,
2451	.multi_irqs = 1,
2452	.irq_en_dis = 1,
2453	.ccc_gac = 1,
2454	.nc_queues = 1,
2455	.magic_pkt = 1,
2456};
2457
2458static const struct ravb_hw_info ravb_gen2_hw_info = {
2459	.rx_ring_free = ravb_rx_ring_free_rcar,
2460	.rx_ring_format = ravb_rx_ring_format_rcar,
2461	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
2462	.receive = ravb_rx_rcar,
2463	.set_rate = ravb_set_rate_rcar,
2464	.set_feature = ravb_set_features_rcar,
2465	.dmac_init = ravb_dmac_init_rcar,
2466	.emac_init = ravb_emac_init_rcar,
2467	.gstrings_stats = ravb_gstrings_stats,
2468	.gstrings_size = sizeof(ravb_gstrings_stats),
2469	.net_hw_features = NETIF_F_RXCSUM,
2470	.net_features = NETIF_F_RXCSUM,
2471	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2472	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2473	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2474	.rx_max_buf_size = SZ_2K,
2475	.aligned_tx = 1,
2476	.gptp = 1,
2477	.nc_queues = 1,
2478	.magic_pkt = 1,
2479};
2480
2481static const struct ravb_hw_info ravb_rzv2m_hw_info = {
2482	.rx_ring_free = ravb_rx_ring_free_rcar,
2483	.rx_ring_format = ravb_rx_ring_format_rcar,
2484	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
2485	.receive = ravb_rx_rcar,
2486	.set_rate = ravb_set_rate_rcar,
2487	.set_feature = ravb_set_features_rcar,
2488	.dmac_init = ravb_dmac_init_rcar,
2489	.emac_init = ravb_emac_init_rcar,
2490	.gstrings_stats = ravb_gstrings_stats,
2491	.gstrings_size = sizeof(ravb_gstrings_stats),
2492	.net_hw_features = NETIF_F_RXCSUM,
2493	.net_features = NETIF_F_RXCSUM,
2494	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2495	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2496	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2497	.rx_max_buf_size = SZ_2K,
2498	.multi_irqs = 1,
2499	.err_mgmt_irqs = 1,
2500	.gptp = 1,
2501	.gptp_ref_clk = 1,
2502	.nc_queues = 1,
2503	.magic_pkt = 1,
2504};
2505
2506static const struct ravb_hw_info gbeth_hw_info = {
2507	.rx_ring_free = ravb_rx_ring_free_gbeth,
2508	.rx_ring_format = ravb_rx_ring_format_gbeth,
2509	.alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
2510	.receive = ravb_rx_gbeth,
2511	.set_rate = ravb_set_rate_gbeth,
2512	.set_feature = ravb_set_features_gbeth,
2513	.dmac_init = ravb_dmac_init_gbeth,
2514	.emac_init = ravb_emac_init_gbeth,
2515	.gstrings_stats = ravb_gstrings_stats_gbeth,
2516	.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
2517	.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
2518	.max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
2519	.tccr_mask = TCCR_TSRQ0,
2520	.rx_max_buf_size = SZ_8K,
2521	.aligned_tx = 1,
2522	.tx_counters = 1,
2523	.carrier_counters = 1,
2524	.half_duplex = 1,
2525};
2526
2527static const struct of_device_id ravb_match_table[] = {
2528	{ .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
2529	{ .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
2530	{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
2531	{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
2532	{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
2533	{ .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
2534	{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
2535	{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
2536	{ }
2537};
2538MODULE_DEVICE_TABLE(of, ravb_match_table);
2539
2540static int ravb_set_gti(struct net_device *ndev)
2541{
2542	struct ravb_private *priv = netdev_priv(ndev);
2543	const struct ravb_hw_info *info = priv->info;
2544	struct device *dev = ndev->dev.parent;
2545	unsigned long rate;
2546	u64 inc;
2547
2548	if (info->gptp_ref_clk)
2549		rate = clk_get_rate(priv->gptp_clk);
2550	else
2551		rate = clk_get_rate(priv->clk);
2552	if (!rate)
2553		return -EINVAL;
2554
2555	inc = div64_ul(1000000000ULL << 20, rate);
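	/* GTI.TIV holds the timer increment in units of 2^-20 ns.
	 * Example: a 125 MHz gPTP clock gives
	 * inc = (10^9 << 20) / 125000000 = 0x800000, i.e. 8 ns per tick.
	 */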
2556
2557	if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
2558		dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
2559			inc, GTI_TIV_MIN, GTI_TIV_MAX);
2560		return -EINVAL;
2561	}
2562
2563	ravb_write(ndev, inc, GTI);
2564
2565	return 0;
2566}
2567
2568static int ravb_set_config_mode(struct net_device *ndev)
2569{
2570	struct ravb_private *priv = netdev_priv(ndev);
2571	const struct ravb_hw_info *info = priv->info;
2572	int error;
2573
2574	if (info->gptp) {
2575		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
2576		if (error)
2577			return error;
2578		/* Set CSEL value */
2579		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
2580	} else if (info->ccc_gac) {
2581		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
2582	} else {
2583		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
2584	}
2585
2586	return error;
2587}
2588
2589/* Set tx and rx clock internal delay modes */
2590static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
2591{
2592	struct ravb_private *priv = netdev_priv(ndev);
2593	bool explicit_delay = false;
2594	u32 delay;
2595
2596	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
2597		/* Valid values are 0 and 1800, according to DT bindings */
2598		priv->rxcidm = !!delay;
2599		explicit_delay = true;
2600	}
2601	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
2602		/* Valid values are 0 and 2000, according to DT bindings */
2603		priv->txcidm = !!delay;
2604		explicit_delay = true;
2605	}
2606
2607	if (explicit_delay)
2608		return;
2609
2610	/* Fall back to legacy rgmii-*id behavior */
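	/* rgmii_override records that the delay was derived from the
	 * legacy phy-mode, presumably so the PHY setup path can avoid
	 * adding the PHY's own internal delay on top.
	 */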
2611	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
2612	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
2613		priv->rxcidm = 1;
2614		priv->rgmii_override = 1;
2615	}
2616
2617	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
2618	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
2619		priv->txcidm = 1;
2620		priv->rgmii_override = 1;
2621	}
2622}
2623
2624static void ravb_set_delay_mode(struct net_device *ndev)
2625{
2626	struct ravb_private *priv = netdev_priv(ndev);
2627	u32 set = 0;
2628
2629	if (priv->rxcidm)
2630		set |= APSR_RDM;
2631	if (priv->txcidm)
2632		set |= APSR_TDM;
2633	ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
2634}
2635
2636static int ravb_probe(struct platform_device *pdev)
2637{
2638	struct device_node *np = pdev->dev.of_node;
2639	const struct ravb_hw_info *info;
2640	struct reset_control *rstc;
2641	struct ravb_private *priv;
2642	struct net_device *ndev;
2643	int error, irq, q;
2644	struct resource *res;
2645	int i;
2646
2647	if (!np) {
2648		dev_err(&pdev->dev,
2649			"this driver must be instantiated from the device tree\n");
2650		return -EINVAL;
2651	}
2652
2653	rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
2654	if (IS_ERR(rstc))
2655		return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
2656				     "failed to get cpg reset\n");
2657
2658	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
2659				  NUM_TX_QUEUE, NUM_RX_QUEUE);
2660	if (!ndev)
2661		return -ENOMEM;
2662
2663	info = of_device_get_match_data(&pdev->dev);
2664
2665	ndev->features = info->net_features;
2666	ndev->hw_features = info->net_hw_features;
2667
2668	error = reset_control_deassert(rstc);
2669	if (error)
2670		goto out_free_netdev;
2671
2672	pm_runtime_enable(&pdev->dev);
2673	error = pm_runtime_resume_and_get(&pdev->dev);
2674	if (error < 0)
2675		goto out_rpm_disable;
2676
2677	if (info->multi_irqs) {
2678		if (info->err_mgmt_irqs)
2679			irq = platform_get_irq_byname(pdev, "dia");
2680		else
2681			irq = platform_get_irq_byname(pdev, "ch22");
2682	} else {
2683		irq = platform_get_irq(pdev, 0);
2684	}
2685	if (irq < 0) {
2686		error = irq;
2687		goto out_release;
2688	}
2689	ndev->irq = irq;
2690
2691	SET_NETDEV_DEV(ndev, &pdev->dev);
2692
2693	priv = netdev_priv(ndev);
2694	priv->info = info;
2695	priv->rstc = rstc;
2696	priv->ndev = ndev;
2697	priv->pdev = pdev;
2698	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
2699	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
2700	if (info->nc_queues) {
2701		priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
2702		priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
2703	}
2704
2705	priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2706	if (IS_ERR(priv->addr)) {
2707		error = PTR_ERR(priv->addr);
2708		goto out_release;
2709	}
2710
2711	/* The Ether-specific entries in the device structure. */
2712	ndev->base_addr = res->start;
2713
2714	spin_lock_init(&priv->lock);
2715	INIT_WORK(&priv->work, ravb_tx_timeout_work);
2716
2717	error = of_get_phy_mode(np, &priv->phy_interface);
2718	if (error && error != -ENODEV)
2719		goto out_release;
2720
2721	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
2722	priv->avb_link_active_low =
2723		of_property_read_bool(np, "renesas,ether-link-active-low");
2724
2725	if (info->multi_irqs) {
2726		if (info->err_mgmt_irqs)
2727			irq = platform_get_irq_byname(pdev, "line3");
2728		else
2729			irq = platform_get_irq_byname(pdev, "ch24");
2730		if (irq < 0) {
2731			error = irq;
2732			goto out_release;
2733		}
2734		priv->emac_irq = irq;
2735		for (i = 0; i < NUM_RX_QUEUE; i++) {
2736			irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
2737			if (irq < 0) {
2738				error = irq;
2739				goto out_release;
2740			}
2741			priv->rx_irqs[i] = irq;
2742		}
2743		for (i = 0; i < NUM_TX_QUEUE; i++) {
2744			irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
2745			if (irq < 0) {
2746				error = irq;
2747				goto out_release;
2748			}
2749			priv->tx_irqs[i] = irq;
2750		}
2751
2752		if (info->err_mgmt_irqs) {
2753			irq = platform_get_irq_byname(pdev, "err_a");
2754			if (irq < 0) {
2755				error = irq;
2756				goto out_release;
2757			}
2758			priv->erra_irq = irq;
2759
2760			irq = platform_get_irq_byname(pdev, "mgmt_a");
2761			if (irq < 0) {
2762				error = irq;
2763				goto out_release;
2764			}
2765			priv->mgmta_irq = irq;
2766		}
2767	}
2768
2769	priv->clk = devm_clk_get(&pdev->dev, NULL);
2770	if (IS_ERR(priv->clk)) {
2771		error = PTR_ERR(priv->clk);
2772		goto out_release;
2773	}
2774
2775	priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
2776	if (IS_ERR(priv->refclk)) {
2777		error = PTR_ERR(priv->refclk);
2778		goto out_release;
2779	}
2780	clk_prepare_enable(priv->refclk);
2781
2782	if (info->gptp_ref_clk) {
2783		priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
2784		if (IS_ERR(priv->gptp_clk)) {
2785			error = PTR_ERR(priv->gptp_clk);
2786			goto out_disable_refclk;
2787		}
2788		clk_prepare_enable(priv->gptp_clk);
2789	}
2790
2791	ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
2792	ndev->min_mtu = ETH_MIN_MTU;
2793
2794	/* FIXME: R-Car Gen2 has a 4-byte alignment restriction for the TX
2795	 * buffer. Use two descriptors to handle such a situation: the first
2796	 * descriptor handles the aligned data buffer and the second handles
2797	 * the overflow data caused by the alignment.
2798	 */
2799	priv->num_tx_desc = info->aligned_tx ? 2 : 1;
2800
2801	/* Set function */
2802	ndev->netdev_ops = &ravb_netdev_ops;
2803	ndev->ethtool_ops = &ravb_ethtool_ops;
2804
2805	/* Set AVB config mode */
2806	error = ravb_set_config_mode(ndev);
2807	if (error)
2808		goto out_disable_gptp_clk;
2809
2810	if (info->gptp || info->ccc_gac) {
2811		/* Set GTI value */
2812		error = ravb_set_gti(ndev);
2813		if (error)
2814			goto out_disable_gptp_clk;
2815
2816		/* Request GTI loading */
2817		ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
2818	}
2819
2820	if (info->internal_delay) {
2821		ravb_parse_delay_mode(np, ndev);
2822		ravb_set_delay_mode(ndev);
2823	}
2824
2825	/* Allocate descriptor base address table */
2826	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
2827	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
2828					    &priv->desc_bat_dma, GFP_KERNEL);
2829	if (!priv->desc_bat) {
2830		dev_err(&pdev->dev,
2831			"Cannot allocate desc base address table (size %d bytes)\n",
2832			priv->desc_bat_size);
2833		error = -ENOMEM;
2834		goto out_disable_gptp_clk;
2835	}
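	/* Mark every descriptor base address table entry as end-of-set so
	 * that queues which are never formatted stay inert, then hand the
	 * table to the DMAC below.
	 */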
2836	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
2837		priv->desc_bat[q].die_dt = DT_EOS;
2838	ravb_write(ndev, priv->desc_bat_dma, DBAT);
2839
2840	/* Initialise HW timestamp list */
2841	INIT_LIST_HEAD(&priv->ts_skb_list);
2842
2843	/* Initialise PTP Clock driver */
2844	if (info->ccc_gac)
2845		ravb_ptp_init(ndev, pdev);
2846
2847	/* Debug message level */
2848	priv->msg_enable = RAVB_DEF_MSG_ENABLE;
2849
2850	/* Read and set MAC address */
2851	ravb_read_mac_address(np, ndev);
2852	if (!is_valid_ether_addr(ndev->dev_addr)) {
2853		dev_warn(&pdev->dev,
2854			 "no valid MAC address supplied, using a random one\n");
2855		eth_hw_addr_random(ndev);
2856	}
2857
2858	/* MDIO bus init */
2859	error = ravb_mdio_init(priv);
2860	if (error) {
2861		dev_err(&pdev->dev, "failed to initialize MDIO\n");
2862		goto out_dma_free;
2863	}
2864
2865	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
2866	if (info->nc_queues)
2867		netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
2868
2869	/* Network device register */
2870	error = register_netdev(ndev);
2871	if (error)
2872		goto out_napi_del;
2873
2874	device_set_wakeup_capable(&pdev->dev, 1);
2875
2876	/* Print device information */
2877	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
2878		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2879
2880	platform_set_drvdata(pdev, ndev);
2881
2882	return 0;
2883
2884out_napi_del:
2885	if (info->nc_queues)
2886		netif_napi_del(&priv->napi[RAVB_NC]);
2887
2888	netif_napi_del(&priv->napi[RAVB_BE]);
2889	ravb_mdio_release(priv);
2890out_dma_free:
2891	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
2892			  priv->desc_bat_dma);
2893
2894	/* Stop PTP Clock driver */
2895	if (info->ccc_gac)
2896		ravb_ptp_stop(ndev);
2897out_disable_gptp_clk:
2898	clk_disable_unprepare(priv->gptp_clk);
2899out_disable_refclk:
2900	clk_disable_unprepare(priv->refclk);
2901out_release:
2902	pm_runtime_put(&pdev->dev);
2903out_rpm_disable:
2904	pm_runtime_disable(&pdev->dev);
2905	reset_control_assert(rstc);
2906out_free_netdev:
2907	free_netdev(ndev);
2908	return error;
2909}
2910
2911static void ravb_remove(struct platform_device *pdev)
2912{
2913	struct net_device *ndev = platform_get_drvdata(pdev);
2914	struct ravb_private *priv = netdev_priv(ndev);
2915	const struct ravb_hw_info *info = priv->info;
2916
2917	unregister_netdev(ndev);
2918	if (info->nc_queues)
2919		netif_napi_del(&priv->napi[RAVB_NC]);
2920	netif_napi_del(&priv->napi[RAVB_BE]);
2921
2922	ravb_mdio_release(priv);
2923
2924	/* Stop PTP Clock driver */
2925	if (info->ccc_gac)
2926		ravb_ptp_stop(ndev);
2927
2928	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
2929			  priv->desc_bat_dma);
2930
2931	ravb_set_opmode(ndev, CCC_OPC_RESET);
2932
2933	clk_disable_unprepare(priv->gptp_clk);
2934	clk_disable_unprepare(priv->refclk);
2935
2936	pm_runtime_put_sync(&pdev->dev);
2937	pm_runtime_disable(&pdev->dev);
2938	reset_control_assert(priv->rstc);
2939	free_netdev(ndev);
2940	platform_set_drvdata(pdev, NULL);
2941}
2942
2943static int ravb_wol_setup(struct net_device *ndev)
2944{
2945	struct ravb_private *priv = netdev_priv(ndev);
2946	const struct ravb_hw_info *info = priv->info;
2947
2948	/* Disable interrupts by clearing the interrupt masks. */
2949	ravb_write(ndev, 0, RIC0);
2950	ravb_write(ndev, 0, RIC2);
2951	ravb_write(ndev, 0, TIC);
2952
2953	/* Only allow ECI interrupts */
2954	synchronize_irq(priv->emac_irq);
2955	if (info->nc_queues)
2956		napi_disable(&priv->napi[RAVB_NC]);
2957	napi_disable(&priv->napi[RAVB_BE]);
2958	ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
2959
2960	/* Enable MagicPacket */
2961	ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
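	/* From here on, a received Magic Packet latches ECSR.MPD and
	 * raises the E-MAC interrupt, which is made wake-capable below.
	 * WoL is typically armed from user space, e.g.
	 * "ethtool -s eth0 wol g".
	 */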
2962
2963	return enable_irq_wake(priv->emac_irq);
2964}
2965
2966static int ravb_wol_restore(struct net_device *ndev)
2967{
2968	struct ravb_private *priv = netdev_priv(ndev);
2969	const struct ravb_hw_info *info = priv->info;
2970
2971	if (info->nc_queues)
2972		napi_enable(&priv->napi[RAVB_NC]);
2973	napi_enable(&priv->napi[RAVB_BE]);
2974
2975	/* Disable MagicPacket */
2976	ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
2977
2978	ravb_close(ndev);
2979
2980	return disable_irq_wake(priv->emac_irq);
2981}
2982
2983static int __maybe_unused ravb_suspend(struct device *dev)
2984{
2985	struct net_device *ndev = dev_get_drvdata(dev);
2986	struct ravb_private *priv = netdev_priv(ndev);
2987	int ret;
2988
2989	if (!netif_running(ndev))
2990		return 0;
2991
2992	netif_device_detach(ndev);
2993
2994	if (priv->wol_enabled)
2995		ret = ravb_wol_setup(ndev);
2996	else
2997		ret = ravb_close(ndev);
2998
2999	if (priv->info->ccc_gac)
3000		ravb_ptp_stop(ndev);
3001
3002	return ret;
3003}
3004
3005static int __maybe_unused ravb_resume(struct device *dev)
3006{
3007	struct net_device *ndev = dev_get_drvdata(dev);
3008	struct ravb_private *priv = netdev_priv(ndev);
3009	const struct ravb_hw_info *info = priv->info;
3010	int ret = 0;
3011
3012	/* If WoL is enabled, set reset mode to rearm the WoL logic */
3013	if (priv->wol_enabled) {
3014		ret = ravb_set_opmode(ndev, CCC_OPC_RESET);
3015		if (ret)
3016			return ret;
3017	}
3018
3019	/* All registers have been reset to their default values.
3020	 * Restore all registers that were set up at probe time and
3021	 * reopen the device if it was running before the system suspended.
3022	 */
3023
3024	/* Set AVB config mode */
3025	ret = ravb_set_config_mode(ndev);
3026	if (ret)
3027		return ret;
3028
3029	if (info->gptp || info->ccc_gac) {
3030		/* Set GTI value */
3031		ret = ravb_set_gti(ndev);
3032		if (ret)
3033			return ret;
3034
3035		/* Request GTI loading */
3036		ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
3037	}
3038
3039	if (info->internal_delay)
3040		ravb_set_delay_mode(ndev);
3041
3042	/* Restore descriptor base address table */
3043	ravb_write(ndev, priv->desc_bat_dma, DBAT);
3044
3045	if (priv->info->ccc_gac)
3046		ravb_ptp_init(ndev, priv->pdev);
3047
3048	if (netif_running(ndev)) {
3049		if (priv->wol_enabled) {
3050			ret = ravb_wol_restore(ndev);
3051			if (ret)
3052				return ret;
3053		}
3054		ret = ravb_open(ndev);
3055		if (ret < 0)
3056			return ret;
3057		ravb_set_rx_mode(ndev);
3058		netif_device_attach(ndev);
3059	}
3060
3061	return ret;
3062}
3063
3064static int __maybe_unused ravb_runtime_nop(struct device *dev)
3065{
3066	/* Runtime PM callback shared between ->runtime_suspend()
3067	 * and ->runtime_resume(). Simply returns success.
3068	 *
3069	 * This driver re-initializes all registers after
3070	 * pm_runtime_get_sync() anyway so there is no need
3071	 * to save and restore registers here.
3072	 */
3073	return 0;
3074}
3075
3076static const struct dev_pm_ops ravb_dev_pm_ops = {
3077	SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
3078	SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
3079};
3080
3081static struct platform_driver ravb_driver = {
3082	.probe		= ravb_probe,
3083	.remove_new	= ravb_remove,
3084	.driver = {
3085		.name	= "ravb",
3086		.pm	= &ravb_dev_pm_ops,
3087		.of_match_table = ravb_match_table,
3088	},
3089};
3090
3091module_platform_driver(ravb_driver);
3092
3093MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
3094MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
3095MODULE_LICENSE("GPL v2");
v6.2
  70
  71static int ravb_config(struct net_device *ndev)
  72{
  73	int error;
  74
  75	/* Set config mode */
  76	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
  77	/* Check if the operating mode is changed to the config mode */
  78	error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
  79	if (error)
  80		netdev_err(ndev, "failed to switch device to config mode\n");
  81
  82	return error;
  83}
  84
 115
 116static void ravb_set_buffer_align(struct sk_buff *skb)
 117{
 118	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
 119
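	/* "reserve" is how far skb->data sits past the previous RAVB_ALIGN
	 * boundary; skb_reserve() advances the data pointer to the next
	 * boundary.
	 */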
 120	if (reserve)
 121		skb_reserve(skb, RAVB_ALIGN - reserve);
 122}
 123
 124/* Get MAC address from the MAC address registers
 125 *
 126 * The Ethernet AVB device doesn't have a ROM for the MAC address.
 127 * This function gets the MAC address that was set by the bootloader.
 128 */
 129static void ravb_read_mac_address(struct device_node *np,
 130				  struct net_device *ndev)
 131{
 132	int ret;
 133
 134	ret = of_get_ethdev_address(np, ndev);
 135	if (ret) {
 136		u32 mahr = ravb_read(ndev, MAHR);
 137		u32 malr = ravb_read(ndev, MALR);
 138		u8 addr[ETH_ALEN];
 139
 140		addr[0] = (mahr >> 24) & 0xFF;
 141		addr[1] = (mahr >> 16) & 0xFF;
 142		addr[2] = (mahr >>  8) & 0xFF;
 143		addr[3] = (mahr >>  0) & 0xFF;
 144		addr[4] = (malr >>  8) & 0xFF;
 145		addr[5] = (malr >>  0) & 0xFF;
 146		eth_hw_addr_set(ndev, addr);
 147	}
 148}
 149
 150static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
 151{
 152	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
 153						 mdiobb);
 154
 155	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
 156}
 157
 158/* MDC pin control */
 159static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
 160{
 161	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
 162}
 163
 164/* Data I/O pin control */
 165static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
 166{
 167	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
 168}
 169
 170/* Set data bit */
 171static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
 172{
 173	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
 174}
 175
 176/* Get data bit */
 177static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
 178{
 179	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
 180						 mdiobb);
 181
 182	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
 183}
 184
 185/* MDIO bus control struct */
 186static const struct mdiobb_ops bb_ops = {
 187	.owner = THIS_MODULE,
 188	.set_mdc = ravb_set_mdc,
 189	.set_mdio_dir = ravb_set_mdio_dir,
 190	.set_mdio_data = ravb_set_mdio_data,
 191	.get_mdio_data = ravb_get_mdio_data,
 192};
 193
 194/* Free TX skb function for AVB-IP */
 195static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 196{
 197	struct ravb_private *priv = netdev_priv(ndev);
 198	struct net_device_stats *stats = &priv->stats[q];
 199	unsigned int num_tx_desc = priv->num_tx_desc;
 200	struct ravb_tx_desc *desc;
 201	unsigned int entry;
 202	int free_num = 0;
 203	u32 size;
 204
 205	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
 206		bool txed;
 207
 208		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
 209					     num_tx_desc);
 210		desc = &priv->tx_ring[q][entry];
 211		txed = desc->die_dt == DT_FEMPTY;
 212		if (free_txed_only && !txed)
 213			break;
 214		/* Descriptor type must be checked before all other reads */
 215		dma_rmb();
 216		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 217		/* Free the original skb. */
 218		if (priv->tx_skb[q][entry / num_tx_desc]) {
 219			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 220					 size, DMA_TO_DEVICE);
 221			/* Last packet descriptor? */
 222			if (entry % num_tx_desc == num_tx_desc - 1) {
 223				entry /= num_tx_desc;
 224				dev_kfree_skb_any(priv->tx_skb[q][entry]);
 225				priv->tx_skb[q][entry] = NULL;
 226				if (txed)
 227					stats->tx_packets++;
 228			}
 229			free_num++;
 230		}
 231		if (txed)
 232			stats->tx_bytes += size;
 233		desc->die_dt = DT_EEMPTY;
 234	}
 235	return free_num;
 236}
 237
 238static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
 239{
 240	struct ravb_private *priv = netdev_priv(ndev);
 241	unsigned int ring_size;
 242	unsigned int i;
 243
 244	if (!priv->gbeth_rx_ring)
 245		return;
 246
 247	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 248		struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
 249
 250		if (!dma_mapping_error(ndev->dev.parent,
 251				       le32_to_cpu(desc->dptr)))
 252			dma_unmap_single(ndev->dev.parent,
 253					 le32_to_cpu(desc->dptr),
 254					 GBETH_RX_BUFF_MAX,
 255					 DMA_FROM_DEVICE);
 256	}
 257	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
 258	dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
 259			  priv->rx_desc_dma[q]);
 260	priv->gbeth_rx_ring = NULL;
 261}
 262
 263static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
 264{
 265	struct ravb_private *priv = netdev_priv(ndev);
 266	unsigned int ring_size;
 267	unsigned int i;
 268
 269	if (!priv->rx_ring[q])
 270		return;
 271
 272	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 273		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
 274
 275		if (!dma_mapping_error(ndev->dev.parent,
 276				       le32_to_cpu(desc->dptr)))
 277			dma_unmap_single(ndev->dev.parent,
 278					 le32_to_cpu(desc->dptr),
 279					 RX_BUF_SZ,
 280					 DMA_FROM_DEVICE);
 281	}
 282	ring_size = sizeof(struct ravb_ex_rx_desc) *
 283		    (priv->num_rx_ring[q] + 1);
 284	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
 285			  priv->rx_desc_dma[q]);
 286	priv->rx_ring[q] = NULL;
 287}
 288
 289/* Free skb's and DMA buffers for Ethernet AVB */
 290static void ravb_ring_free(struct net_device *ndev, int q)
 291{
 292	struct ravb_private *priv = netdev_priv(ndev);
 293	const struct ravb_hw_info *info = priv->info;
 294	unsigned int num_tx_desc = priv->num_tx_desc;
 295	unsigned int ring_size;
 296	unsigned int i;
 297
 298	info->rx_ring_free(ndev, q);
 299
 300	if (priv->tx_ring[q]) {
 301		ravb_tx_free(ndev, q, false);
 302
 303		ring_size = sizeof(struct ravb_tx_desc) *
 304			    (priv->num_tx_ring[q] * num_tx_desc + 1);
 305		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 306				  priv->tx_desc_dma[q]);
 307		priv->tx_ring[q] = NULL;
 308	}
 309
 310	/* Free RX skb ringbuffer */
 311	if (priv->rx_skb[q]) {
 312		for (i = 0; i < priv->num_rx_ring[q]; i++)
 313			dev_kfree_skb(priv->rx_skb[q][i]);
 314	}
 315	kfree(priv->rx_skb[q]);
 316	priv->rx_skb[q] = NULL;
 317
 318	/* Free aligned TX buffers */
 319	kfree(priv->tx_align[q]);
 320	priv->tx_align[q] = NULL;
 321
 322	/* Free TX skb ringbuffer.
 323	 * SKBs are freed by ravb_tx_free() call above.
 324	 */
 325	kfree(priv->tx_skb[q]);
 326	priv->tx_skb[q] = NULL;
 327}
 328
 329static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
 330{
 331	struct ravb_private *priv = netdev_priv(ndev);
 332	struct ravb_rx_desc *rx_desc;
 333	unsigned int rx_ring_size;
 334	dma_addr_t dma_addr;
 335	unsigned int i;
 336
 337	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 338	memset(priv->gbeth_rx_ring, 0, rx_ring_size);
 339	/* Build RX ring buffer */
 340	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 341		/* RX descriptor */
 342		rx_desc = &priv->gbeth_rx_ring[i];
 343		rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 344		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 345					  GBETH_RX_BUFF_MAX,
 346					  DMA_FROM_DEVICE);
 347		/* We just set the data size to 0 for a failed mapping,
 348		 * which should prevent DMA from happening.
 349		 */
 350		if (dma_mapping_error(ndev->dev.parent, dma_addr))
 351			rx_desc->ds_cc = cpu_to_le16(0);
 352		rx_desc->dptr = cpu_to_le32(dma_addr);
 353		rx_desc->die_dt = DT_FEMPTY;
 354	}
 355	rx_desc = &priv->gbeth_rx_ring[i];
 356	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 357	rx_desc->die_dt = DT_LINKFIX; /* type */
 358}
 359
 360static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
 361{
 362	struct ravb_private *priv = netdev_priv(ndev);
 363	struct ravb_ex_rx_desc *rx_desc;
 364	unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 365	dma_addr_t dma_addr;
 366	unsigned int i;
 367
 368	memset(priv->rx_ring[q], 0, rx_ring_size);
 369	/* Build RX ring buffer */
 370	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 371		/* RX descriptor */
 372		rx_desc = &priv->rx_ring[q][i];
 373		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 374		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 375					  RX_BUF_SZ,
 376					  DMA_FROM_DEVICE);
 377		/* We just set the data size to 0 for a failed mapping,
 378		 * which should prevent DMA from happening.
 379		 */
 380		if (dma_mapping_error(ndev->dev.parent, dma_addr))
 381			rx_desc->ds_cc = cpu_to_le16(0);
 382		rx_desc->dptr = cpu_to_le32(dma_addr);
 383		rx_desc->die_dt = DT_FEMPTY;
 384	}
 385	rx_desc = &priv->rx_ring[q][i];
 386	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 387	rx_desc->die_dt = DT_LINKFIX; /* type */
 388}
 389
 390/* Format skb and descriptor buffer for Ethernet AVB */
 391static void ravb_ring_format(struct net_device *ndev, int q)
 392{
 393	struct ravb_private *priv = netdev_priv(ndev);
 394	const struct ravb_hw_info *info = priv->info;
 395	unsigned int num_tx_desc = priv->num_tx_desc;
 396	struct ravb_tx_desc *tx_desc;
 397	struct ravb_desc *desc;
 398	unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
 399				    num_tx_desc;
 400	unsigned int i;
 401
 402	priv->cur_rx[q] = 0;
 403	priv->cur_tx[q] = 0;
 404	priv->dirty_rx[q] = 0;
 405	priv->dirty_tx[q] = 0;
 406
 407	info->rx_ring_format(ndev, q);
 408
 409	memset(priv->tx_ring[q], 0, tx_ring_size);
 410	/* Build TX ring buffer */
 411	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
 412	     i++, tx_desc++) {
 413		tx_desc->die_dt = DT_EEMPTY;
 414		if (num_tx_desc > 1) {
 415			tx_desc++;
 416			tx_desc->die_dt = DT_EEMPTY;
 417		}
 418	}
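	/* The extra trailing descriptor is a LINKFIX that points back to
	 * the ring base, closing the descriptor chain into a ring.
	 */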
 419	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 420	tx_desc->die_dt = DT_LINKFIX; /* type */
 421
 422	/* RX descriptor base address for best effort */
 423	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
 424	desc->die_dt = DT_LINKFIX; /* type */
 425	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 426
 427	/* TX descriptor base address for best effort */
 428	desc = &priv->desc_bat[q];
 429	desc->die_dt = DT_LINKFIX; /* type */
 430	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 431}
 432
 433static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
 434{
 435	struct ravb_private *priv = netdev_priv(ndev);
 436	unsigned int ring_size;
 437
 438	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
 439
 440	priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
 441						 &priv->rx_desc_dma[q],
 442						 GFP_KERNEL);
 443	return priv->gbeth_rx_ring;
 444}
 445
 446static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
 447{
 448	struct ravb_private *priv = netdev_priv(ndev);
 449	unsigned int ring_size;
 450
 451	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 452
 453	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 454					      &priv->rx_desc_dma[q],
 455					      GFP_KERNEL);
 456	return priv->rx_ring[q];
 457}
 458
 459/* Init skb and descriptor buffer for Ethernet AVB */
 460static int ravb_ring_init(struct net_device *ndev, int q)
 461{
 462	struct ravb_private *priv = netdev_priv(ndev);
 463	const struct ravb_hw_info *info = priv->info;
 464	unsigned int num_tx_desc = priv->num_tx_desc;
 465	unsigned int ring_size;
 466	struct sk_buff *skb;
 467	unsigned int i;
 468
 469	/* Allocate RX and TX skb rings */
 470	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
 471				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
 472	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
 473				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
 474	if (!priv->rx_skb[q] || !priv->tx_skb[q])
 475		goto error;
 476
 477	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 478		skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
 479		if (!skb)
 480			goto error;
 481		ravb_set_buffer_align(skb);
 482		priv->rx_skb[q][i] = skb;
 483	}
 484
 485	if (num_tx_desc > 1) {
 486		/* Allocate rings for the aligned buffers */
 487		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
 488					    DPTR_ALIGN - 1, GFP_KERNEL);
 489		if (!priv->tx_align[q])
 490			goto error;
 491	}
 492
 493	/* Allocate all RX descriptors. */
 494	if (!info->alloc_rx_desc(ndev, q))
 495		goto error;
 496
 497	priv->dirty_rx[q] = 0;
 498
 499	/* Allocate all TX descriptors. */
 500	ring_size = sizeof(struct ravb_tx_desc) *
 501		    (priv->num_tx_ring[q] * num_tx_desc + 1);
 502	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 503					      &priv->tx_desc_dma[q],
 504					      GFP_KERNEL);
 505	if (!priv->tx_ring[q])
 506		goto error;
 507
 508	return 0;
 509
 510error:
 511	ravb_ring_free(ndev, q);
 512
 513	return -ENOMEM;
 514}
 515
 516static void ravb_emac_init_gbeth(struct net_device *ndev)
 517{
 518	struct ravb_private *priv = netdev_priv(ndev);
 519
 520	/* Receive frame limit set register */
 521	ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
 522
 523	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
 524	ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
 525			 ECMR_TE | ECMR_RE | ECMR_RCPT |
 526			 ECMR_TXF | ECMR_RXF, ECMR);
 527
 528	ravb_set_rate_gbeth(ndev);
 529
 530	/* Set MAC address */
 531	ravb_write(ndev,
 532		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
 533		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
 534	ravb_write(ndev, (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);
 535
 536	/* E-MAC status register clear */
 537	ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
 538	ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
 539
 540	/* E-MAC interrupt enable register */
 541	ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
 542
 543	if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
 544		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
 545		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
 546	} else {
 547		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
 548			    CXR31_SEL_LINK0);
 549	}
 550}
 551
 552static void ravb_emac_init_rcar(struct net_device *ndev)
 553{
 554	/* Receive frame limit set register */
 555	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
 556
 557	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
 558	ravb_write(ndev, ECMR_ZPF | ECMR_DM |
 559		   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
 560		   ECMR_TE | ECMR_RE, ECMR);
 561
 562	ravb_set_rate_rcar(ndev);
 563
 564	/* Set MAC address */
 565	ravb_write(ndev,
 566		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
 567		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
 568	ravb_write(ndev,
 569		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);
 570
 571	/* E-MAC status register clear */
 572	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
 573
 574	/* E-MAC interrupt enable register */
 575	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
 576}
 577
 578/* E-MAC init function */
 579static void ravb_emac_init(struct net_device *ndev)
 580{
 581	struct ravb_private *priv = netdev_priv(ndev);
 582	const struct ravb_hw_info *info = priv->info;
 583
 584	info->emac_init(ndev);
 585}
 586
 587static int ravb_dmac_init_gbeth(struct net_device *ndev)
 588{
 589	int error;
 590
 591	error = ravb_ring_init(ndev, RAVB_BE);
 592	if (error)
 593		return error;
 594
 595	/* Descriptor format */
 596	ravb_ring_format(ndev, RAVB_BE);
 597
 598	/* Set DMAC RX */
 599	ravb_write(ndev, 0x60000000, RCR);
 600
 601	/* Set Max Frame Length (RTC) */
 602	ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
 603
 604	/* Set FIFO size */
 605	ravb_write(ndev, 0x00222200, TGC);
 606
 607	ravb_write(ndev, 0, TCCR);
 608
 609	/* Frame receive */
 610	ravb_write(ndev, RIC0_FRE0, RIC0);
 611	/* Disable FIFO full warning */
 612	ravb_write(ndev, 0x0, RIC1);
 613	/* Receive FIFO full error, descriptor empty */
 614	ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);
 615
 616	ravb_write(ndev, TIC_FTE0, TIC);
 617
 618	return 0;
 619}
 620
 621static int ravb_dmac_init_rcar(struct net_device *ndev)
 622{
 623	struct ravb_private *priv = netdev_priv(ndev);
 624	const struct ravb_hw_info *info = priv->info;
 625	int error;
 626
 627	error = ravb_ring_init(ndev, RAVB_BE);
 628	if (error)
 629		return error;
 630	error = ravb_ring_init(ndev, RAVB_NC);
 631	if (error) {
 632		ravb_ring_free(ndev, RAVB_BE);
 633		return error;
 634	}
 635
 636	/* Descriptor format */
 637	ravb_ring_format(ndev, RAVB_BE);
 638	ravb_ring_format(ndev, RAVB_NC);
 639
 640	/* Set AVB RX */
 641	ravb_write(ndev,
 642		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
 643
 644	/* Set FIFO size */
 645	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
 646
 647	/* Timestamp enable */
 648	ravb_write(ndev, TCCR_TFEN, TCCR);
 649
 650	/* Interrupt init: */
 651	if (info->multi_irqs) {
 652		/* Clear DIL.DPLx */
 653		ravb_write(ndev, 0, DIL);
 654		/* Set queue specific interrupt */
 655		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
 656	}
 657	/* Frame receive */
 658	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
 659	/* Disable FIFO full warning */
 660	ravb_write(ndev, 0, RIC1);
 661	/* Receive FIFO full error, descriptor empty */
 662	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
 663	/* Frame transmitted, timestamp FIFO updated */
 664	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
 665
 666	return 0;
 667}
 668
 669/* Device init function for Ethernet AVB */
 670static int ravb_dmac_init(struct net_device *ndev)
 671{
 672	struct ravb_private *priv = netdev_priv(ndev);
 673	const struct ravb_hw_info *info = priv->info;
 674	int error;
 675
 676	/* Set CONFIG mode */
 677	error = ravb_config(ndev);
 678	if (error)
 679		return error;
 680
 681	error = info->dmac_init(ndev);
 682	if (error)
 683		return error;
 684
 685	/* Setting the control will start the AVB-DMAC process. */
 686	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
 687
 688	return 0;
 689}
 690
 691static void ravb_get_tx_tstamp(struct net_device *ndev)
 692{
 693	struct ravb_private *priv = netdev_priv(ndev);
 694	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
 695	struct skb_shared_hwtstamps shhwtstamps;
 696	struct sk_buff *skb;
 697	struct timespec64 ts;
 698	u16 tag, tfa_tag;
 699	int count;
 700	u32 tfa2;
 701
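	/* TSR_TFFL is the fill level of the TX timestamp FIFO; one FIFO
	 * entry (TFA0..TFA2) is drained per iteration below.
	 */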
 702	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
 703	while (count--) {
 704		tfa2 = ravb_read(ndev, TFA2);
 705		tfa_tag = (tfa2 & TFA2_TST) >> 16;
 706		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
 707		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
 708			    ravb_read(ndev, TFA1);
 709		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 710		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
 711		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
 712					 list) {
 713			skb = ts_skb->skb;
 714			tag = ts_skb->tag;
 715			list_del(&ts_skb->list);
 716			kfree(ts_skb);
 717			if (tag == tfa_tag) {
 718				skb_tstamp_tx(skb, &shhwtstamps);
 719				dev_consume_skb_any(skb);
 720				break;
 721			} else {
 722				dev_kfree_skb_any(skb);
 723			}
 724		}
 725		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
 726	}
 727}
 728
 729static void ravb_rx_csum(struct sk_buff *skb)
 730{
 731	u8 *hw_csum;
 732
 733	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
 734	 * appended to packet data
 735	 */
 736	if (unlikely(skb->len < sizeof(__sum16)))
 737		return;
 738	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
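	/* CHECKSUM_COMPLETE means skb->csum holds the checksum of the whole
	 * packet, letting the stack verify any L4 checksum by itself.
	 */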
 739	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
 740	skb->ip_summed = CHECKSUM_COMPLETE;
 741	skb_trim(skb, skb->len - sizeof(__sum16));
 742}
 743
 744static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
 745					  struct ravb_rx_desc *desc)
 746{
 747	struct ravb_private *priv = netdev_priv(ndev);
 748	struct sk_buff *skb;
 749
 750	skb = priv->rx_skb[RAVB_BE][entry];
 751	priv->rx_skb[RAVB_BE][entry] = NULL;
 752	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 753			 ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
 754
 755	return skb;
 756}
 757
 758/* Packet receive function for Gigabit Ethernet */
 759static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 760{
 761	struct ravb_private *priv = netdev_priv(ndev);
 762	const struct ravb_hw_info *info = priv->info;
 763	struct net_device_stats *stats;
 764	struct ravb_rx_desc *desc;
 765	struct sk_buff *skb;
 766	dma_addr_t dma_addr;
 767	u8  desc_status;
 768	int boguscnt;
 769	u16 pkt_len;
 770	u8  die_dt;
 771	int entry;
 772	int limit;
 773
 774	entry = priv->cur_rx[q] % priv->num_rx_ring[q];
 775	boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
 776	stats = &priv->stats[q];
 777
 778	boguscnt = min(boguscnt, *quota);
 779	limit = boguscnt;
 780	desc = &priv->gbeth_rx_ring[entry];
 781	while (desc->die_dt != DT_FEMPTY) {
 782		/* Descriptor type must be checked before all other reads */
 783		dma_rmb();
 784		desc_status = desc->msc;
 785		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 786
 787		if (--boguscnt < 0)
 788			break;
 789
 790		/* We use 0-byte descriptors to mark the DMA mapping errors */
 791		if (!pkt_len)
 792			continue;
 793
 794		if (desc_status & MSC_MC)
 795			stats->multicast++;
 796
 797		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
 798			stats->rx_errors++;
 799			if (desc_status & MSC_CRC)
 800				stats->rx_crc_errors++;
 801			if (desc_status & MSC_RFE)
 802				stats->rx_frame_errors++;
 803			if (desc_status & (MSC_RTLF | MSC_RTSF))
 804				stats->rx_length_errors++;
 805			if (desc_status & MSC_CEEF)
 806				stats->rx_missed_errors++;
 807		} else {
 808			die_dt = desc->die_dt & 0xF0;
 809			switch (die_dt) {
 810			case DT_FSINGLE:
 811				skb = ravb_get_skb_gbeth(ndev, entry, desc);
 812				skb_put(skb, pkt_len);
 813				skb->protocol = eth_type_trans(skb, ndev);
 814				napi_gro_receive(&priv->napi[q], skb);
 815				stats->rx_packets++;
 816				stats->rx_bytes += pkt_len;
 817				break;
 818			case DT_FSTART:
 819				priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
 820				skb_put(priv->rx_1st_skb, pkt_len);
 821				break;
 822			case DT_FMID:
 823				skb = ravb_get_skb_gbeth(ndev, entry, desc);
 824				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
 825							       priv->rx_1st_skb->len,
 826							       skb->data,
 827							       pkt_len);
 828				skb_put(priv->rx_1st_skb, pkt_len);
 829				dev_kfree_skb(skb);
 830				break;
 831			case DT_FEND:
 832				skb = ravb_get_skb_gbeth(ndev, entry, desc);
 833				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
 834							       priv->rx_1st_skb->len,
 835							       skb->data,
 836							       pkt_len);
 837				skb_put(priv->rx_1st_skb, pkt_len);
 838				dev_kfree_skb(skb);
 839				priv->rx_1st_skb->protocol =
 840					eth_type_trans(priv->rx_1st_skb, ndev);
 841				napi_gro_receive(&priv->napi[q],
 842						 priv->rx_1st_skb);
 843				stats->rx_packets++;
 844				stats->rx_bytes += pkt_len;
 845				break;
 846			}
 847		}
 848
 849		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
 850		desc = &priv->gbeth_rx_ring[entry];
 851	}
 852
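	/* cur_rx and dirty_rx are free-running counters; their difference is
	 * the number of descriptors consumed but not yet refilled.
	 */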
 853	/* Refill the RX ring buffers. */
 854	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 855		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 856		desc = &priv->gbeth_rx_ring[entry];
 857		desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 858
 859		if (!priv->rx_skb[q][entry]) {
 860			skb = netdev_alloc_skb(ndev, info->max_rx_len);
 861			if (!skb)
 862				break;
 863			ravb_set_buffer_align(skb);
 864			dma_addr = dma_map_single(ndev->dev.parent,
 865						  skb->data,
 866						  GBETH_RX_BUFF_MAX,
 867						  DMA_FROM_DEVICE);
 868			skb_checksum_none_assert(skb);
 869			/* We just set the data size to 0 for a failed mapping
 870			 * which should prevent DMA from happening...
 871			 */
 872			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 873				desc->ds_cc = cpu_to_le16(0);
 874			desc->dptr = cpu_to_le32(dma_addr);
 875			priv->rx_skb[q][entry] = skb;
 876		}
 877		/* Descriptor type must be set after all the above writes */
 878		dma_wmb();
 879		desc->die_dt = DT_FEMPTY;
 880	}
 881
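	/* "limit" allowances were granted and "boguscnt + 1" remain unused,
	 * so the difference is what this pass consumed from the NAPI quota.
	 */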
 882	*quota -= limit - (++boguscnt);
 883
 884	return boguscnt <= 0;
 885}
 886
 887/* Packet receive function for Ethernet AVB */
 888static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 889{
 890	struct ravb_private *priv = netdev_priv(ndev);
 891	const struct ravb_hw_info *info = priv->info;
 892	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
 893	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
 894			priv->cur_rx[q];
 895	struct net_device_stats *stats = &priv->stats[q];
 896	struct ravb_ex_rx_desc *desc;
 897	struct sk_buff *skb;
 898	dma_addr_t dma_addr;
 899	struct timespec64 ts;
 900	u8  desc_status;
 901	u16 pkt_len;
 902	int limit;
 903
 904	boguscnt = min(boguscnt, *quota);
 905	limit = boguscnt;
 906	desc = &priv->rx_ring[q][entry];
 907	while (desc->die_dt != DT_FEMPTY) {
 908		/* Descriptor type must be checked before all other reads */
 909		dma_rmb();
 910		desc_status = desc->msc;
 911		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 912
 913		if (--boguscnt < 0)
 914			break;
 915
 916		/* We use 0-byte descriptors to mark the DMA mapping errors */
 917		if (!pkt_len)
 918			continue;
 919
 920		if (desc_status & MSC_MC)
 921			stats->multicast++;
 922
 923		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
 924				   MSC_CEEF)) {
 925			stats->rx_errors++;
 926			if (desc_status & MSC_CRC)
 927				stats->rx_crc_errors++;
 928			if (desc_status & MSC_RFE)
 929				stats->rx_frame_errors++;
 930			if (desc_status & (MSC_RTLF | MSC_RTSF))
 931				stats->rx_length_errors++;
 932			if (desc_status & MSC_CEEF)
 933				stats->rx_missed_errors++;
 934		} else {
 935			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
 936
 937			skb = priv->rx_skb[q][entry];
 938			priv->rx_skb[q][entry] = NULL;
 939			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 940					 RX_BUF_SZ,
 941					 DMA_FROM_DEVICE);
 942			get_ts &= (q == RAVB_NC) ?
 943					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
 944					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
 945			if (get_ts) {
 946				struct skb_shared_hwtstamps *shhwtstamps;
 947
 948				shhwtstamps = skb_hwtstamps(skb);
 949				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 950				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
 951					     32) | le32_to_cpu(desc->ts_sl);
 952				ts.tv_nsec = le32_to_cpu(desc->ts_n);
 953				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
 954			}
 955
 956			skb_put(skb, pkt_len);
 957			skb->protocol = eth_type_trans(skb, ndev);
 958			if (ndev->features & NETIF_F_RXCSUM)
 959				ravb_rx_csum(skb);
 960			napi_gro_receive(&priv->napi[q], skb);
 961			stats->rx_packets++;
 962			stats->rx_bytes += pkt_len;
 963		}
 964
 965		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
 966		desc = &priv->rx_ring[q][entry];
 967	}
 968
 969	/* Refill the RX ring buffers. */
 970	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 971		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 972		desc = &priv->rx_ring[q][entry];
 973		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 974
 975		if (!priv->rx_skb[q][entry]) {
 976			skb = netdev_alloc_skb(ndev, info->max_rx_len);
 977			if (!skb)
 978				break;	/* Better luck next round. */
 979			ravb_set_buffer_align(skb);
 980			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 981						  le16_to_cpu(desc->ds_cc),
 982						  DMA_FROM_DEVICE);
 983			skb_checksum_none_assert(skb);
 984			/* We just set the data size to 0 for a failed mapping
 985			 * which should prevent DMA from happening...
 986			 */
 987			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 988				desc->ds_cc = cpu_to_le16(0);
 989			desc->dptr = cpu_to_le32(dma_addr);
 990			priv->rx_skb[q][entry] = skb;
 991		}
 992		/* Descriptor type must be set after all the above writes */
 993		dma_wmb();
 994		desc->die_dt = DT_FEMPTY;
 995	}
 996
 997	*quota -= limit - (++boguscnt);
 998
 999	return boguscnt <= 0;
1000}
1001
1002	/* Packet receive dispatch function */
1003static bool ravb_rx(struct net_device *ndev, int *quota, int q)
1004{
1005	struct ravb_private *priv = netdev_priv(ndev);
1006	const struct ravb_hw_info *info = priv->info;
1007
1008	return info->receive(ndev, quota, q);
1009}
1010
1011static void ravb_rcv_snd_disable(struct net_device *ndev)
1012{
1013	/* Disable TX and RX */
1014	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
1015}
1016
1017static void ravb_rcv_snd_enable(struct net_device *ndev)
1018{
1019	/* Enable TX and RX */
1020	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
1021}
1022
1023	/* Wait for the hardware DMA processes to finish */
1024static int ravb_stop_dma(struct net_device *ndev)
1025{
1026	struct ravb_private *priv = netdev_priv(ndev);
1027	const struct ravb_hw_info *info = priv->info;
1028	int error;
1029
1030	/* Wait for stopping the hardware TX process */
1031	error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);
1032
1033	if (error)
1034		return error;
1035
1036	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
1037			  0);
1038	if (error)
1039		return error;
1040
1041	/* Stop the E-MAC's RX/TX processes. */
1042	ravb_rcv_snd_disable(ndev);
1043
1044	/* Wait for stopping the RX DMA process */
1045	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
1046	if (error)
1047		return error;
1048
1049	/* Stop AVB-DMAC process */
1050	return ravb_config(ndev);
1051}
1052
1053/* E-MAC interrupt handler */
1054static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
1055{
1056	struct ravb_private *priv = netdev_priv(ndev);
1057	u32 ecsr, psr;
1058
1059	ecsr = ravb_read(ndev, ECSR);
1060	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */
1061
1062	if (ecsr & ECSR_MPD)
1063		pm_wakeup_event(&priv->pdev->dev, 0);
1064	if (ecsr & ECSR_ICD)
1065		ndev->stats.tx_carrier_errors++;
1066	if (ecsr & ECSR_LCHNG) {
1067		/* Link changed */
1068		if (priv->no_avb_link)
1069			return;
1070		psr = ravb_read(ndev, PSR);
1071		if (priv->avb_link_active_low)
1072			psr ^= PSR_LMON;
1073		if (!(psr & PSR_LMON)) {
1074			/* Disable RX and TX */
1075			ravb_rcv_snd_disable(ndev);
1076		} else {
1077			/* Enable RX and TX */
1078			ravb_rcv_snd_enable(ndev);
1079		}
1080	}
1081}
1082
1083static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
1084{
1085	struct net_device *ndev = dev_id;
1086	struct ravb_private *priv = netdev_priv(ndev);
1087
1088	spin_lock(&priv->lock);
1089	ravb_emac_interrupt_unlocked(ndev);
1090	spin_unlock(&priv->lock);
1091	return IRQ_HANDLED;
1092}
1093
1094/* Error interrupt handler */
1095static void ravb_error_interrupt(struct net_device *ndev)
1096{
1097	struct ravb_private *priv = netdev_priv(ndev);
1098	u32 eis, ris2;
1099
1100	eis = ravb_read(ndev, EIS);
1101	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
1102	if (eis & EIS_QFS) {
1103		ris2 = ravb_read(ndev, RIS2);
1104		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
1105			   RIS2);
1106
1107		/* Receive Descriptor Empty int (best effort queue) */
1108		if (ris2 & RIS2_QFF0)
1109			priv->stats[RAVB_BE].rx_over_errors++;
1110
1111		/* Receive Descriptor Empty int (network control queue) */
1112		if (ris2 & RIS2_QFF1)
1113			priv->stats[RAVB_NC].rx_over_errors++;
1114
1115		/* Receive FIFO Overflow int */
1116		if (ris2 & RIS2_RFFF)
1117			priv->rx_fifo_errors++;
1118	}
1119}
1120
1121static bool ravb_queue_interrupt(struct net_device *ndev, int q)
1122{
1123	struct ravb_private *priv = netdev_priv(ndev);
1124	const struct ravb_hw_info *info = priv->info;
1125	u32 ris0 = ravb_read(ndev, RIS0);
1126	u32 ric0 = ravb_read(ndev, RIC0);
1127	u32 tis  = ravb_read(ndev, TIS);
1128	u32 tic  = ravb_read(ndev, TIC);
1129
1130	if (((ris0 & ric0) & BIT(q)) || ((tis  & tic)  & BIT(q))) {
1131		if (napi_schedule_prep(&priv->napi[q])) {
1132			/* Mask RX and TX interrupts */
1133			if (!info->irq_en_dis) {
1134				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
1135				ravb_write(ndev, tic & ~BIT(q), TIC);
1136			} else {
1137				ravb_write(ndev, BIT(q), RID0);
1138				ravb_write(ndev, BIT(q), TID);
1139			}
1140			__napi_schedule(&priv->napi[q]);
1141		} else {
1142			netdev_warn(ndev,
1143				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
1144				    ris0, ric0);
1145			netdev_warn(ndev,
1146				    "                    tx status 0x%08x, tx mask 0x%08x.\n",
1147				    tis, tic);
1148		}
1149		return true;
1150	}
1151	return false;
1152}
1153
1154static bool ravb_timestamp_interrupt(struct net_device *ndev)
1155{
1156	u32 tis = ravb_read(ndev, TIS);
1157
1158	if (tis & TIS_TFUF) {
1159		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
1160		ravb_get_tx_tstamp(ndev);
1161		return true;
1162	}
1163	return false;
1164}
1165
1166static irqreturn_t ravb_interrupt(int irq, void *dev_id)
1167{
1168	struct net_device *ndev = dev_id;
1169	struct ravb_private *priv = netdev_priv(ndev);
1170	const struct ravb_hw_info *info = priv->info;
1171	irqreturn_t result = IRQ_NONE;
1172	u32 iss;
1173
1174	spin_lock(&priv->lock);
1175	/* Get interrupt status */
1176	iss = ravb_read(ndev, ISS);
1177
1178	/* Received and transmitted interrupts */
1179	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
1180		int q;
1181
1182		/* Timestamp updated */
1183		if (ravb_timestamp_interrupt(ndev))
1184			result = IRQ_HANDLED;
1185
1186		/* Network control and best effort queue RX/TX */
1187		if (info->nc_queues) {
1188			for (q = RAVB_NC; q >= RAVB_BE; q--) {
1189				if (ravb_queue_interrupt(ndev, q))
1190					result = IRQ_HANDLED;
1191			}
1192		} else {
1193			if (ravb_queue_interrupt(ndev, RAVB_BE))
1194				result = IRQ_HANDLED;
1195		}
1196	}
1197
1198	/* E-MAC status summary */
1199	if (iss & ISS_MS) {
1200		ravb_emac_interrupt_unlocked(ndev);
1201		result = IRQ_HANDLED;
1202	}
1203
1204	/* Error status summary */
1205	if (iss & ISS_ES) {
1206		ravb_error_interrupt(ndev);
1207		result = IRQ_HANDLED;
1208	}
1209
1210	/* gPTP interrupt status summary */
1211	if (iss & ISS_CGIS) {
1212		ravb_ptp_interrupt(ndev);
1213		result = IRQ_HANDLED;
1214	}
1215
1216	spin_unlock(&priv->lock);
1217	return result;
1218}
1219
1220/* Timestamp/Error/gPTP interrupt handler */
1221static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
1222{
1223	struct net_device *ndev = dev_id;
1224	struct ravb_private *priv = netdev_priv(ndev);
1225	irqreturn_t result = IRQ_NONE;
1226	u32 iss;
1227
1228	spin_lock(&priv->lock);
1229	/* Get interrupt status */
1230	iss = ravb_read(ndev, ISS);
1231
1232	/* Timestamp updated */
1233	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
1234		result = IRQ_HANDLED;
1235
1236	/* Error status summary */
1237	if (iss & ISS_ES) {
1238		ravb_error_interrupt(ndev);
1239		result = IRQ_HANDLED;
1240	}
1241
1242	/* gPTP interrupt status summary */
1243	if (iss & ISS_CGIS) {
1244		ravb_ptp_interrupt(ndev);
1245		result = IRQ_HANDLED;
1246	}
1247
1248	spin_unlock(&priv->lock);
1249	return result;
1250}
1251
1252static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
1253{
1254	struct net_device *ndev = dev_id;
1255	struct ravb_private *priv = netdev_priv(ndev);
1256	irqreturn_t result = IRQ_NONE;
1257
1258	spin_lock(&priv->lock);
1259
1260	/* Network control/Best effort queue RX/TX */
1261	if (ravb_queue_interrupt(ndev, q))
1262		result = IRQ_HANDLED;
1263
1264	spin_unlock(&priv->lock);
1265	return result;
1266}
1267
1268static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
1269{
1270	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
1271}
1272
1273static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
1274{
1275	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
1276}
1277
1278static int ravb_poll(struct napi_struct *napi, int budget)
1279{
1280	struct net_device *ndev = napi->dev;
1281	struct ravb_private *priv = netdev_priv(ndev);
1282	const struct ravb_hw_info *info = priv->info;
1283	bool gptp = info->gptp || info->ccc_gac;
1284	struct ravb_rx_desc *desc;
1285	unsigned long flags;
1286	int q = napi - priv->napi;
1287	int mask = BIT(q);
1288	int quota = budget;
1289	unsigned int entry;
1290
1291	if (!gptp) {
1292		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
1293		desc = &priv->gbeth_rx_ring[entry];
1294	}
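	/* Without gPTP (gbeth) the RX ring is only processed when the next
	 * descriptor really holds a frame; when gptp is set, desc is never
	 * dereferenced below.
	 */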
1295	/* Processing RX Descriptor Ring */
1296	/* Clear RX interrupt */
1297	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
1298	if (gptp || desc->die_dt != DT_FEMPTY) {
1299		if (ravb_rx(ndev, &quota, q))
1300			goto out;
1301	}
1302
1303	/* Processing TX Descriptor Ring */
1304	spin_lock_irqsave(&priv->lock, flags);
1305	/* Clear TX interrupt */
1306	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
1307	ravb_tx_free(ndev, q, true);
1308	netif_wake_subqueue(ndev, q);
1309	spin_unlock_irqrestore(&priv->lock, flags);
1310
1311	napi_complete(napi);
1312
1313	/* Re-enable RX/TX interrupts */
1314	spin_lock_irqsave(&priv->lock, flags);
1315	if (!info->irq_en_dis) {
1316		ravb_modify(ndev, RIC0, mask, mask);
1317		ravb_modify(ndev, TIC,  mask, mask);
1318	} else {
1319		ravb_write(ndev, mask, RIE0);
1320		ravb_write(ndev, mask, TIE);
1321	}
1322	spin_unlock_irqrestore(&priv->lock, flags);
1323
1324	/* Receive error message handling */
1325	priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
1326	if (info->nc_queues)
1327		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
1328	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
1329		ndev->stats.rx_over_errors = priv->rx_over_errors;
1330	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
1331		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
1332out:
1333	return budget - quota;
1334}
1335
1336static void ravb_set_duplex_gbeth(struct net_device *ndev)
1337{
1338	struct ravb_private *priv = netdev_priv(ndev);
1339
1340	ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
1341}
1342
1343/* PHY state control function */
1344static void ravb_adjust_link(struct net_device *ndev)
1345{
1346	struct ravb_private *priv = netdev_priv(ndev);
1347	const struct ravb_hw_info *info = priv->info;
1348	struct phy_device *phydev = ndev->phydev;
1349	bool new_state = false;
1350	unsigned long flags;
1351
1352	spin_lock_irqsave(&priv->lock, flags);
1353
1354	/* Disable TX and RX right away if the E-MAC link change is to be ignored */
1355	if (priv->no_avb_link)
1356		ravb_rcv_snd_disable(ndev);
1357
1358	if (phydev->link) {
1359		if (info->half_duplex && phydev->duplex != priv->duplex) {
1360			new_state = true;
1361			priv->duplex = phydev->duplex;
1362			ravb_set_duplex_gbeth(ndev);
1363		}
1364
1365		if (phydev->speed != priv->speed) {
1366			new_state = true;
1367			priv->speed = phydev->speed;
1368			info->set_rate(ndev);
1369		}
1370		if (!priv->link) {
1371			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
1372			new_state = true;
1373			priv->link = phydev->link;
1374		}
1375	} else if (priv->link) {
1376		new_state = true;
1377		priv->link = 0;
1378		priv->speed = 0;
1379		if (info->half_duplex)
1380			priv->duplex = -1;
1381	}
1382
1383	/* Enable TX and RX right away if the E-MAC link change is to be ignored */
1384	if (priv->no_avb_link && phydev->link)
1385		ravb_rcv_snd_enable(ndev);
1386
1387	spin_unlock_irqrestore(&priv->lock, flags);
1388
1389	if (new_state && netif_msg_link(priv))
1390		phy_print_status(phydev);
1391}
1392
1393static const struct soc_device_attribute r8a7795es10[] = {
1394	{ .soc_id = "r8a7795", .revision = "ES1.0", },
1395	{ /* sentinel */ }
1396};
1397
1398/* PHY init function */
1399static int ravb_phy_init(struct net_device *ndev)
1400{
1401	struct device_node *np = ndev->dev.parent->of_node;
1402	struct ravb_private *priv = netdev_priv(ndev);
1403	const struct ravb_hw_info *info = priv->info;
1404	struct phy_device *phydev;
1405	struct device_node *pn;
1406	phy_interface_t iface;
1407	int err;
1408
1409	priv->link = 0;
1410	priv->speed = 0;
1411	priv->duplex = -1;
1412
1413	/* Try connecting to PHY */
1414	pn = of_parse_phandle(np, "phy-handle", 0);
1415	if (!pn) {
1416		/* In the case of a fixed PHY, the DT node associated
1417		 * with the PHY is the Ethernet MAC DT node.
1418		 */
1419		if (of_phy_is_fixed_link(np)) {
1420			err = of_phy_register_fixed_link(np);
1421			if (err)
1422				return err;
1423		}
1424		pn = of_node_get(np);
1425	}
1426
1427	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
1428				     : priv->phy_interface;
1429	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
1430	of_node_put(pn);
1431	if (!phydev) {
1432		netdev_err(ndev, "failed to connect PHY\n");
1433		err = -ENOENT;
1434		goto err_deregister_fixed_link;
1435	}
1436
1437	/* This driver only supports 10/100 Mbit/s speeds on R-Car H3 ES1.0
1438	 * at this time.
1439	 */
1440	if (soc_device_match(r8a7795es10)) {
1441		phy_set_max_speed(phydev, SPEED_100);
1442
1443		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
1444	}
1445
1446	if (!info->half_duplex) {
1447		/* 10BASE, Pause and Asym Pause are not supported */
1448		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1449		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
1450		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
1451		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
1452
1453		/* Half Duplex is not supported */
1454		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1455		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1456	}
1457
1458	/* Indicate that the MAC is responsible for managing PHY PM */
1459	phydev->mac_managed_pm = true;
1460	phy_attached_info(phydev);
1461
1462	return 0;
1463
1464err_deregister_fixed_link:
1465	if (of_phy_is_fixed_link(np))
1466		of_phy_deregister_fixed_link(np);
1467
1468	return err;
1469}
1470
1471/* PHY control start function */
1472static int ravb_phy_start(struct net_device *ndev)
1473{
1474	int error;
1475
1476	error = ravb_phy_init(ndev);
1477	if (error)
1478		return error;
1479
1480	phy_start(ndev->phydev);
1481
1482	return 0;
1483}
1484
1485static u32 ravb_get_msglevel(struct net_device *ndev)
1486{
1487	struct ravb_private *priv = netdev_priv(ndev);
1488
1489	return priv->msg_enable;
1490}
1491
1492static void ravb_set_msglevel(struct net_device *ndev, u32 value)
1493{
1494	struct ravb_private *priv = netdev_priv(ndev);
1495
1496	priv->msg_enable = value;
1497}
1498
1499static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
1500	"rx_queue_0_current",
1501	"tx_queue_0_current",
1502	"rx_queue_0_dirty",
1503	"tx_queue_0_dirty",
1504	"rx_queue_0_packets",
1505	"tx_queue_0_packets",
1506	"rx_queue_0_bytes",
1507	"tx_queue_0_bytes",
1508	"rx_queue_0_mcast_packets",
1509	"rx_queue_0_errors",
1510	"rx_queue_0_crc_errors",
1511	"rx_queue_0_frame_errors",
1512	"rx_queue_0_length_errors",
1513	"rx_queue_0_csum_offload_errors",
1514	"rx_queue_0_over_errors",
1515};
1516
1517static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1518	"rx_queue_0_current",
1519	"tx_queue_0_current",
1520	"rx_queue_0_dirty",
1521	"tx_queue_0_dirty",
1522	"rx_queue_0_packets",
1523	"tx_queue_0_packets",
1524	"rx_queue_0_bytes",
1525	"tx_queue_0_bytes",
1526	"rx_queue_0_mcast_packets",
1527	"rx_queue_0_errors",
1528	"rx_queue_0_crc_errors",
1529	"rx_queue_0_frame_errors",
1530	"rx_queue_0_length_errors",
1531	"rx_queue_0_missed_errors",
1532	"rx_queue_0_over_errors",
1533
1534	"rx_queue_1_current",
1535	"tx_queue_1_current",
1536	"rx_queue_1_dirty",
1537	"tx_queue_1_dirty",
1538	"rx_queue_1_packets",
1539	"tx_queue_1_packets",
1540	"rx_queue_1_bytes",
1541	"tx_queue_1_bytes",
1542	"rx_queue_1_mcast_packets",
1543	"rx_queue_1_errors",
1544	"rx_queue_1_crc_errors",
1545	"rx_queue_1_frame_errors",
1546	"rx_queue_1_length_errors",
1547	"rx_queue_1_missed_errors",
1548	"rx_queue_1_over_errors",
1549};
1550
1551static int ravb_get_sset_count(struct net_device *netdev, int sset)
1552{
1553	struct ravb_private *priv = netdev_priv(netdev);
1554	const struct ravb_hw_info *info = priv->info;
1555
1556	switch (sset) {
1557	case ETH_SS_STATS:
1558		return info->stats_len;
1559	default:
1560		return -EOPNOTSUPP;
1561	}
1562}
1563
1564static void ravb_get_ethtool_stats(struct net_device *ndev,
1565				   struct ethtool_stats *estats, u64 *data)
1566{
1567	struct ravb_private *priv = netdev_priv(ndev);
1568	const struct ravb_hw_info *info = priv->info;
1569	int num_rx_q;
1570	int i = 0;
1571	int q;
1572
1573	num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
1574	/* Device-specific stats */
1575	for (q = RAVB_BE; q < num_rx_q; q++) {
1576		struct net_device_stats *stats = &priv->stats[q];
1577
1578		data[i++] = priv->cur_rx[q];
1579		data[i++] = priv->cur_tx[q];
1580		data[i++] = priv->dirty_rx[q];
1581		data[i++] = priv->dirty_tx[q];
1582		data[i++] = stats->rx_packets;
1583		data[i++] = stats->tx_packets;
1584		data[i++] = stats->rx_bytes;
1585		data[i++] = stats->tx_bytes;
1586		data[i++] = stats->multicast;
1587		data[i++] = stats->rx_errors;
1588		data[i++] = stats->rx_crc_errors;
1589		data[i++] = stats->rx_frame_errors;
1590		data[i++] = stats->rx_length_errors;
1591		data[i++] = stats->rx_missed_errors;
1592		data[i++] = stats->rx_over_errors;
1593	}
1594}
1595
1596static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1597{
1598	struct ravb_private *priv = netdev_priv(ndev);
1599	const struct ravb_hw_info *info = priv->info;
1600
1601	switch (stringset) {
1602	case ETH_SS_STATS:
1603		memcpy(data, info->gstrings_stats, info->gstrings_size);
1604		break;
1605	}
1606}
1607
1608static void ravb_get_ringparam(struct net_device *ndev,
1609			       struct ethtool_ringparam *ring,
1610			       struct kernel_ethtool_ringparam *kernel_ring,
1611			       struct netlink_ext_ack *extack)
1612{
1613	struct ravb_private *priv = netdev_priv(ndev);
1614
1615	ring->rx_max_pending = BE_RX_RING_MAX;
1616	ring->tx_max_pending = BE_TX_RING_MAX;
1617	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
1618	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
1619}
1620
1621static int ravb_set_ringparam(struct net_device *ndev,
1622			      struct ethtool_ringparam *ring,
1623			      struct kernel_ethtool_ringparam *kernel_ring,
1624			      struct netlink_ext_ack *extack)
1625{
1626	struct ravb_private *priv = netdev_priv(ndev);
1627	const struct ravb_hw_info *info = priv->info;
1628	int error;
1629
1630	if (ring->tx_pending > BE_TX_RING_MAX ||
1631	    ring->rx_pending > BE_RX_RING_MAX ||
1632	    ring->tx_pending < BE_TX_RING_MIN ||
1633	    ring->rx_pending < BE_RX_RING_MIN)
1634		return -EINVAL;
1635	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1636		return -EINVAL;
1637
1638	if (netif_running(ndev)) {
1639		netif_device_detach(ndev);
1640		/* Stop PTP Clock driver */
1641		if (info->gptp)
1642			ravb_ptp_stop(ndev);
1643		/* Wait for DMA stopping */
1644		error = ravb_stop_dma(ndev);
1645		if (error) {
1646			netdev_err(ndev,
1647				   "cannot set ringparam! Are any AVB processes still running?\n");
1648			return error;
1649		}
1650		synchronize_irq(ndev->irq);
1651
1652		/* Free all the skb's in the RX queue and the DMA buffers. */
1653		ravb_ring_free(ndev, RAVB_BE);
1654		if (info->nc_queues)
1655			ravb_ring_free(ndev, RAVB_NC);
1656	}
1657
1658	/* Set new parameters */
1659	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
1660	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
1661
1662	if (netif_running(ndev)) {
1663		error = ravb_dmac_init(ndev);
1664		if (error) {
1665			netdev_err(ndev,
1666				   "%s: ravb_dmac_init() failed, error %d\n",
1667				   __func__, error);
1668			return error;
1669		}
1670
1671		ravb_emac_init(ndev);
1672
1673		/* Initialise PTP Clock driver */
1674		if (info->gptp)
1675			ravb_ptp_init(ndev, priv->pdev);
1676
1677		netif_device_attach(ndev);
1678	}
1679
1680	return 0;
1681}
1682
1683static int ravb_get_ts_info(struct net_device *ndev,
1684			    struct ethtool_ts_info *info)
1685{
1686	struct ravb_private *priv = netdev_priv(ndev);
1687	const struct ravb_hw_info *hw_info = priv->info;
1688
1689	info->so_timestamping =
1690		SOF_TIMESTAMPING_TX_SOFTWARE |
1691		SOF_TIMESTAMPING_RX_SOFTWARE |
1692		SOF_TIMESTAMPING_SOFTWARE |
1693		SOF_TIMESTAMPING_TX_HARDWARE |
1694		SOF_TIMESTAMPING_RX_HARDWARE |
1695		SOF_TIMESTAMPING_RAW_HARDWARE;
1696	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1697	info->rx_filters =
1698		(1 << HWTSTAMP_FILTER_NONE) |
1699		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1700		(1 << HWTSTAMP_FILTER_ALL);
1701	if (hw_info->gptp || hw_info->ccc_gac)
1702		info->phc_index = ptp_clock_index(priv->ptp.clock);
1703
1704	return 0;
1705}
1706
1707static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1708{
1709	struct ravb_private *priv = netdev_priv(ndev);
1710
1711	wol->supported = WAKE_MAGIC;
1712	wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
1713}
1714
1715static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1716{
1717	struct ravb_private *priv = netdev_priv(ndev);
1718	const struct ravb_hw_info *info = priv->info;
1719
1720	if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
1721		return -EOPNOTSUPP;
1722
1723	priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
1724
1725	device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);
1726
1727	return 0;
1728}
1729
1730static const struct ethtool_ops ravb_ethtool_ops = {
1731	.nway_reset		= phy_ethtool_nway_reset,
1732	.get_msglevel		= ravb_get_msglevel,
1733	.set_msglevel		= ravb_set_msglevel,
1734	.get_link		= ethtool_op_get_link,
1735	.get_strings		= ravb_get_strings,
1736	.get_ethtool_stats	= ravb_get_ethtool_stats,
1737	.get_sset_count		= ravb_get_sset_count,
1738	.get_ringparam		= ravb_get_ringparam,
1739	.set_ringparam		= ravb_set_ringparam,
1740	.get_ts_info		= ravb_get_ts_info,
1741	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
1742	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
1743	.get_wol		= ravb_get_wol,
1744	.set_wol		= ravb_set_wol,
1745};
1746
1747static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
1748				struct net_device *ndev, struct device *dev,
1749				const char *ch)
1750{
1751	char *name;
1752	int error;
1753
1754	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
1755	if (!name)
1756		return -ENOMEM;
1757	error = request_irq(irq, handler, 0, name, ndev);
1758	if (error)
1759		netdev_err(ndev, "cannot request IRQ %s\n", name);
1760
1761	return error;
1762}
1763
1764/* Network device open function for Ethernet AVB */
1765static int ravb_open(struct net_device *ndev)
1766{
1767	struct ravb_private *priv = netdev_priv(ndev);
1768	const struct ravb_hw_info *info = priv->info;
1769	struct platform_device *pdev = priv->pdev;
1770	struct device *dev = &pdev->dev;
1771	int error;
1772
1773	napi_enable(&priv->napi[RAVB_BE]);
1774	if (info->nc_queues)
1775		napi_enable(&priv->napi[RAVB_NC]);
1776
1777	if (!info->multi_irqs) {
1778		error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
1779				    ndev->name, ndev);
1780		if (error) {
1781			netdev_err(ndev, "cannot request IRQ\n");
1782			goto out_napi_off;
1783		}
1784	} else {
1785		error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
1786				      dev, "ch22:multi");
1787		if (error)
1788			goto out_napi_off;
1789		error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
1790				      dev, "ch24:emac");
1791		if (error)
1792			goto out_free_irq;
1793		error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
1794				      ndev, dev, "ch0:rx_be");
1795		if (error)
1796			goto out_free_irq_emac;
1797		error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
1798				      ndev, dev, "ch18:tx_be");
1799		if (error)
1800			goto out_free_irq_be_rx;
1801		error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
1802				      ndev, dev, "ch1:rx_nc");
1803		if (error)
1804			goto out_free_irq_be_tx;
1805		error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
1806				      ndev, dev, "ch19:tx_nc");
1807		if (error)
1808			goto out_free_irq_nc_rx;
1809
1810		if (info->err_mgmt_irqs) {
1811			error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
1812					      ndev, dev, "err_a");
1813			if (error)
1814				goto out_free_irq_nc_tx;
1815			error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
1816					      ndev, dev, "mgmt_a");
1817			if (error)
1818				goto out_free_irq_erra;
1819		}
1820	}
1821
1822	/* Device init */
1823	error = ravb_dmac_init(ndev);
1824	if (error)
1825		goto out_free_irq_mgmta;
1826	ravb_emac_init(ndev);
1827
1828	/* Initialise PTP Clock driver */
1829	if (info->gptp)
1830		ravb_ptp_init(ndev, priv->pdev);
1831
1832	netif_tx_start_all_queues(ndev);
1833
1834	/* PHY control start */
1835	error = ravb_phy_start(ndev);
1836	if (error)
1837		goto out_ptp_stop;
1838
1839	return 0;
1840
1841out_ptp_stop:
1842	/* Stop PTP Clock driver */
1843	if (info->gptp)
1844		ravb_ptp_stop(ndev);
1845out_free_irq_mgmta:
1846	if (!info->multi_irqs)
1847		goto out_free_irq;
1848	if (info->err_mgmt_irqs)
1849		free_irq(priv->mgmta_irq, ndev);
1850out_free_irq_erra:
1851	if (info->err_mgmt_irqs)
1852		free_irq(priv->erra_irq, ndev);
1853out_free_irq_nc_tx:
1854	free_irq(priv->tx_irqs[RAVB_NC], ndev);
1855out_free_irq_nc_rx:
1856	free_irq(priv->rx_irqs[RAVB_NC], ndev);
1857out_free_irq_be_tx:
1858	free_irq(priv->tx_irqs[RAVB_BE], ndev);
1859out_free_irq_be_rx:
1860	free_irq(priv->rx_irqs[RAVB_BE], ndev);
1861out_free_irq_emac:
1862	free_irq(priv->emac_irq, ndev);
1863out_free_irq:
1864	free_irq(ndev->irq, ndev);
1865out_napi_off:
1866	if (info->nc_queues)
1867		napi_disable(&priv->napi[RAVB_NC]);
1868	napi_disable(&priv->napi[RAVB_BE]);
1869	return error;
1870}
1871
1872/* Timeout function for Ethernet AVB */
1873static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1874{
1875	struct ravb_private *priv = netdev_priv(ndev);
1876
1877	netif_err(priv, tx_err, ndev,
1878		  "transmit timed out, status %08x, resetting...\n",
1879		  ravb_read(ndev, ISS));
1880
1881	/* tx_errors count up */
1882	ndev->stats.tx_errors++;
1883
1884	schedule_work(&priv->work);
1885}
1886
1887static void ravb_tx_timeout_work(struct work_struct *work)
1888{
1889	struct ravb_private *priv = container_of(work, struct ravb_private,
1890						 work);
1891	const struct ravb_hw_info *info = priv->info;
1892	struct net_device *ndev = priv->ndev;
1893	int error;
1894
1895	netif_tx_stop_all_queues(ndev);
1896
1897	/* Stop PTP Clock driver */
1898	if (info->gptp)
1899		ravb_ptp_stop(ndev);
1900
1901	/* Wait for DMA stopping */
1902	if (ravb_stop_dma(ndev)) {
1903		/* If ravb_stop_dma() fails, the hardware is still operating
1904		 * for TX and/or RX. So, the functions below must not be
1905		 * called, because ravb_dmac_init() could fail as well.
1906		 * Also, ravb_stop_dma() must not be retried again and again
1907		 * here, because that could wait forever. So, this just
1908		 * re-enables TX and RX and skips the following
1909		 * re-initialization procedure.
1910		 */
1911		ravb_rcv_snd_enable(ndev);
1912		goto out;
1913	}
1914
1915	ravb_ring_free(ndev, RAVB_BE);
1916	if (info->nc_queues)
1917		ravb_ring_free(ndev, RAVB_NC);
1918
1919	/* Device init */
1920	error = ravb_dmac_init(ndev);
1921	if (error) {
1922		/* If ravb_dmac_init() fails, descriptors are freed. So, this
1923		 * should return here to avoid re-enabling the TX and RX in
1924		 * ravb_emac_init().
1925		 */
1926		netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
1927			   __func__, error);
1928		return;
1929	}
1930	ravb_emac_init(ndev);
1931
1932out:
1933	/* Initialise PTP Clock driver */
1934	if (info->gptp)
1935		ravb_ptp_init(ndev, priv->pdev);
1936
1937	netif_tx_start_all_queues(ndev);
1938}
1939
1940/* Packet transmit function for Ethernet AVB */
1941static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1942{
1943	struct ravb_private *priv = netdev_priv(ndev);
1944	const struct ravb_hw_info *info = priv->info;
1945	unsigned int num_tx_desc = priv->num_tx_desc;
1946	u16 q = skb_get_queue_mapping(skb);
1947	struct ravb_tstamp_skb *ts_skb;
1948	struct ravb_tx_desc *desc;
1949	unsigned long flags;
1950	u32 dma_addr;
1951	void *buffer;
1952	u32 entry;
1953	u32 len;
1954
1955	spin_lock_irqsave(&priv->lock, flags);
1956	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
1957	    num_tx_desc) {
1958		netif_err(priv, tx_queued, ndev,
1959			  "still transmitting with the full ring!\n");
1960		netif_stop_subqueue(ndev, q);
1961		spin_unlock_irqrestore(&priv->lock, flags);
1962		return NETDEV_TX_BUSY;
1963	}
1964
1965	if (skb_put_padto(skb, ETH_ZLEN))
1966		goto exit;
1967
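	/* Each skb occupies num_tx_desc consecutive descriptors, so cur_tx
	 * wraps over num_tx_ring * num_tx_desc entries and the per-skb
	 * bookkeeping is indexed by entry / num_tx_desc.
	 */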
1968	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
1969	priv->tx_skb[q][entry / num_tx_desc] = skb;
1970
1971	if (num_tx_desc > 1) {
1972		buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
1973			 entry / num_tx_desc * DPTR_ALIGN;
1974		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
1975
1976		/* Zero length DMA descriptors are problematic as they seem
1977		 * to terminate DMA transfers. Avoid them by simply using a
1978		 * length of DPTR_ALIGN (4) when skb data is aligned to
1979		 * DPTR_ALIGN.
1980		 *
1981		 * As skb is guaranteed to have at least ETH_ZLEN (60)
1982		 * bytes of data by the call to skb_put_padto() above this
1983		 * is safe with respect to both the length of the first DMA
1984		 * descriptor (len) overflowing the available data and the
1985		 * length of the second DMA descriptor (skb->len - len)
1986		 * being negative.
1987		 */
1988		if (len == 0)
1989			len = DPTR_ALIGN;
1990
1991		memcpy(buffer, skb->data, len);
1992		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1993					  DMA_TO_DEVICE);
1994		if (dma_mapping_error(ndev->dev.parent, dma_addr))
1995			goto drop;
1996
1997		desc = &priv->tx_ring[q][entry];
1998		desc->ds_tagl = cpu_to_le16(len);
1999		desc->dptr = cpu_to_le32(dma_addr);
2000
2001		buffer = skb->data + len;
2002		len = skb->len - len;
2003		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
2004					  DMA_TO_DEVICE);
2005		if (dma_mapping_error(ndev->dev.parent, dma_addr))
2006			goto unmap;
2007
2008		desc++;
2009	} else {
2010		desc = &priv->tx_ring[q][entry];
2011		len = skb->len;
2012		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
2013					  DMA_TO_DEVICE);
2014		if (dma_mapping_error(ndev->dev.parent, dma_addr))
2015			goto drop;
2016	}
2017	desc->ds_tagl = cpu_to_le16(len);
2018	desc->dptr = cpu_to_le32(dma_addr);
2019
2020	/* TX timestamp required */
2021	if (info->gptp || info->ccc_gac) {
2022		if (q == RAVB_NC) {
2023			ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
2024			if (!ts_skb) {
2025				if (num_tx_desc > 1) {
2026					desc--;
2027					dma_unmap_single(ndev->dev.parent, dma_addr,
2028							 len, DMA_TO_DEVICE);
2029				}
2030				goto unmap;
2031			}
2032			ts_skb->skb = skb_get(skb);
2033			ts_skb->tag = priv->ts_skb_tag++;
2034			priv->ts_skb_tag &= 0x3ff;
2035			list_add_tail(&ts_skb->list, &priv->ts_skb_list);
2036
2037			/* TAG and timestamp required flag */
2038			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2039			desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
2040			desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
2041		}
2042
2043		skb_tx_timestamp(skb);
2044	}
2045	/* Descriptor type must be set after all the above writes */
2046	dma_wmb();
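	/* For multi-descriptor frames, set DT_FEND before DT_FSTART so the
	 * DMAC cannot see a started but not yet terminated frame.
	 */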
2047	if (num_tx_desc > 1) {
2048		desc->die_dt = DT_FEND;
2049		desc--;
2050		desc->die_dt = DT_FSTART;
2051	} else {
2052		desc->die_dt = DT_FSINGLE;
2053	}
2054	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
2055
2056	priv->cur_tx[q] += num_tx_desc;
2057	if (priv->cur_tx[q] - priv->dirty_tx[q] >
2058	    (priv->num_tx_ring[q] - 1) * num_tx_desc &&
2059	    !ravb_tx_free(ndev, q, true))
2060		netif_stop_subqueue(ndev, q);
2061
2062exit:
2063	spin_unlock_irqrestore(&priv->lock, flags);
2064	return NETDEV_TX_OK;
2065
2066unmap:
2067	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
2068			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
2069drop:
2070	dev_kfree_skb_any(skb);
2071	priv->tx_skb[q][entry / num_tx_desc] = NULL;
2072	goto exit;
2073}
2074
2075static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
2076			     struct net_device *sb_dev)
2077{
2078	/* If skb needs TX timestamp, it is handled in network control queue */
2079	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
2080							       RAVB_BE;
2081
2082}
2083
2084static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
2085{
2086	struct ravb_private *priv = netdev_priv(ndev);
2087	const struct ravb_hw_info *info = priv->info;
2088	struct net_device_stats *nstats, *stats0, *stats1;
2089
2090	nstats = &ndev->stats;
2091	stats0 = &priv->stats[RAVB_BE];
2092
2093	if (info->tx_counters) {
2094		nstats->tx_dropped += ravb_read(ndev, TROCR);
2095		ravb_write(ndev, 0, TROCR);	/* (write clear) */
2096	}
2097
2098	if (info->carrier_counters) {
2099		nstats->collisions += ravb_read(ndev, CXR41);
2100		ravb_write(ndev, 0, CXR41);	/* (write clear) */
2101		nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
2102		ravb_write(ndev, 0, CXR42);	/* (write clear) */
2103	}
2104
2105	nstats->rx_packets = stats0->rx_packets;
2106	nstats->tx_packets = stats0->tx_packets;
2107	nstats->rx_bytes = stats0->rx_bytes;
2108	nstats->tx_bytes = stats0->tx_bytes;
2109	nstats->multicast = stats0->multicast;
2110	nstats->rx_errors = stats0->rx_errors;
2111	nstats->rx_crc_errors = stats0->rx_crc_errors;
2112	nstats->rx_frame_errors = stats0->rx_frame_errors;
2113	nstats->rx_length_errors = stats0->rx_length_errors;
2114	nstats->rx_missed_errors = stats0->rx_missed_errors;
2115	nstats->rx_over_errors = stats0->rx_over_errors;
2116	if (info->nc_queues) {
2117		stats1 = &priv->stats[RAVB_NC];
2118
2119		nstats->rx_packets += stats1->rx_packets;
2120		nstats->tx_packets += stats1->tx_packets;
2121		nstats->rx_bytes += stats1->rx_bytes;
2122		nstats->tx_bytes += stats1->tx_bytes;
2123		nstats->multicast += stats1->multicast;
2124		nstats->rx_errors += stats1->rx_errors;
2125		nstats->rx_crc_errors += stats1->rx_crc_errors;
2126		nstats->rx_frame_errors += stats1->rx_frame_errors;
2127		nstats->rx_length_errors += stats1->rx_length_errors;
2128		nstats->rx_missed_errors += stats1->rx_missed_errors;
2129		nstats->rx_over_errors += stats1->rx_over_errors;
2130	}
2131
2132	return nstats;
2133}
2134
2135/* Update promiscuous bit */
2136static void ravb_set_rx_mode(struct net_device *ndev)
2137{
2138	struct ravb_private *priv = netdev_priv(ndev);
2139	unsigned long flags;
2140
2141	spin_lock_irqsave(&priv->lock, flags);
2142	ravb_modify(ndev, ECMR, ECMR_PRM,
2143		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
2144	spin_unlock_irqrestore(&priv->lock, flags);
2145}
2146
2147/* Device close function for Ethernet AVB */
2148static int ravb_close(struct net_device *ndev)
2149{
2150	struct device_node *np = ndev->dev.parent->of_node;
2151	struct ravb_private *priv = netdev_priv(ndev);
2152	const struct ravb_hw_info *info = priv->info;
2153	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
2154
2155	netif_tx_stop_all_queues(ndev);
2156
2157	/* Disable interrupts by clearing the interrupt masks. */
2158	ravb_write(ndev, 0, RIC0);
2159	ravb_write(ndev, 0, RIC2);
2160	ravb_write(ndev, 0, TIC);
2161
2162	/* Stop PTP Clock driver */
2163	if (info->gptp)
2164		ravb_ptp_stop(ndev);
2165
2166	/* Set the config mode to stop the AVB-DMAC's processes */
2167	if (ravb_stop_dma(ndev) < 0)
2168		netdev_err(ndev,
2169			   "device will be stopped after h/w processes are done.\n");
2170
2171	/* Clear the timestamp list */
2172	if (info->gptp || info->ccc_gac) {
2173		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
2174			list_del(&ts_skb->list);
2175			kfree_skb(ts_skb->skb);
2176			kfree(ts_skb);
2177		}
2178	}
2179
2180	/* PHY disconnect */
2181	if (ndev->phydev) {
2182		phy_stop(ndev->phydev);
2183		phy_disconnect(ndev->phydev);
2184		if (of_phy_is_fixed_link(np))
2185			of_phy_deregister_fixed_link(np);
2186	}
2187
2188	if (info->multi_irqs) {
2189		free_irq(priv->tx_irqs[RAVB_NC], ndev);
2190		free_irq(priv->rx_irqs[RAVB_NC], ndev);
2191		free_irq(priv->tx_irqs[RAVB_BE], ndev);
2192		free_irq(priv->rx_irqs[RAVB_BE], ndev);
2193		free_irq(priv->emac_irq, ndev);
2194		if (info->err_mgmt_irqs) {
2195			free_irq(priv->erra_irq, ndev);
2196			free_irq(priv->mgmta_irq, ndev);
2197		}
2198	}
2199	free_irq(ndev->irq, ndev);
2200
2201	if (info->nc_queues)
2202		napi_disable(&priv->napi[RAVB_NC]);
2203	napi_disable(&priv->napi[RAVB_BE]);
2204
2205	/* Free all the skb's in the RX queue and the DMA buffers. */
2206	ravb_ring_free(ndev, RAVB_BE);
2207	if (info->nc_queues)
2208		ravb_ring_free(ndev, RAVB_NC);
2209
2210	return 0;
2211}
2212
2213static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
2214{
2215	struct ravb_private *priv = netdev_priv(ndev);
2216	struct hwtstamp_config config;
2217
2218	config.flags = 0;
2219	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
2220						HWTSTAMP_TX_OFF;
2221	switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
2222	case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
2223		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
2224		break;
2225	case RAVB_RXTSTAMP_TYPE_ALL:
2226		config.rx_filter = HWTSTAMP_FILTER_ALL;
2227		break;
2228	default:
2229		config.rx_filter = HWTSTAMP_FILTER_NONE;
2230	}
2231
2232	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
2233		-EFAULT : 0;
2234}
2235
2236/* Control hardware time stamping */
2237static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
2238{
2239	struct ravb_private *priv = netdev_priv(ndev);
2240	struct hwtstamp_config config;
2241	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
2242	u32 tstamp_tx_ctrl;
2243
2244	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
2245		return -EFAULT;
2246
2247	switch (config.tx_type) {
2248	case HWTSTAMP_TX_OFF:
2249		tstamp_tx_ctrl = 0;
2250		break;
2251	case HWTSTAMP_TX_ON:
2252		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
2253		break;
2254	default:
2255		return -ERANGE;
2256	}
2257
2258	switch (config.rx_filter) {
2259	case HWTSTAMP_FILTER_NONE:
2260		tstamp_rx_ctrl = 0;
2261		break;
2262	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2263		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
2264		break;
2265	default:
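		/* Unsupported filters are widened to timestamping all
		 * packets; the filter actually used is reported back to
		 * user space below.
		 */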
2266		config.rx_filter = HWTSTAMP_FILTER_ALL;
2267		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
2268	}
2269
2270	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
2271	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
2272
2273	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
2274		-EFAULT : 0;
2275}
2276
2277/* ioctl to device function */
2278static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
2279{
2280	struct phy_device *phydev = ndev->phydev;
2281
2282	if (!netif_running(ndev))
2283		return -EINVAL;
2284
2285	if (!phydev)
2286		return -ENODEV;
2287
2288	switch (cmd) {
2289	case SIOCGHWTSTAMP:
2290		return ravb_hwtstamp_get(ndev, req);
2291	case SIOCSHWTSTAMP:
2292		return ravb_hwtstamp_set(ndev, req);
2293	}
2294
2295	return phy_mii_ioctl(phydev, req, cmd);
2296}
2297
2298static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
2299{
2300	struct ravb_private *priv = netdev_priv(ndev);
2301
2302	ndev->mtu = new_mtu;
2303
2304	if (netif_running(ndev)) {
2305		synchronize_irq(priv->emac_irq);
2306		ravb_emac_init(ndev);
2307	}
2308
2309	netdev_update_features(ndev);
2310
2311	return 0;
2312}
2313
2314static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
2315{
2316	struct ravb_private *priv = netdev_priv(ndev);
2317	unsigned long flags;
2318
2319	spin_lock_irqsave(&priv->lock, flags);
2320
2321	/* Disable TX and RX */
2322	ravb_rcv_snd_disable(ndev);
2323
2324	/* Modify RX Checksum setting */
2325	ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
2326
2327	/* Enable TX and RX */
2328	ravb_rcv_snd_enable(ndev);
2329
2330	spin_unlock_irqrestore(&priv->lock, flags);
2331}
2332
2333static int ravb_set_features_gbeth(struct net_device *ndev,
2334				   netdev_features_t features)
2335{
2336	/* Placeholder */
2337	return 0;
2338}
2339
2340static int ravb_set_features_rcar(struct net_device *ndev,
2341				  netdev_features_t features)
2342{
2343	netdev_features_t changed = ndev->features ^ features;
2344
2345	if (changed & NETIF_F_RXCSUM)
2346		ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
2347
2348	ndev->features = features;
2349
2350	return 0;
2351}
2352
2353static int ravb_set_features(struct net_device *ndev,
2354			     netdev_features_t features)
2355{
2356	struct ravb_private *priv = netdev_priv(ndev);
2357	const struct ravb_hw_info *info = priv->info;
2358
2359	return info->set_feature(ndev, features);
2360}
2361
2362static const struct net_device_ops ravb_netdev_ops = {
2363	.ndo_open		= ravb_open,
2364	.ndo_stop		= ravb_close,
2365	.ndo_start_xmit		= ravb_start_xmit,
2366	.ndo_select_queue	= ravb_select_queue,
2367	.ndo_get_stats		= ravb_get_stats,
2368	.ndo_set_rx_mode	= ravb_set_rx_mode,
2369	.ndo_tx_timeout		= ravb_tx_timeout,
2370	.ndo_eth_ioctl		= ravb_do_ioctl,
2371	.ndo_change_mtu		= ravb_change_mtu,
2372	.ndo_validate_addr	= eth_validate_addr,
2373	.ndo_set_mac_address	= eth_mac_addr,
2374	.ndo_set_features	= ravb_set_features,
2375};
2376
2377/* MDIO bus init function */
2378static int ravb_mdio_init(struct ravb_private *priv)
2379{
2380	struct platform_device *pdev = priv->pdev;
2381	struct device *dev = &pdev->dev;
2382	int error;
2383
2384	/* Bitbang init */
2385	priv->mdiobb.ops = &bb_ops;
2386
2387	/* MII controller setting */
2388	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
2389	if (!priv->mii_bus)
2390		return -ENOMEM;
2391
2392	/* Hook up MII support for ethtool */
2393	priv->mii_bus->name = "ravb_mii";
2394	priv->mii_bus->parent = dev;
2395	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2396		 pdev->name, pdev->id);
2397
2398	/* Register MDIO bus */
2399	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
2400	if (error)
2401		goto out_free_bus;
2402
2403	return 0;
2404
2405out_free_bus:
2406	free_mdio_bitbang(priv->mii_bus);
2407	return error;
2408}
2409
2410/* MDIO bus release function */
2411static int ravb_mdio_release(struct ravb_private *priv)
2412{
2413	/* Unregister mdio bus */
2414	mdiobus_unregister(priv->mii_bus);
2415
2416	/* Free bitbang info */
2417	free_mdio_bitbang(priv->mii_bus);
2418
2419	return 0;
2420}

static const struct ravb_hw_info ravb_gen3_hw_info = {
	.rx_ring_free = ravb_rx_ring_free_rcar,
	.rx_ring_format = ravb_rx_ring_format_rcar,
	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
	.receive = ravb_rx_rcar,
	.set_rate = ravb_set_rate_rcar,
	.set_feature = ravb_set_features_rcar,
	.dmac_init = ravb_dmac_init_rcar,
	.emac_init = ravb_emac_init_rcar,
	.gstrings_stats = ravb_gstrings_stats,
	.gstrings_size = sizeof(ravb_gstrings_stats),
	.net_hw_features = NETIF_F_RXCSUM,
	.net_features = NETIF_F_RXCSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.rx_max_buf_size = SZ_2K,
	.internal_delay = 1,
	.tx_counters = 1,
	.multi_irqs = 1,
	.irq_en_dis = 1,
	.ccc_gac = 1,
	.nc_queues = 1,
	.magic_pkt = 1,
};

static const struct ravb_hw_info ravb_gen2_hw_info = {
	.rx_ring_free = ravb_rx_ring_free_rcar,
	.rx_ring_format = ravb_rx_ring_format_rcar,
	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
	.receive = ravb_rx_rcar,
	.set_rate = ravb_set_rate_rcar,
	.set_feature = ravb_set_features_rcar,
	.dmac_init = ravb_dmac_init_rcar,
	.emac_init = ravb_emac_init_rcar,
	.gstrings_stats = ravb_gstrings_stats,
	.gstrings_size = sizeof(ravb_gstrings_stats),
	.net_hw_features = NETIF_F_RXCSUM,
	.net_features = NETIF_F_RXCSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.rx_max_buf_size = SZ_2K,
	.aligned_tx = 1,
	.gptp = 1,
	.nc_queues = 1,
	.magic_pkt = 1,
};

static const struct ravb_hw_info ravb_rzv2m_hw_info = {
	.rx_ring_free = ravb_rx_ring_free_rcar,
	.rx_ring_format = ravb_rx_ring_format_rcar,
	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
	.receive = ravb_rx_rcar,
	.set_rate = ravb_set_rate_rcar,
	.set_feature = ravb_set_features_rcar,
	.dmac_init = ravb_dmac_init_rcar,
	.emac_init = ravb_emac_init_rcar,
	.gstrings_stats = ravb_gstrings_stats,
	.gstrings_size = sizeof(ravb_gstrings_stats),
	.net_hw_features = NETIF_F_RXCSUM,
	.net_features = NETIF_F_RXCSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.rx_max_buf_size = SZ_2K,
	.multi_irqs = 1,
	.err_mgmt_irqs = 1,
	.gptp = 1,
	.gptp_ref_clk = 1,
	.nc_queues = 1,
	.magic_pkt = 1,
};

static const struct ravb_hw_info gbeth_hw_info = {
	.rx_ring_free = ravb_rx_ring_free_gbeth,
	.rx_ring_format = ravb_rx_ring_format_gbeth,
	.alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
	.receive = ravb_rx_gbeth,
	.set_rate = ravb_set_rate_gbeth,
	.set_feature = ravb_set_features_gbeth,
	.dmac_init = ravb_dmac_init_gbeth,
	.emac_init = ravb_emac_init_gbeth,
	.gstrings_stats = ravb_gstrings_stats_gbeth,
	.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
	.max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
	.tccr_mask = TCCR_TSRQ0,
	.rx_max_buf_size = SZ_8K,
	.aligned_tx = 1,
	.tx_counters = 1,
	.carrier_counters = 1,
	.half_duplex = 1,
};

static const struct of_device_id ravb_match_table[] = {
	{ .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
	{ .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
	{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
	{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
	{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
	{ .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
	{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
	{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
	{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
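
/* A hypothetical device tree fragment matched by the table above; the unit
 * address and property values are illustrative, not board-accurate:
 *
 *	ethernet@e6800000 {
 *		compatible = "renesas,etheravb-r8a7795",
 *			     "renesas,etheravb-rcar-gen3";
 *		reg = <0 0xe6800000 0 0x800>;
 *		phy-mode = "rgmii";
 *	};
 */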

static int ravb_set_gti(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = ndev->dev.parent;
	unsigned long rate;
	uint64_t inc;

	if (info->gptp_ref_clk)
		rate = clk_get_rate(priv->gptp_clk);
	else
		rate = clk_get_rate(priv->clk);
	if (!rate)
		return -EINVAL;

	inc = div64_ul(1000000000ULL << 20, rate);

	if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
		dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
			inc, GTI_TIV_MIN, GTI_TIV_MAX);
		return -EINVAL;
	}

	ravb_write(ndev, inc, GTI);

	return 0;
}
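
/* Worked example for the Q20 fixed-point computation in ravb_set_gti(),
 * assuming a 125 MHz gPTP reference clock:
 *
 *	inc = (10^9 << 20) / 125000000 = 8 << 20 = 0x800000
 *
 * i.e. the timer advances 8 ns (expressed in 2^-20 ns units) per clock
 * cycle; the result must lie within [GTI_TIV_MIN, GTI_TIV_MAX].
 */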

static void ravb_set_config_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	if (info->gptp) {
		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
		/* Set CSEL value */
		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
	} else if (info->ccc_gac) {
		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
			    CCC_GAC | CCC_CSEL_HPB);
	} else {
		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
	}
}

/* Set TX and RX clock internal delay modes */
static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	bool explicit_delay = false;
	u32 delay;

	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 1800, according to DT bindings */
		priv->rxcidm = !!delay;
		explicit_delay = true;
	}
	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 2000, according to DT bindings */
		priv->txcidm = !!delay;
		explicit_delay = true;
	}

	if (explicit_delay)
		return;

	/* Fall back to legacy rgmii-*id behavior */
	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		priv->rxcidm = 1;
		priv->rgmii_override = 1;
	}

	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		priv->txcidm = 1;
		priv->rgmii_override = 1;
	}
}
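
/* A hypothetical device tree fragment taking the explicit-delay path in
 * ravb_parse_delay_mode(); per the bindings quoted above, RX accepts 0 or
 * 1800 ps and TX accepts 0 or 2000 ps:
 *
 *	&avb {
 *		phy-mode = "rgmii";
 *		rx-internal-delay-ps = <1800>;
 *		tx-internal-delay-ps = <2000>;
 *	};
 */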

static void ravb_set_delay_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 set = 0;

	if (priv->rxcidm)
		set |= APSR_RDM;
	if (priv->txcidm)
		set |= APSR_TDM;
	ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
}

static int ravb_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct ravb_hw_info *info;
	struct reset_control *rstc;
	struct ravb_private *priv;
	struct net_device *ndev;
	int error, irq, q;
	struct resource *res;
	int i;

	if (!np) {
		dev_err(&pdev->dev,
			"this driver is required to be instantiated from device tree\n");
		return -EINVAL;
	}

	rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
				     "failed to get cpg reset\n");

	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
				  NUM_TX_QUEUE, NUM_RX_QUEUE);
	if (!ndev)
		return -ENOMEM;

	info = of_device_get_match_data(&pdev->dev);

	ndev->features = info->net_features;
	ndev->hw_features = info->net_hw_features;

	reset_control_deassert(rstc);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	if (info->multi_irqs) {
		if (info->err_mgmt_irqs)
			irq = platform_get_irq_byname(pdev, "dia");
		else
			irq = platform_get_irq_byname(pdev, "ch22");
	} else {
		irq = platform_get_irq(pdev, 0);
	}
	if (irq < 0) {
		error = irq;
		goto out_release;
	}
	ndev->irq = irq;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->info = info;
	priv->rstc = rstc;
	priv->ndev = ndev;
	priv->pdev = pdev;
	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
	if (info->nc_queues) {
		priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
		priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
	}

	priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(priv->addr)) {
		error = PTR_ERR(priv->addr);
		goto out_release;
	}

	/* The Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;

	spin_lock_init(&priv->lock);
	INIT_WORK(&priv->work, ravb_tx_timeout_work);

	error = of_get_phy_mode(np, &priv->phy_interface);
	if (error && error != -ENODEV)
		goto out_release;

	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
	priv->avb_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	if (info->multi_irqs) {
		if (info->err_mgmt_irqs)
			irq = platform_get_irq_byname(pdev, "line3");
		else
			irq = platform_get_irq_byname(pdev, "ch24");
		if (irq < 0) {
			error = irq;
			goto out_release;
		}
		priv->emac_irq = irq;
		for (i = 0; i < NUM_RX_QUEUE; i++) {
			irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->rx_irqs[i] = irq;
		}
		for (i = 0; i < NUM_TX_QUEUE; i++) {
			irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->tx_irqs[i] = irq;
		}

		if (info->err_mgmt_irqs) {
			irq = platform_get_irq_byname(pdev, "err_a");
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->erra_irq = irq;

			irq = platform_get_irq_byname(pdev, "mgmt_a");
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->mgmta_irq = irq;
		}
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		error = PTR_ERR(priv->clk);
		goto out_release;
	}

	priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
	if (IS_ERR(priv->refclk)) {
		error = PTR_ERR(priv->refclk);
		goto out_release;
	}
	clk_prepare_enable(priv->refclk);

	if (info->gptp_ref_clk) {
		priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
		if (IS_ERR(priv->gptp_clk)) {
			error = PTR_ERR(priv->gptp_clk);
			goto out_disable_refclk;
		}
		clk_prepare_enable(priv->gptp_clk);
	}

	ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
	ndev->min_mtu = ETH_MIN_MTU;

	/* FIXME: R-Car Gen2 has a 4-byte alignment restriction for the TX buffer.
	 * Use two descriptors to handle such a situation: the first descriptor
	 * covers the aligned part of the data buffer and the second covers the
	 * overflow caused by alignment.
	 */
	priv->num_tx_desc = info->aligned_tx ? 2 : 1;

	/* Set function */
	ndev->netdev_ops = &ravb_netdev_ops;
	ndev->ethtool_ops = &ravb_ethtool_ops;

	/* Set AVB config mode */
	ravb_set_config_mode(ndev);

	if (info->gptp || info->ccc_gac) {
		/* Set GTI value */
		error = ravb_set_gti(ndev);
		if (error)
			goto out_disable_gptp_clk;

		/* Request GTI loading */
		ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
	}

	if (info->internal_delay) {
		ravb_parse_delay_mode(np, ndev);
		ravb_set_delay_mode(ndev);
	}

	/* Allocate descriptor base address table */
	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
					    &priv->desc_bat_dma, GFP_KERNEL);
	if (!priv->desc_bat) {
		dev_err(&pdev->dev,
			"Cannot allocate desc base address table (size %d bytes)\n",
			priv->desc_bat_size);
		error = -ENOMEM;
		goto out_disable_gptp_clk;
	}
	/* Mark every entry as "end of set" until the queues are formatted */
	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
		priv->desc_bat[q].die_dt = DT_EOS;
	ravb_write(ndev, priv->desc_bat_dma, DBAT);
	/* Initialise HW timestamp list */
	INIT_LIST_HEAD(&priv->ts_skb_list);

	/* Initialise PTP Clock driver */
	if (info->ccc_gac)
		ravb_ptp_init(ndev, pdev);

	/* Debug message level */
	priv->msg_enable = RAVB_DEF_MSG_ENABLE;

	/* Read and set MAC address */
	ravb_read_mac_address(np, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one\n");
		eth_hw_addr_random(ndev);
	}

	/* MDIO bus init */
	error = ravb_mdio_init(priv);
	if (error) {
		dev_err(&pdev->dev, "failed to initialize MDIO\n");
		goto out_dma_free;
	}

	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
	if (info->nc_queues)
		netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);

	/* Network device register */
	error = register_netdev(ndev);
	if (error)
		goto out_napi_del;

	device_set_wakeup_capable(&pdev->dev, 1);

	/* Print device information */
	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return 0;

out_napi_del:
	if (info->nc_queues)
		netif_napi_del(&priv->napi[RAVB_NC]);

	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
out_dma_free:
	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);

	/* Stop PTP Clock driver */
	if (info->ccc_gac)
		ravb_ptp_stop(ndev);
out_disable_gptp_clk:
	clk_disable_unprepare(priv->gptp_clk);
out_disable_refclk:
	clk_disable_unprepare(priv->refclk);
out_release:
	free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(rstc);
	return error;
}

static int ravb_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	/* Stop PTP Clock driver */
	if (info->ccc_gac)
		ravb_ptp_stop(ndev);

	clk_disable_unprepare(priv->gptp_clk);
	clk_disable_unprepare(priv->refclk);

	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);
	/* Set reset mode */
	ravb_write(ndev, CCC_OPC_RESET, CCC);
	unregister_netdev(ndev);
	if (info->nc_queues)
		netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(priv->rstc);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static int ravb_wol_setup(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	/* Disable interrupts by clearing the interrupt masks. */
	ravb_write(ndev, 0, RIC0);
	ravb_write(ndev, 0, RIC2);
	ravb_write(ndev, 0, TIC);

	/* Only allow ECI interrupts */
	synchronize_irq(priv->emac_irq);
	if (info->nc_queues)
		napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);

	/* Enable MagicPacket */
	ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);

	return enable_irq_wake(priv->emac_irq);
}
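
/* Illustrative userspace arming of the MagicPacket path above (the
 * interface name is a placeholder):
 *
 *	ethtool -s eth0 wol g
 *
 * This sets priv->wol_enabled through the driver's ethtool ops, so that
 * ravb_suspend() takes the ravb_wol_setup() path instead of closing the
 * device.
 */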

static int ravb_wol_restore(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	if (info->nc_queues)
		napi_enable(&priv->napi[RAVB_NC]);
	napi_enable(&priv->napi[RAVB_BE]);

	/* Disable MagicPacket */
	ravb_modify(ndev, ECMR, ECMR_MPDE, 0);

	ravb_close(ndev);

	return disable_irq_wake(priv->emac_irq);
}

static int __maybe_unused ravb_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);

	if (priv->wol_enabled)
		ret = ravb_wol_setup(ndev);
	else
		ret = ravb_close(ndev);

	if (priv->info->ccc_gac)
		ravb_ptp_stop(ndev);

	return ret;
}

static int __maybe_unused ravb_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int ret = 0;

	/* If WoL is enabled set reset mode to rearm the WoL logic */
	if (priv->wol_enabled)
		ravb_write(ndev, CCC_OPC_RESET, CCC);

	/* All registers have been reset to their default values.
	 * Restore all registers which were set up at probe time and
	 * reopen the device if it was running before the system suspended.
	 */

	/* Set AVB config mode */
	ravb_set_config_mode(ndev);

	if (info->gptp || info->ccc_gac) {
		/* Set GTI value */
		ret = ravb_set_gti(ndev);
		if (ret)
			return ret;

		/* Request GTI loading */
		ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
	}

	if (info->internal_delay)
		ravb_set_delay_mode(ndev);

	/* Restore descriptor base address table */
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	if (priv->info->ccc_gac)
		ravb_ptp_init(ndev, priv->pdev);

	if (netif_running(ndev)) {
		if (priv->wol_enabled) {
			ret = ravb_wol_restore(ndev);
			if (ret)
				return ret;
		}
		ret = ravb_open(ndev);
		if (ret < 0)
			return ret;
		ravb_set_rx_mode(ndev);
		netif_device_attach(ndev);
	}

	return ret;
}

static int __maybe_unused ravb_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops ravb_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
	SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
};

static struct platform_driver ravb_driver = {
	.probe		= ravb_probe,
	.remove		= ravb_remove,
	.driver = {
		.name	= "ravb",
		.pm	= &ravb_dev_pm_ops,
		.of_match_table = ravb_match_table,
	},
};

module_platform_driver(ravb_driver);

MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
MODULE_LICENSE("GPL v2");