   1// SPDX-License-Identifier: GPL-2.0
   2/* Renesas Ethernet AVB device driver
   3 *
   4 * Copyright (C) 2014-2019 Renesas Electronics Corporation
   5 * Copyright (C) 2015 Renesas Solutions Corp.
   6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
   7 *
   8 * Based on the SuperH Ethernet driver
   9 */
  10
  11#include <linux/cache.h>
  12#include <linux/clk.h>
  13#include <linux/delay.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/err.h>
  16#include <linux/etherdevice.h>
  17#include <linux/ethtool.h>
  18#include <linux/if_vlan.h>
  19#include <linux/kernel.h>
  20#include <linux/list.h>
  21#include <linux/module.h>
  22#include <linux/net_tstamp.h>
  23#include <linux/of.h>
  24#include <linux/of_mdio.h>
  25#include <linux/of_net.h>
  26#include <linux/platform_device.h>
  27#include <linux/pm_runtime.h>
  28#include <linux/slab.h>
  29#include <linux/spinlock.h>
  30#include <linux/reset.h>
  31#include <linux/math64.h>
  32
  33#include "ravb.h"
  34
  35#define RAVB_DEF_MSG_ENABLE \
  36		(NETIF_MSG_LINK	  | \
  37		 NETIF_MSG_TIMER  | \
  38		 NETIF_MSG_RX_ERR | \
  39		 NETIF_MSG_TX_ERR)
  40
  41static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
  42	"ch0", /* RAVB_BE */
  43	"ch1", /* RAVB_NC */
  44};
  45
  46static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
  47	"ch18", /* RAVB_BE */
  48	"ch19", /* RAVB_NC */
  49};
  50
  51void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
  52		 u32 set)
  53{
  54	ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
  55}
  56
  57int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
  58{
  59	int i;
  60
  61	for (i = 0; i < 10000; i++) {
  62		if ((ravb_read(ndev, reg) & mask) == value)
  63			return 0;
  64		udelay(10);
  65	}
  66	return -ETIMEDOUT;
  67}
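/* Editor's note: ravb_modify() and ravb_wait() are the register-access
 * idiom used throughout this driver: do a read-modify-write, then poll
 * a status register until the hardware acknowledges.  The 10000 rounds
 * of udelay(10) above bound the wait at roughly 100 ms.  A minimal,
 * illustrative use of the pair (assuming the CSR_OPS_CONFIG status
 * value from ravb.h), not part of the driver:
 *
 *	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
 *	if (ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG))
 *		netdev_err(ndev, "config mode timed out\n");
 */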
  68
  69static int ravb_set_opmode(struct net_device *ndev, u32 opmode)
  70{
  71	u32 csr_ops = 1U << (opmode & CCC_OPC);
  72	u32 ccc_mask = CCC_OPC;
  73	int error;
  74
  75	/* If gPTP active in config mode is supported it needs to be configured
  76	 * along with CSEL and operating mode in the same access. This is a
  77	 * hardware limitation.
  78	 */
  79	if (opmode & CCC_GAC)
  80		ccc_mask |= CCC_GAC | CCC_CSEL;
  81
  82	/* Set operating mode */
  83	ravb_modify(ndev, CCC, ccc_mask, opmode);
  84	/* Check if the operating mode is changed to the requested one */
  85	error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
  86	if (error) {
  87		netdev_err(ndev, "failed to switch device to requested mode (%u)\n",
  88			   opmode & CCC_OPC);
  89	}
  90
  91	return error;
  92}
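/* Editor's note: CSR.OPS reports the current mode as a one-hot bit, so
 * "1U << (opmode & CCC_OPC)" turns the requested mode number into the
 * status bit to poll for.  By my reading of ravb.h, requesting
 * CCC_OPC_OPERATION (value 2) yields csr_ops = BIT(2), which is exactly
 * the CSR_OPS bit the hardware raises once it is actually operating.
 */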
  93
  94static void ravb_set_rate_gbeth(struct net_device *ndev)
  95{
  96	struct ravb_private *priv = netdev_priv(ndev);
  97
  98	switch (priv->speed) {
  99	case 10:                /* 10BASE */
 100		ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
 101		break;
 102	case 100:               /* 100BASE */
 103		ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
 104		break;
 105	case 1000:              /* 1000BASE */
 106		ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
 107		break;
 108	}
 109}
 110
 111static void ravb_set_rate_rcar(struct net_device *ndev)
 112{
 113	struct ravb_private *priv = netdev_priv(ndev);
 114
 115	switch (priv->speed) {
 116	case 100:		/* 100BASE */
 117		ravb_write(ndev, GECMR_SPEED_100, GECMR);
 118		break;
 119	case 1000:		/* 1000BASE */
 120		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
 121		break;
 122	}
 123}
 124
 125static void ravb_set_buffer_align(struct sk_buff *skb)
 126{
 127	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
 128
 129	if (reserve)
 130		skb_reserve(skb, RAVB_ALIGN - reserve);
 131}
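/* Editor's note: a worked example of the alignment above, assuming
 * RAVB_ALIGN is 128 as defined in ravb.h: if skb->data ends in 0x24,
 * reserve = 0x24 and skb_reserve() advances the data pointer by
 * 128 - 0x24 = 92 bytes, landing it on the next 128-byte boundary
 * required for the DMA-mapped RX buffers.
 */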
 132
 133/* Get MAC address from the MAC address registers
 134 *
 135 * Ethernet AVB device doesn't have ROM for MAC address.
 136 * This function gets the MAC address that was used by a bootloader.
 137 */
 138static void ravb_read_mac_address(struct device_node *np,
 139				  struct net_device *ndev)
 140{
 141	int ret;
 142
 143	ret = of_get_ethdev_address(np, ndev);
 144	if (ret) {
 145		u32 mahr = ravb_read(ndev, MAHR);
 146		u32 malr = ravb_read(ndev, MALR);
 147		u8 addr[ETH_ALEN];
 148
 149		addr[0] = (mahr >> 24) & 0xFF;
 150		addr[1] = (mahr >> 16) & 0xFF;
 151		addr[2] = (mahr >>  8) & 0xFF;
 152		addr[3] = (mahr >>  0) & 0xFF;
 153		addr[4] = (malr >>  8) & 0xFF;
 154		addr[5] = (malr >>  0) & 0xFF;
 155		eth_hw_addr_set(ndev, addr);
 156	}
 157}
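/* Editor's note: MAHR/MALR hold the address most-significant byte
 * first: the first four bytes in MAHR, the last two in the low half of
 * MALR.  For 74:90:50:12:34:56, for example, a bootloader would have
 * written MAHR = 0x74905012 and MALR = 0x00003456, which the shifts
 * above unpack byte for byte.
 */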
 158
 159static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
 160{
 161	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
 162						 mdiobb);
 163
 164	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
 165}
 166
 167/* MDC pin control */
 168static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
 169{
 170	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
 171}
 172
 173/* Data I/O pin control */
 174static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
 175{
 176	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
 177}
 178
 179/* Set data bit */
 180static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
 181{
 182	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
 183}
 184
 185/* Get data bit */
 186static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
 187{
 188	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
 189						 mdiobb);
 190
 191	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
 192}
 193
 194/* MDIO bus control struct */
 195static const struct mdiobb_ops bb_ops = {
 196	.owner = THIS_MODULE,
 197	.set_mdc = ravb_set_mdc,
 198	.set_mdio_dir = ravb_set_mdio_dir,
 199	.set_mdio_data = ravb_set_mdio_data,
 200	.get_mdio_data = ravb_get_mdio_data,
 201};
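/* Editor's note: bb_ops plugs into the kernel's generic bit-banged MDIO
 * layer (drivers/net/mdio/mdio-bitbang.c), which synthesizes MDIO frames
 * by toggling the PIR bits through the four callbacks above.  Elsewhere
 * in this file (outside this excerpt) the bus is created roughly like:
 *
 *	priv->mdiobb.ops = &bb_ops;
 *	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
 */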
 202
 203/* Free TX skb function for AVB-IP */
 204static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 205{
 206	struct ravb_private *priv = netdev_priv(ndev);
 207	struct net_device_stats *stats = &priv->stats[q];
 208	unsigned int num_tx_desc = priv->num_tx_desc;
 209	struct ravb_tx_desc *desc;
 210	unsigned int entry;
 211	int free_num = 0;
 212	u32 size;
 213
 214	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
 215		bool txed;
 216
 217		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
 218					     num_tx_desc);
 219		desc = &priv->tx_ring[q][entry];
 220		txed = desc->die_dt == DT_FEMPTY;
 221		if (free_txed_only && !txed)
 222			break;
 223		/* Descriptor type must be checked before all other reads */
 224		dma_rmb();
 225		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 226		/* Free the original skb. */
 227		if (priv->tx_skb[q][entry / num_tx_desc]) {
 228			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 229					 size, DMA_TO_DEVICE);
 230			/* Last packet descriptor? */
 231			if (entry % num_tx_desc == num_tx_desc - 1) {
 232				entry /= num_tx_desc;
 233				dev_kfree_skb_any(priv->tx_skb[q][entry]);
 234				priv->tx_skb[q][entry] = NULL;
 235				if (txed)
 236					stats->tx_packets++;
 237			}
 238			free_num++;
 239		}
 240		if (txed)
 241			stats->tx_bytes += size;
 242		desc->die_dt = DT_EEMPTY;
 243	}
 244	return free_num;
 245}
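/* Editor's note: cur_tx/dirty_tx are free-running counters, so the ring
 * slot is recovered with a modulo.  Worked example, assuming a 64-packet
 * ring with num_tx_desc = 2 descriptors per packet: dirty_tx = 130 gives
 * entry = 130 % 128 = 2, and the owning skb slot is entry / 2 = 1.  Only
 * the last descriptor of a packet (entry % num_tx_desc == num_tx_desc - 1)
 * carries the skb to free.
 */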
 246
 247static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
 248{
 249	struct ravb_private *priv = netdev_priv(ndev);
 250	unsigned int ring_size;
 251	unsigned int i;
 252
 253	if (!priv->gbeth_rx_ring)
 254		return;
 255
 256	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 257		struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
 258
 259		if (!dma_mapping_error(ndev->dev.parent,
 260				       le32_to_cpu(desc->dptr)))
 261			dma_unmap_single(ndev->dev.parent,
 262					 le32_to_cpu(desc->dptr),
 263					 GBETH_RX_BUFF_MAX,
 264					 DMA_FROM_DEVICE);
 265	}
 266	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
 267	dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
 268			  priv->rx_desc_dma[q]);
 269	priv->gbeth_rx_ring = NULL;
 270}
 271
 272static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
 273{
 274	struct ravb_private *priv = netdev_priv(ndev);
 275	unsigned int ring_size;
 276	unsigned int i;
 277
 278	if (!priv->rx_ring[q])
 279		return;
 280
 281	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 282		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
 283
 284		if (!dma_mapping_error(ndev->dev.parent,
 285				       le32_to_cpu(desc->dptr)))
 286			dma_unmap_single(ndev->dev.parent,
 287					 le32_to_cpu(desc->dptr),
 288					 RX_BUF_SZ,
 289					 DMA_FROM_DEVICE);
 290	}
 291	ring_size = sizeof(struct ravb_ex_rx_desc) *
 292		    (priv->num_rx_ring[q] + 1);
 293	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
 294			  priv->rx_desc_dma[q]);
 295	priv->rx_ring[q] = NULL;
 296}
 297
 298/* Free skb's and DMA buffers for Ethernet AVB */
 299static void ravb_ring_free(struct net_device *ndev, int q)
 300{
 301	struct ravb_private *priv = netdev_priv(ndev);
 302	const struct ravb_hw_info *info = priv->info;
 303	unsigned int num_tx_desc = priv->num_tx_desc;
 304	unsigned int ring_size;
 305	unsigned int i;
 306
 307	info->rx_ring_free(ndev, q);
 308
 309	if (priv->tx_ring[q]) {
 310		ravb_tx_free(ndev, q, false);
 311
 312		ring_size = sizeof(struct ravb_tx_desc) *
 313			    (priv->num_tx_ring[q] * num_tx_desc + 1);
 314		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 315				  priv->tx_desc_dma[q]);
 316		priv->tx_ring[q] = NULL;
 317	}
 318
 319	/* Free RX skb ringbuffer */
 320	if (priv->rx_skb[q]) {
 321		for (i = 0; i < priv->num_rx_ring[q]; i++)
 322			dev_kfree_skb(priv->rx_skb[q][i]);
 323	}
 324	kfree(priv->rx_skb[q]);
 325	priv->rx_skb[q] = NULL;
 326
 327	/* Free aligned TX buffers */
 328	kfree(priv->tx_align[q]);
 329	priv->tx_align[q] = NULL;
 330
 331	/* Free TX skb ringbuffer.
 332	 * SKBs are freed by ravb_tx_free() call above.
 333	 */
 334	kfree(priv->tx_skb[q]);
 335	priv->tx_skb[q] = NULL;
 336}
 337
 338static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
 339{
 340	struct ravb_private *priv = netdev_priv(ndev);
 341	struct ravb_rx_desc *rx_desc;
 342	unsigned int rx_ring_size;
 343	dma_addr_t dma_addr;
 344	unsigned int i;
 345
 346	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 347	memset(priv->gbeth_rx_ring, 0, rx_ring_size);
 348	/* Build RX ring buffer */
 349	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 350		/* RX descriptor */
 351		rx_desc = &priv->gbeth_rx_ring[i];
 352		rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 353		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 354					  GBETH_RX_BUFF_MAX,
 355					  DMA_FROM_DEVICE);
 356		/* We just set the data size to 0 for a failed mapping which
 357		 * should prevent DMA from happening...
 358		 */
 359		if (dma_mapping_error(ndev->dev.parent, dma_addr))
 360			rx_desc->ds_cc = cpu_to_le16(0);
 361		rx_desc->dptr = cpu_to_le32(dma_addr);
 362		rx_desc->die_dt = DT_FEMPTY;
 363	}
 364	rx_desc = &priv->gbeth_rx_ring[i];
 365	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 366	rx_desc->die_dt = DT_LINKFIX; /* type */
 367}
 368
 369static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
 370{
 371	struct ravb_private *priv = netdev_priv(ndev);
 372	struct ravb_ex_rx_desc *rx_desc;
 373	unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 374	dma_addr_t dma_addr;
 375	unsigned int i;
 376
 377	memset(priv->rx_ring[q], 0, rx_ring_size);
 378	/* Build RX ring buffer */
 379	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 380		/* RX descriptor */
 381		rx_desc = &priv->rx_ring[q][i];
 382		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 383		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 384					  RX_BUF_SZ,
 385					  DMA_FROM_DEVICE);
 386		/* We just set the data size to 0 for a failed mapping which
 387		 * should prevent DMA from happening...
 388		 */
 389		if (dma_mapping_error(ndev->dev.parent, dma_addr))
 390			rx_desc->ds_cc = cpu_to_le16(0);
 391		rx_desc->dptr = cpu_to_le32(dma_addr);
 392		rx_desc->die_dt = DT_FEMPTY;
 393	}
 394	rx_desc = &priv->rx_ring[q][i];
 395	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 396	rx_desc->die_dt = DT_LINKFIX; /* type */
 397}
 398
 399/* Format skb and descriptor buffer for Ethernet AVB */
 400static void ravb_ring_format(struct net_device *ndev, int q)
 401{
 402	struct ravb_private *priv = netdev_priv(ndev);
 403	const struct ravb_hw_info *info = priv->info;
 404	unsigned int num_tx_desc = priv->num_tx_desc;
 405	struct ravb_tx_desc *tx_desc;
 406	struct ravb_desc *desc;
 407	unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
 408				    num_tx_desc;
 409	unsigned int i;
 410
 411	priv->cur_rx[q] = 0;
 412	priv->cur_tx[q] = 0;
 413	priv->dirty_rx[q] = 0;
 414	priv->dirty_tx[q] = 0;
 415
 416	info->rx_ring_format(ndev, q);
 417
 418	memset(priv->tx_ring[q], 0, tx_ring_size);
 419	/* Build TX ring buffer */
 420	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
 421	     i++, tx_desc++) {
 422		tx_desc->die_dt = DT_EEMPTY;
 423		if (num_tx_desc > 1) {
 424			tx_desc++;
 425			tx_desc->die_dt = DT_EEMPTY;
 426		}
 427	}
 428	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 429	tx_desc->die_dt = DT_LINKFIX; /* type */
 430
 431	/* RX descriptor base address for best effort */
 432	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
 433	desc->die_dt = DT_LINKFIX; /* type */
 434	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 435
 436	/* TX descriptor base address for best effort */
 437	desc = &priv->desc_bat[q];
 438	desc->die_dt = DT_LINKFIX; /* type */
 439	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 440}
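/* Editor's note: each ring is allocated with one extra descriptor that
 * is formatted as DT_LINKFIX pointing back at the ring's base address,
 * so the AVB-DMAC follows the descriptors as a circular list.  The
 * desc_bat[] entries written above form the descriptor base address
 * table the hardware consults per queue; by my reading of ravb.h, TX
 * queues sit at index q and RX queues at RX_QUEUE_OFFSET + q.
 */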
 441
 442static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
 443{
 444	struct ravb_private *priv = netdev_priv(ndev);
 445	unsigned int ring_size;
 446
 447	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
 448
 449	priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
 450						 &priv->rx_desc_dma[q],
 451						 GFP_KERNEL);
 452	return priv->gbeth_rx_ring;
 453}
 454
 455static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
 456{
 457	struct ravb_private *priv = netdev_priv(ndev);
 458	unsigned int ring_size;
 459
 460	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 461
 462	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 463					      &priv->rx_desc_dma[q],
 464					      GFP_KERNEL);
 465	return priv->rx_ring[q];
 466}
 467
 468/* Init skb and descriptor buffer for Ethernet AVB */
 469static int ravb_ring_init(struct net_device *ndev, int q)
 470{
 471	struct ravb_private *priv = netdev_priv(ndev);
 472	const struct ravb_hw_info *info = priv->info;
 473	unsigned int num_tx_desc = priv->num_tx_desc;
 474	unsigned int ring_size;
 475	struct sk_buff *skb;
 476	unsigned int i;
 477
 478	/* Allocate RX and TX skb rings */
 479	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
 480				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
 481	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
 482				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
 483	if (!priv->rx_skb[q] || !priv->tx_skb[q])
 484		goto error;
 485
 486	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 487		skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
 488		if (!skb)
 489			goto error;
 490		ravb_set_buffer_align(skb);
 491		priv->rx_skb[q][i] = skb;
 492	}
 493
 494	if (num_tx_desc > 1) {
 495		/* Allocate rings for the aligned buffers */
 496		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
 497					    DPTR_ALIGN - 1, GFP_KERNEL);
 498		if (!priv->tx_align[q])
 499			goto error;
 500	}
 501
 502	/* Allocate all RX descriptors. */
 503	if (!info->alloc_rx_desc(ndev, q))
 504		goto error;
 505
 506	priv->dirty_rx[q] = 0;
 507
 508	/* Allocate all TX descriptors. */
 509	ring_size = sizeof(struct ravb_tx_desc) *
 510		    (priv->num_tx_ring[q] * num_tx_desc + 1);
 511	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 512					      &priv->tx_desc_dma[q],
 513					      GFP_KERNEL);
 514	if (!priv->tx_ring[q])
 515		goto error;
 516
 517	return 0;
 518
 519error:
 520	ravb_ring_free(ndev, q);
 521
 522	return -ENOMEM;
 523}
 524
 525static void ravb_emac_init_gbeth(struct net_device *ndev)
 526{
 527	struct ravb_private *priv = netdev_priv(ndev);
 528
 529	if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
 530		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
 531		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
 532	} else {
 533		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
 534		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
 535			    CXR31_SEL_LINK0);
 536	}
 537
 538	/* Receive frame limit set register */
 539	ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
 540
 541	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
 542	ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
 543			 ECMR_TE | ECMR_RE | ECMR_RCPT |
 544			 ECMR_TXF | ECMR_RXF, ECMR);
 545
 546	ravb_set_rate_gbeth(ndev);
 547
 548	/* Set MAC address */
 549	ravb_write(ndev,
 550		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
 551		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
 552	ravb_write(ndev, (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);
 553
 554	/* E-MAC status register clear */
 555	ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
 556	ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
 557
 558	/* E-MAC interrupt enable register */
 559	ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
 560}
 561
 562static void ravb_emac_init_rcar(struct net_device *ndev)
 563{
 564	/* Receive frame limit set register */
 565	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
 566
 567	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
 568	ravb_write(ndev, ECMR_ZPF | ECMR_DM |
 569		   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
 570		   ECMR_TE | ECMR_RE, ECMR);
 571
 572	ravb_set_rate_rcar(ndev);
 573
 574	/* Set MAC address */
 575	ravb_write(ndev,
 576		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
 577		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
 578	ravb_write(ndev,
 579		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);
 580
 581	/* E-MAC status register clear */
 582	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
 583
 584	/* E-MAC interrupt enable register */
 585	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
 586}
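/* Editor's note: RFLR caps the largest frame the E-MAC accepts.  With
 * the default MTU of 1500, the value written above works out to
 * 1500 + 14 (Ethernet header) + 4 (VLAN tag) + 4 (FCS) = 1522 bytes;
 * changing the MTU moves this limit with it.
 */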
 587
 588/* E-MAC init function */
 589static void ravb_emac_init(struct net_device *ndev)
 590{
 591	struct ravb_private *priv = netdev_priv(ndev);
 592	const struct ravb_hw_info *info = priv->info;
 593
 594	info->emac_init(ndev);
 595}
 596
 597static int ravb_dmac_init_gbeth(struct net_device *ndev)
 598{
 599	int error;
 600
 601	error = ravb_ring_init(ndev, RAVB_BE);
 602	if (error)
 603		return error;
 604
 605	/* Descriptor format */
 606	ravb_ring_format(ndev, RAVB_BE);
 607
 608	/* Set DMAC RX */
 609	ravb_write(ndev, 0x60000000, RCR);
 610
 611	/* Set Max Frame Length (RTC) */
 612	ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
 613
 614	/* Set FIFO size */
 615	ravb_write(ndev, 0x00222200, TGC);
 616
 617	ravb_write(ndev, 0, TCCR);
 618
 619	/* Frame receive */
 620	ravb_write(ndev, RIC0_FRE0, RIC0);
 621	/* Disable FIFO full warning */
 622	ravb_write(ndev, 0x0, RIC1);
 623	/* Receive FIFO full error, descriptor empty */
 624	ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);
 625
 626	ravb_write(ndev, TIC_FTE0, TIC);
 627
 628	return 0;
 629}
 630
 631static int ravb_dmac_init_rcar(struct net_device *ndev)
 632{
 633	struct ravb_private *priv = netdev_priv(ndev);
 634	const struct ravb_hw_info *info = priv->info;
 635	int error;
 636
 637	error = ravb_ring_init(ndev, RAVB_BE);
 638	if (error)
 639		return error;
 640	error = ravb_ring_init(ndev, RAVB_NC);
 641	if (error) {
 642		ravb_ring_free(ndev, RAVB_BE);
 643		return error;
 644	}
 645
 646	/* Descriptor format */
 647	ravb_ring_format(ndev, RAVB_BE);
 648	ravb_ring_format(ndev, RAVB_NC);
 649
 650	/* Set AVB RX */
 651	ravb_write(ndev,
 652		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
 653
 654	/* Set FIFO size */
 655	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
 656
 657	/* Timestamp enable */
 658	ravb_write(ndev, TCCR_TFEN, TCCR);
 659
 660	/* Interrupt init: */
 661	if (info->multi_irqs) {
 662		/* Clear DIL.DPLx */
 663		ravb_write(ndev, 0, DIL);
 664		/* Set queue specific interrupt */
 665		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
 666	}
 667	/* Frame receive */
 668	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
 669	/* Disable FIFO full warning */
 670	ravb_write(ndev, 0, RIC1);
 671	/* Receive FIFO full error, descriptor empty */
 672	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
 673	/* Frame transmitted, timestamp FIFO updated */
 674	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
 675
 676	return 0;
 677}
 678
 679/* Device init function for Ethernet AVB */
 680static int ravb_dmac_init(struct net_device *ndev)
 681{
 682	struct ravb_private *priv = netdev_priv(ndev);
 683	const struct ravb_hw_info *info = priv->info;
 684	int error;
 685
 686	/* Set CONFIG mode */
 687	error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
 688	if (error)
 689		return error;
 690
 691	error = info->dmac_init(ndev);
 692	if (error)
 693		return error;
 694
 695	/* Setting the control will start the AVB-DMAC process. */
 696	return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
 697}
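/* Editor's note: the ordering here mirrors the opmode state machine:
 * the AVB-DMAC accepts ring and interrupt configuration only in CONFIG
 * mode, so the sequence is CONFIG -> info->dmac_init() (rings, RCR/TGC,
 * interrupt enables) -> OPERATION, at which point the DMA engine starts
 * fetching descriptors.
 */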
 698
 699static void ravb_get_tx_tstamp(struct net_device *ndev)
 700{
 701	struct ravb_private *priv = netdev_priv(ndev);
 702	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
 703	struct skb_shared_hwtstamps shhwtstamps;
 704	struct sk_buff *skb;
 705	struct timespec64 ts;
 706	u16 tag, tfa_tag;
 707	int count;
 708	u32 tfa2;
 709
 710	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
 711	while (count--) {
 712		tfa2 = ravb_read(ndev, TFA2);
 713		tfa_tag = (tfa2 & TFA2_TST) >> 16;
 714		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
 715		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
 716			    ravb_read(ndev, TFA1);
 717		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 718		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
 719		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
 720					 list) {
 721			skb = ts_skb->skb;
 722			tag = ts_skb->tag;
 723			list_del(&ts_skb->list);
 724			kfree(ts_skb);
 725			if (tag == tfa_tag) {
 726				skb_tstamp_tx(skb, &shhwtstamps);
 727				dev_consume_skb_any(skb);
 728				break;
 729			} else {
 730				dev_kfree_skb_any(skb);
 731			}
 732		}
 733		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
 734	}
 735}
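/* Editor's note: by my reading, TSR.TFFL (hence the ">> 8") is the TX
 * timestamp FIFO fill level, and each pop yields a 48-bit seconds count
 * split across TFA2.TSV (upper 16 bits) and TFA1, nanoseconds in TFA0,
 * and the frame tag in TFA2.TST.  Matching the tag against the skbs
 * queued on ts_skb_list delivers the timestamp to exactly the frame
 * that requested it; TCCR.TFR then releases the FIFO entry.
 */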
 736
 737static void ravb_rx_csum(struct sk_buff *skb)
 738{
 739	u8 *hw_csum;
 740
 741	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
 742	 * appended to packet data
 743	 */
 744	if (unlikely(skb->len < sizeof(__sum16)))
 745		return;
 746	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
 747	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
 748	skb->ip_summed = CHECKSUM_COMPLETE;
 749	skb_trim(skb, skb->len - sizeof(__sum16));
 750}
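/* Editor's note: CHECKSUM_COMPLETE tells the stack that skb->csum holds
 * the one's-complement sum of the received payload, from which any L4
 * checksum can be validated; skb_trim() then hides the 2-byte hardware
 * trailer from the protocol layers.
 */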
 751
 752static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
 753					  struct ravb_rx_desc *desc)
 754{
 755	struct ravb_private *priv = netdev_priv(ndev);
 756	struct sk_buff *skb;
 757
 758	skb = priv->rx_skb[RAVB_BE][entry];
 759	priv->rx_skb[RAVB_BE][entry] = NULL;
 760	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 761			 ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
 762
 763	return skb;
 764}
 765
 766/* Packet receive function for Gigabit Ethernet */
 767static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 768{
 769	struct ravb_private *priv = netdev_priv(ndev);
 770	const struct ravb_hw_info *info = priv->info;
 771	struct net_device_stats *stats;
 772	struct ravb_rx_desc *desc;
 773	struct sk_buff *skb;
 774	dma_addr_t dma_addr;
 775	int rx_packets = 0;
 776	u8  desc_status;
 777	u16 pkt_len;
 778	u8  die_dt;
 779	int entry;
 780	int limit;
 781	int i;
 782
 783	entry = priv->cur_rx[q] % priv->num_rx_ring[q];
 784	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
 785	stats = &priv->stats[q];
 786
 787	desc = &priv->gbeth_rx_ring[entry];
 788	for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
 789		/* Descriptor type must be checked before all other reads */
 790		dma_rmb();
 791		desc_status = desc->msc;
 792		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 793
 794		/* We use 0-byte descriptors to mark the DMA mapping errors */
 795		if (!pkt_len)
 796			continue;
 797
 798		if (desc_status & MSC_MC)
 799			stats->multicast++;
 800
 801		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
 802			stats->rx_errors++;
 803			if (desc_status & MSC_CRC)
 804				stats->rx_crc_errors++;
 805			if (desc_status & MSC_RFE)
 806				stats->rx_frame_errors++;
 807			if (desc_status & (MSC_RTLF | MSC_RTSF))
 808				stats->rx_length_errors++;
 809			if (desc_status & MSC_CEEF)
 810				stats->rx_missed_errors++;
 811		} else {
 812			die_dt = desc->die_dt & 0xF0;
 813			switch (die_dt) {
 814			case DT_FSINGLE:
 815				skb = ravb_get_skb_gbeth(ndev, entry, desc);
 816				skb_put(skb, pkt_len);
 817				skb->protocol = eth_type_trans(skb, ndev);
 818				napi_gro_receive(&priv->napi[q], skb);
 819				rx_packets++;
 820				stats->rx_bytes += pkt_len;
 821				break;
 822			case DT_FSTART:
 823				priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
 824				skb_put(priv->rx_1st_skb, pkt_len);
 825				break;
 826			case DT_FMID:
 827				skb = ravb_get_skb_gbeth(ndev, entry, desc);
 828				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
 829							       priv->rx_1st_skb->len,
 830							       skb->data,
 831							       pkt_len);
 832				skb_put(priv->rx_1st_skb, pkt_len);
 833				dev_kfree_skb(skb);
 834				break;
 835			case DT_FEND:
 836				skb = ravb_get_skb_gbeth(ndev, entry, desc);
 837				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
 838							       priv->rx_1st_skb->len,
 839							       skb->data,
 840							       pkt_len);
 841				skb_put(priv->rx_1st_skb, pkt_len);
 842				dev_kfree_skb(skb);
 843				priv->rx_1st_skb->protocol =
 844					eth_type_trans(priv->rx_1st_skb, ndev);
 845				napi_gro_receive(&priv->napi[q],
 846						 priv->rx_1st_skb);
 847				rx_packets++;
 848				stats->rx_bytes += pkt_len;
 849				break;
 850			}
 851		}
 852
 853		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
 854		desc = &priv->gbeth_rx_ring[entry];
 855	}
 856
 857	/* Refill the RX ring buffers. */
 858	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 859		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 860		desc = &priv->gbeth_rx_ring[entry];
 861		desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 862
 863		if (!priv->rx_skb[q][entry]) {
 864			skb = netdev_alloc_skb(ndev, info->max_rx_len);
 865			if (!skb)
 866				break;
 867			ravb_set_buffer_align(skb);
 868			dma_addr = dma_map_single(ndev->dev.parent,
 869						  skb->data,
 870						  GBETH_RX_BUFF_MAX,
 871						  DMA_FROM_DEVICE);
 872			skb_checksum_none_assert(skb);
 873			/* We just set the data size to 0 for a failed mapping
 874			 * which should prevent DMA from happening...
 875			 */
 876			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 877				desc->ds_cc = cpu_to_le16(0);
 878			desc->dptr = cpu_to_le32(dma_addr);
 879			priv->rx_skb[q][entry] = skb;
 880		}
 881		/* Descriptor type must be set after all the above writes */
 882		dma_wmb();
 883		desc->die_dt = DT_FEMPTY;
 884	}
 885
 886	stats->rx_packets += rx_packets;
 887	*quota -= rx_packets;
 888	return *quota == 0;
 889}
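/* Editor's note: on GbEth, a frame larger than one RX buffer arrives as
 * a DT_FSTART descriptor, zero or more DT_FMID descriptors and a final
 * DT_FEND; the loop above reassembles it by copying each fragment into
 * rx_1st_skb before handing the completed frame to GRO.  A frame that
 * fits in one buffer arrives as a single DT_FSINGLE descriptor.
 */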
 890
 891/* Packet receive function for Ethernet AVB */
 892static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 893{
 894	struct ravb_private *priv = netdev_priv(ndev);
 895	const struct ravb_hw_info *info = priv->info;
 896	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
 897	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
 898			priv->cur_rx[q];
 899	struct net_device_stats *stats = &priv->stats[q];
 900	struct ravb_ex_rx_desc *desc;
 901	struct sk_buff *skb;
 902	dma_addr_t dma_addr;
 903	struct timespec64 ts;
 904	u8  desc_status;
 905	u16 pkt_len;
 906	int limit;
 907
 908	boguscnt = min(boguscnt, *quota);
 909	limit = boguscnt;
 910	desc = &priv->rx_ring[q][entry];
 911	while (desc->die_dt != DT_FEMPTY) {
 912		/* Descriptor type must be checked before all other reads */
 913		dma_rmb();
 914		desc_status = desc->msc;
 915		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 916
 917		if (--boguscnt < 0)
 918			break;
 919
 920		/* We use 0-byte descriptors to mark the DMA mapping errors */
 921		if (!pkt_len)
 922			continue;
 923
 924		if (desc_status & MSC_MC)
 925			stats->multicast++;
 926
 927		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
 928				   MSC_CEEF)) {
 929			stats->rx_errors++;
 930			if (desc_status & MSC_CRC)
 931				stats->rx_crc_errors++;
 932			if (desc_status & MSC_RFE)
 933				stats->rx_frame_errors++;
 934			if (desc_status & (MSC_RTLF | MSC_RTSF))
 935				stats->rx_length_errors++;
 936			if (desc_status & MSC_CEEF)
 937				stats->rx_missed_errors++;
 938		} else {
 939			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
 940
 941			skb = priv->rx_skb[q][entry];
 942			priv->rx_skb[q][entry] = NULL;
 943			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 944					 RX_BUF_SZ,
 945					 DMA_FROM_DEVICE);
 946			get_ts &= (q == RAVB_NC) ?
 947					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
 948					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
 949			if (get_ts) {
 950				struct skb_shared_hwtstamps *shhwtstamps;
 951
 952				shhwtstamps = skb_hwtstamps(skb);
 953				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 954				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
 955					     32) | le32_to_cpu(desc->ts_sl);
 956				ts.tv_nsec = le32_to_cpu(desc->ts_n);
 957				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
 958			}
 959
 960			skb_put(skb, pkt_len);
 961			skb->protocol = eth_type_trans(skb, ndev);
 962			if (ndev->features & NETIF_F_RXCSUM)
 963				ravb_rx_csum(skb);
 964			napi_gro_receive(&priv->napi[q], skb);
 965			stats->rx_packets++;
 966			stats->rx_bytes += pkt_len;
 967		}
 968
 969		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
 970		desc = &priv->rx_ring[q][entry];
 971	}
 972
 973	/* Refill the RX ring buffers. */
 974	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 975		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 976		desc = &priv->rx_ring[q][entry];
 977		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 978
 979		if (!priv->rx_skb[q][entry]) {
 980			skb = netdev_alloc_skb(ndev, info->max_rx_len);
 981			if (!skb)
 982				break;	/* Better luck next round. */
 983			ravb_set_buffer_align(skb);
 984			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 985						  le16_to_cpu(desc->ds_cc),
 986						  DMA_FROM_DEVICE);
 987			skb_checksum_none_assert(skb);
 988			/* We just set the data size to 0 for a failed mapping
 989			 * which should prevent DMA from happening...
 990			 */
 991			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 992				desc->ds_cc = cpu_to_le16(0);
 993			desc->dptr = cpu_to_le32(dma_addr);
 994			priv->rx_skb[q][entry] = skb;
 995		}
 996		/* Descriptor type must be set after all the above writes */
 997		dma_wmb();
 998		desc->die_dt = DT_FEMPTY;
 999	}
1000
1001	*quota -= limit - (++boguscnt);
1002
1003	return boguscnt <= 0;
1004}
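/* Editor's note: boguscnt starts clamped to both the ring occupancy and
 * the remaining NAPI quota, and the final lines convert descriptors
 * consumed back into quota spent.  The accounting is subtle (by my
 * tracing, the post-increment undercounts by one when the loop exits
 * because the ring drained); later kernels rewrote this path with a
 * plain packet counter, much like ravb_rx_gbeth() above.
 */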
1005
1006/* Packet receive function for Ethernet AVB */
1007static bool ravb_rx(struct net_device *ndev, int *quota, int q)
1008{
1009	struct ravb_private *priv = netdev_priv(ndev);
1010	const struct ravb_hw_info *info = priv->info;
1011
1012	return info->receive(ndev, quota, q);
1013}
1014
1015static void ravb_rcv_snd_disable(struct net_device *ndev)
1016{
1017	/* Disable TX and RX */
1018	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
1019}
1020
1021static void ravb_rcv_snd_enable(struct net_device *ndev)
1022{
1023	/* Enable TX and RX */
1024	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
1025}
1026
1027/* Wait until the DMA processes have finished */
1028static int ravb_stop_dma(struct net_device *ndev)
1029{
1030	struct ravb_private *priv = netdev_priv(ndev);
1031	const struct ravb_hw_info *info = priv->info;
1032	int error;
1033
1034	/* Wait for the hardware TX process to stop */
1035	error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);
1036
1037	if (error)
1038		return error;
1039
1040	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
1041			  0);
1042	if (error)
1043		return error;
1044
1045	/* Stop the E-MAC's RX/TX processes. */
1046	ravb_rcv_snd_disable(ndev);
1047
1048	/* Wait for the RX DMA process to stop */
1049	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
1050	if (error)
1051		return error;
1052
1053	/* Stop AVB-DMAC process */
1054	return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
1055}
1056
1057/* E-MAC interrupt handler */
1058static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
1059{
1060	struct ravb_private *priv = netdev_priv(ndev);
1061	u32 ecsr, psr;
1062
1063	ecsr = ravb_read(ndev, ECSR);
1064	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */
1065
1066	if (ecsr & ECSR_MPD)
1067		pm_wakeup_event(&priv->pdev->dev, 0);
1068	if (ecsr & ECSR_ICD)
1069		ndev->stats.tx_carrier_errors++;
1070	if (ecsr & ECSR_LCHNG) {
1071		/* Link changed */
1072		if (priv->no_avb_link)
1073			return;
1074		psr = ravb_read(ndev, PSR);
1075		if (priv->avb_link_active_low)
1076			psr ^= PSR_LMON;
1077		if (!(psr & PSR_LMON)) {
1078			/* Disable RX and TX */
1079			ravb_rcv_snd_disable(ndev);
1080		} else {
1081			/* Enable RX and TX */
1082			ravb_rcv_snd_enable(ndev);
1083		}
1084	}
1085}
1086
1087static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
1088{
1089	struct net_device *ndev = dev_id;
1090	struct ravb_private *priv = netdev_priv(ndev);
1091
1092	spin_lock(&priv->lock);
1093	ravb_emac_interrupt_unlocked(ndev);
1094	spin_unlock(&priv->lock);
1095	return IRQ_HANDLED;
1096}
1097
1098/* Error interrupt handler */
1099static void ravb_error_interrupt(struct net_device *ndev)
1100{
1101	struct ravb_private *priv = netdev_priv(ndev);
1102	u32 eis, ris2;
1103
1104	eis = ravb_read(ndev, EIS);
1105	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
1106	if (eis & EIS_QFS) {
1107		ris2 = ravb_read(ndev, RIS2);
1108		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
1109			   RIS2);
1110
1111		/* Receive Descriptor Empty int */
1112		if (ris2 & RIS2_QFF0)
1113			priv->stats[RAVB_BE].rx_over_errors++;
1114
1115		/* Receive Descriptor Empty int */
1116		if (ris2 & RIS2_QFF1)
1117			priv->stats[RAVB_NC].rx_over_errors++;
1118
1119		/* Receive FIFO Overflow int */
1120		if (ris2 & RIS2_RFFF)
1121			priv->rx_fifo_errors++;
1122	}
1123}
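/* Editor's note: these status registers appear to be write-0-to-clear:
 * writing the complement of the handled bits clears just those sources
 * while leaving anything that raced in after the read still pending,
 * and the RESERVED masks keep the reserved bits written as zero.
 */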
1124
1125static bool ravb_queue_interrupt(struct net_device *ndev, int q)
1126{
1127	struct ravb_private *priv = netdev_priv(ndev);
1128	const struct ravb_hw_info *info = priv->info;
1129	u32 ris0 = ravb_read(ndev, RIS0);
1130	u32 ric0 = ravb_read(ndev, RIC0);
1131	u32 tis  = ravb_read(ndev, TIS);
1132	u32 tic  = ravb_read(ndev, TIC);
1133
1134	if (((ris0 & ric0) & BIT(q)) || ((tis  & tic)  & BIT(q))) {
1135		if (napi_schedule_prep(&priv->napi[q])) {
1136			/* Mask RX and TX interrupts */
1137			if (!info->irq_en_dis) {
1138				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
1139				ravb_write(ndev, tic & ~BIT(q), TIC);
1140			} else {
1141				ravb_write(ndev, BIT(q), RID0);
1142				ravb_write(ndev, BIT(q), TID);
1143			}
1144			__napi_schedule(&priv->napi[q]);
1145		} else {
1146			netdev_warn(ndev,
1147				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
1148				    ris0, ric0);
1149			netdev_warn(ndev,
1150				    "                    tx status 0x%08x, tx mask 0x%08x.\n",
1151				    tis, tic);
1152		}
1153		return true;
1154	}
1155	return false;
1156}
1157
1158static bool ravb_timestamp_interrupt(struct net_device *ndev)
1159{
1160	u32 tis = ravb_read(ndev, TIS);
1161
1162	if (tis & TIS_TFUF) {
1163		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
1164		ravb_get_tx_tstamp(ndev);
1165		return true;
1166	}
1167	return false;
1168}
1169
1170static irqreturn_t ravb_interrupt(int irq, void *dev_id)
1171{
1172	struct net_device *ndev = dev_id;
1173	struct ravb_private *priv = netdev_priv(ndev);
1174	const struct ravb_hw_info *info = priv->info;
1175	irqreturn_t result = IRQ_NONE;
1176	u32 iss;
1177
1178	spin_lock(&priv->lock);
1179	/* Get interrupt status */
1180	iss = ravb_read(ndev, ISS);
1181
1182	/* Received and transmitted interrupts */
1183	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
1184		int q;
1185
1186		/* Timestamp updated */
1187		if (ravb_timestamp_interrupt(ndev))
1188			result = IRQ_HANDLED;
1189
1190		/* Network control and best effort queue RX/TX */
1191		if (info->nc_queues) {
1192			for (q = RAVB_NC; q >= RAVB_BE; q--) {
1193				if (ravb_queue_interrupt(ndev, q))
1194					result = IRQ_HANDLED;
1195			}
1196		} else {
1197			if (ravb_queue_interrupt(ndev, RAVB_BE))
1198				result = IRQ_HANDLED;
1199		}
1200	}
1201
1202	/* E-MAC status summary */
1203	if (iss & ISS_MS) {
1204		ravb_emac_interrupt_unlocked(ndev);
1205		result = IRQ_HANDLED;
1206	}
1207
1208	/* Error status summary */
1209	if (iss & ISS_ES) {
1210		ravb_error_interrupt(ndev);
1211		result = IRQ_HANDLED;
1212	}
1213
1214	/* gPTP interrupt status summary */
1215	if (iss & ISS_CGIS) {
1216		ravb_ptp_interrupt(ndev);
1217		result = IRQ_HANDLED;
1218	}
1219
1220	spin_unlock(&priv->lock);
1221	return result;
1222}
1223
1224/* Timestamp/Error/gPTP interrupt handler */
1225static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
1226{
1227	struct net_device *ndev = dev_id;
1228	struct ravb_private *priv = netdev_priv(ndev);
1229	irqreturn_t result = IRQ_NONE;
1230	u32 iss;
1231
1232	spin_lock(&priv->lock);
1233	/* Get interrupt status */
1234	iss = ravb_read(ndev, ISS);
1235
1236	/* Timestamp updated */
1237	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
1238		result = IRQ_HANDLED;
1239
1240	/* Error status summary */
1241	if (iss & ISS_ES) {
1242		ravb_error_interrupt(ndev);
1243		result = IRQ_HANDLED;
1244	}
1245
1246	/* gPTP interrupt status summary */
1247	if (iss & ISS_CGIS) {
1248		ravb_ptp_interrupt(ndev);
1249		result = IRQ_HANDLED;
1250	}
1251
1252	spin_unlock(&priv->lock);
1253	return result;
1254}
1255
1256static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
1257{
1258	struct net_device *ndev = dev_id;
1259	struct ravb_private *priv = netdev_priv(ndev);
1260	irqreturn_t result = IRQ_NONE;
1261
1262	spin_lock(&priv->lock);
1263
1264	/* Network control/Best effort queue RX/TX */
1265	if (ravb_queue_interrupt(ndev, q))
1266		result = IRQ_HANDLED;
1267
1268	spin_unlock(&priv->lock);
1269	return result;
1270}
1271
1272static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
1273{
1274	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
1275}
1276
1277static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
1278{
1279	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
1280}
1281
1282static int ravb_poll(struct napi_struct *napi, int budget)
1283{
1284	struct net_device *ndev = napi->dev;
1285	struct ravb_private *priv = netdev_priv(ndev);
1286	const struct ravb_hw_info *info = priv->info;
1287	bool gptp = info->gptp || info->ccc_gac;
1288	struct ravb_rx_desc *desc;
1289	unsigned long flags;
1290	int q = napi - priv->napi;
1291	int mask = BIT(q);
1292	int quota = budget;
1293	unsigned int entry;
1294
1295	if (!gptp) {
1296		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
1297		desc = &priv->gbeth_rx_ring[entry];
1298	}
1299	/* Processing RX Descriptor Ring */
1300	/* Clear RX interrupt */
1301	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
1302	if (gptp || desc->die_dt != DT_FEMPTY) {
1303		if (ravb_rx(ndev, &quota, q))
1304			goto out;
1305	}
1306
1307	/* Processing TX Descriptor Ring */
1308	spin_lock_irqsave(&priv->lock, flags);
1309	/* Clear TX interrupt */
1310	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
1311	ravb_tx_free(ndev, q, true);
1312	netif_wake_subqueue(ndev, q);
1313	spin_unlock_irqrestore(&priv->lock, flags);
1314
1315	napi_complete(napi);
1316
1317	/* Re-enable RX/TX interrupts */
1318	spin_lock_irqsave(&priv->lock, flags);
1319	if (!info->irq_en_dis) {
1320		ravb_modify(ndev, RIC0, mask, mask);
1321		ravb_modify(ndev, TIC,  mask, mask);
1322	} else {
1323		ravb_write(ndev, mask, RIE0);
1324		ravb_write(ndev, mask, TIE);
1325	}
1326	spin_unlock_irqrestore(&priv->lock, flags);
1327
1328	/* Receive error message handling */
1329	priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
1330	if (info->nc_queues)
1331		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
1332	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
1333		ndev->stats.rx_over_errors = priv->rx_over_errors;
1334	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
1335		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
1336out:
1337	return budget - quota;
1338}
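/* Editor's note: this follows the standard NAPI contract: the return
 * value is the number of packets processed (budget - quota), and RX/TX
 * interrupts are re-enabled only after napi_complete().  When ravb_rx()
 * exhausts the quota, the early "goto out" skips both steps, so the
 * interrupts stay masked and the NAPI core polls this queue again.
 */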
1339
1340static void ravb_set_duplex_gbeth(struct net_device *ndev)
1341{
1342	struct ravb_private *priv = netdev_priv(ndev);
1343
1344	ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
1345}
1346
1347/* PHY state control function */
1348static void ravb_adjust_link(struct net_device *ndev)
1349{
1350	struct ravb_private *priv = netdev_priv(ndev);
1351	const struct ravb_hw_info *info = priv->info;
1352	struct phy_device *phydev = ndev->phydev;
1353	bool new_state = false;
1354	unsigned long flags;
1355
1356	spin_lock_irqsave(&priv->lock, flags);
1357
1358	/* Disable TX and RX right away if the E-MAC link change is ignored */
1359	if (priv->no_avb_link)
1360		ravb_rcv_snd_disable(ndev);
1361
1362	if (phydev->link) {
1363		if (info->half_duplex && phydev->duplex != priv->duplex) {
1364			new_state = true;
1365			priv->duplex = phydev->duplex;
1366			ravb_set_duplex_gbeth(ndev);
1367		}
1368
1369		if (phydev->speed != priv->speed) {
1370			new_state = true;
1371			priv->speed = phydev->speed;
1372			info->set_rate(ndev);
1373		}
1374		if (!priv->link) {
1375			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
1376			new_state = true;
1377			priv->link = phydev->link;
1378		}
1379	} else if (priv->link) {
1380		new_state = true;
1381		priv->link = 0;
1382		priv->speed = 0;
1383		if (info->half_duplex)
1384			priv->duplex = -1;
1385	}
1386
1387	/* Enable TX and RX right away if the E-MAC link change is ignored */
1388	if (priv->no_avb_link && phydev->link)
1389		ravb_rcv_snd_enable(ndev);
1390
1391	spin_unlock_irqrestore(&priv->lock, flags);
1392
1393	if (new_state && netif_msg_link(priv))
1394		phy_print_status(phydev);
1395}
1396
1397/* PHY init function */
1398static int ravb_phy_init(struct net_device *ndev)
1399{
1400	struct device_node *np = ndev->dev.parent->of_node;
1401	struct ravb_private *priv = netdev_priv(ndev);
1402	const struct ravb_hw_info *info = priv->info;
1403	struct phy_device *phydev;
1404	struct device_node *pn;
1405	phy_interface_t iface;
1406	int err;
1407
1408	priv->link = 0;
1409	priv->speed = 0;
1410	priv->duplex = -1;
1411
1412	/* Try connecting to PHY */
1413	pn = of_parse_phandle(np, "phy-handle", 0);
1414	if (!pn) {
1415		/* In the case of a fixed PHY, the DT node associated
1416		 * to the PHY is the Ethernet MAC DT node.
1417		 */
1418		if (of_phy_is_fixed_link(np)) {
1419			err = of_phy_register_fixed_link(np);
1420			if (err)
1421				return err;
1422		}
1423		pn = of_node_get(np);
1424	}
1425
1426	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
1427				     : priv->phy_interface;
1428	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
1429	of_node_put(pn);
1430	if (!phydev) {
1431		netdev_err(ndev, "failed to connect PHY\n");
1432		err = -ENOENT;
1433		goto err_deregister_fixed_link;
1434	}
1435
1436	if (!info->half_duplex) {
1437		/* 10BASE, Pause and Asym Pause are not supported */
1438		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1439		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
1440		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
1441		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
1442
1443		/* Half Duplex is not supported */
1444		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1445		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1446	}
1447
1448	phy_attached_info(phydev);
1449
1450	return 0;
1451
1452err_deregister_fixed_link:
1453	if (of_phy_is_fixed_link(np))
1454		of_phy_deregister_fixed_link(np);
1455
1456	return err;
1457}
1458
1459/* PHY control start function */
1460static int ravb_phy_start(struct net_device *ndev)
1461{
1462	int error;
1463
1464	error = ravb_phy_init(ndev);
1465	if (error)
1466		return error;
1467
1468	phy_start(ndev->phydev);
1469
1470	return 0;
1471}
1472
1473static u32 ravb_get_msglevel(struct net_device *ndev)
1474{
1475	struct ravb_private *priv = netdev_priv(ndev);
1476
1477	return priv->msg_enable;
1478}
1479
1480static void ravb_set_msglevel(struct net_device *ndev, u32 value)
1481{
1482	struct ravb_private *priv = netdev_priv(ndev);
1483
1484	priv->msg_enable = value;
1485}
1486
1487static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
1488	"rx_queue_0_current",
1489	"tx_queue_0_current",
1490	"rx_queue_0_dirty",
1491	"tx_queue_0_dirty",
1492	"rx_queue_0_packets",
1493	"tx_queue_0_packets",
1494	"rx_queue_0_bytes",
1495	"tx_queue_0_bytes",
1496	"rx_queue_0_mcast_packets",
1497	"rx_queue_0_errors",
1498	"rx_queue_0_crc_errors",
1499	"rx_queue_0_frame_errors",
1500	"rx_queue_0_length_errors",
1501	"rx_queue_0_csum_offload_errors",
1502	"rx_queue_0_over_errors",
1503};
1504
1505static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1506	"rx_queue_0_current",
1507	"tx_queue_0_current",
1508	"rx_queue_0_dirty",
1509	"tx_queue_0_dirty",
1510	"rx_queue_0_packets",
1511	"tx_queue_0_packets",
1512	"rx_queue_0_bytes",
1513	"tx_queue_0_bytes",
1514	"rx_queue_0_mcast_packets",
1515	"rx_queue_0_errors",
1516	"rx_queue_0_crc_errors",
1517	"rx_queue_0_frame_errors",
1518	"rx_queue_0_length_errors",
1519	"rx_queue_0_missed_errors",
1520	"rx_queue_0_over_errors",
1521
1522	"rx_queue_1_current",
1523	"tx_queue_1_current",
1524	"rx_queue_1_dirty",
1525	"tx_queue_1_dirty",
1526	"rx_queue_1_packets",
1527	"tx_queue_1_packets",
1528	"rx_queue_1_bytes",
1529	"tx_queue_1_bytes",
1530	"rx_queue_1_mcast_packets",
1531	"rx_queue_1_errors",
1532	"rx_queue_1_crc_errors",
1533	"rx_queue_1_frame_errors",
1534	"rx_queue_1_length_errors",
1535	"rx_queue_1_missed_errors",
1536	"rx_queue_1_over_errors",
1537};
1538
1539static int ravb_get_sset_count(struct net_device *netdev, int sset)
1540{
1541	struct ravb_private *priv = netdev_priv(netdev);
1542	const struct ravb_hw_info *info = priv->info;
1543
1544	switch (sset) {
1545	case ETH_SS_STATS:
1546		return info->stats_len;
1547	default:
1548		return -EOPNOTSUPP;
1549	}
1550}
1551
1552static void ravb_get_ethtool_stats(struct net_device *ndev,
1553				   struct ethtool_stats *estats, u64 *data)
1554{
1555	struct ravb_private *priv = netdev_priv(ndev);
1556	const struct ravb_hw_info *info = priv->info;
1557	int num_rx_q;
1558	int i = 0;
1559	int q;
1560
1561	num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
1562	/* Device-specific stats */
1563	for (q = RAVB_BE; q < num_rx_q; q++) {
1564		struct net_device_stats *stats = &priv->stats[q];
1565
1566		data[i++] = priv->cur_rx[q];
1567		data[i++] = priv->cur_tx[q];
1568		data[i++] = priv->dirty_rx[q];
1569		data[i++] = priv->dirty_tx[q];
1570		data[i++] = stats->rx_packets;
1571		data[i++] = stats->tx_packets;
1572		data[i++] = stats->rx_bytes;
1573		data[i++] = stats->tx_bytes;
1574		data[i++] = stats->multicast;
1575		data[i++] = stats->rx_errors;
1576		data[i++] = stats->rx_crc_errors;
1577		data[i++] = stats->rx_frame_errors;
1578		data[i++] = stats->rx_length_errors;
1579		data[i++] = stats->rx_missed_errors;
1580		data[i++] = stats->rx_over_errors;
1581	}
1582}
1583
1584static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1585{
1586	struct ravb_private *priv = netdev_priv(ndev);
1587	const struct ravb_hw_info *info = priv->info;
1588
1589	switch (stringset) {
1590	case ETH_SS_STATS:
1591		memcpy(data, info->gstrings_stats, info->gstrings_size);
1592		break;
1593	}
1594}
1595
1596static void ravb_get_ringparam(struct net_device *ndev,
1597			       struct ethtool_ringparam *ring,
1598			       struct kernel_ethtool_ringparam *kernel_ring,
1599			       struct netlink_ext_ack *extack)
1600{
1601	struct ravb_private *priv = netdev_priv(ndev);
1602
1603	ring->rx_max_pending = BE_RX_RING_MAX;
1604	ring->tx_max_pending = BE_TX_RING_MAX;
1605	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
1606	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
1607}
1608
1609static int ravb_set_ringparam(struct net_device *ndev,
1610			      struct ethtool_ringparam *ring,
1611			      struct kernel_ethtool_ringparam *kernel_ring,
1612			      struct netlink_ext_ack *extack)
1613{
1614	struct ravb_private *priv = netdev_priv(ndev);
1615	const struct ravb_hw_info *info = priv->info;
1616	int error;
1617
1618	if (ring->tx_pending > BE_TX_RING_MAX ||
1619	    ring->rx_pending > BE_RX_RING_MAX ||
1620	    ring->tx_pending < BE_TX_RING_MIN ||
1621	    ring->rx_pending < BE_RX_RING_MIN)
1622		return -EINVAL;
1623	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1624		return -EINVAL;
1625
1626	if (netif_running(ndev)) {
1627		netif_device_detach(ndev);
1628		/* Stop PTP Clock driver */
1629		if (info->gptp)
1630			ravb_ptp_stop(ndev);
1631		/* Wait for DMA to stop */
1632		error = ravb_stop_dma(ndev);
1633		if (error) {
1634			netdev_err(ndev,
1635				   "cannot set ringparam! Are any AVB processes still running?\n");
1636			return error;
1637		}
1638		synchronize_irq(ndev->irq);
1639
1640		/* Free all the skb's in the RX queue and the DMA buffers. */
1641		ravb_ring_free(ndev, RAVB_BE);
1642		if (info->nc_queues)
1643			ravb_ring_free(ndev, RAVB_NC);
1644	}
1645
1646	/* Set new parameters */
1647	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
1648	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
1649
1650	if (netif_running(ndev)) {
1651		error = ravb_dmac_init(ndev);
1652		if (error) {
1653			netdev_err(ndev,
1654				   "%s: ravb_dmac_init() failed, error %d\n",
1655				   __func__, error);
1656			return error;
1657		}
1658
1659		ravb_emac_init(ndev);
1660
1661		/* Initialise PTP Clock driver */
1662		if (info->gptp)
1663			ravb_ptp_init(ndev, priv->pdev);
1664
1665		netif_device_attach(ndev);
1666	}
1667
1668	return 0;
1669}
1670
1671static int ravb_get_ts_info(struct net_device *ndev,
1672			    struct ethtool_ts_info *info)
1673{
1674	struct ravb_private *priv = netdev_priv(ndev);
1675	const struct ravb_hw_info *hw_info = priv->info;
1676
1677	info->so_timestamping =
1678		SOF_TIMESTAMPING_TX_SOFTWARE |
1679		SOF_TIMESTAMPING_RX_SOFTWARE |
1680		SOF_TIMESTAMPING_SOFTWARE |
1681		SOF_TIMESTAMPING_TX_HARDWARE |
1682		SOF_TIMESTAMPING_RX_HARDWARE |
1683		SOF_TIMESTAMPING_RAW_HARDWARE;
1684	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1685	info->rx_filters =
1686		(1 << HWTSTAMP_FILTER_NONE) |
1687		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1688		(1 << HWTSTAMP_FILTER_ALL);
1689	if (hw_info->gptp || hw_info->ccc_gac)
1690		info->phc_index = ptp_clock_index(priv->ptp.clock);
1691
1692	return 0;
1693}
1694
1695static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1696{
1697	struct ravb_private *priv = netdev_priv(ndev);
1698
1699	wol->supported = WAKE_MAGIC;
1700	wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
1701}
1702
1703static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1704{
1705	struct ravb_private *priv = netdev_priv(ndev);
1706	const struct ravb_hw_info *info = priv->info;
1707
1708	if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
1709		return -EOPNOTSUPP;
1710
1711	priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
1712
1713	device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);
1714
1715	return 0;
1716}
1717
1718static const struct ethtool_ops ravb_ethtool_ops = {
1719	.nway_reset		= phy_ethtool_nway_reset,
1720	.get_msglevel		= ravb_get_msglevel,
1721	.set_msglevel		= ravb_set_msglevel,
1722	.get_link		= ethtool_op_get_link,
1723	.get_strings		= ravb_get_strings,
1724	.get_ethtool_stats	= ravb_get_ethtool_stats,
1725	.get_sset_count		= ravb_get_sset_count,
1726	.get_ringparam		= ravb_get_ringparam,
1727	.set_ringparam		= ravb_set_ringparam,
1728	.get_ts_info		= ravb_get_ts_info,
1729	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
1730	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
1731	.get_wol		= ravb_get_wol,
1732	.set_wol		= ravb_set_wol,
1733};
1734
1735static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
1736				struct net_device *ndev, struct device *dev,
1737				const char *ch)
1738{
1739	char *name;
1740	int error;
1741
1742	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
1743	if (!name)
1744		return -ENOMEM;
1745	error = request_irq(irq, handler, 0, name, ndev);
1746	if (error)
1747		netdev_err(ndev, "cannot request IRQ %s\n", name);
1748
1749	return error;
1750}
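/* Editor's note: devm_kasprintf() builds names such as "eth0:ch22:multi"
 * (device-managed, so they outlive the request_irq() call), which is
 * what makes the per-channel interrupts of the multi-IRQ variants easy
 * to tell apart in /proc/interrupts.
 */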
1751
1752/* Network device open function for Ethernet AVB */
1753static int ravb_open(struct net_device *ndev)
1754{
1755	struct ravb_private *priv = netdev_priv(ndev);
1756	const struct ravb_hw_info *info = priv->info;
1757	struct platform_device *pdev = priv->pdev;
1758	struct device *dev = &pdev->dev;
1759	int error;
1760
1761	napi_enable(&priv->napi[RAVB_BE]);
1762	if (info->nc_queues)
1763		napi_enable(&priv->napi[RAVB_NC]);
1764
1765	if (!info->multi_irqs) {
1766		error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
1767				    ndev->name, ndev);
1768		if (error) {
1769			netdev_err(ndev, "cannot request IRQ\n");
1770			goto out_napi_off;
1771		}
1772	} else {
1773		error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
1774				      dev, "ch22:multi");
1775		if (error)
1776			goto out_napi_off;
1777		error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
1778				      dev, "ch24:emac");
1779		if (error)
1780			goto out_free_irq;
1781		error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
1782				      ndev, dev, "ch0:rx_be");
1783		if (error)
1784			goto out_free_irq_emac;
1785		error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
1786				      ndev, dev, "ch18:tx_be");
1787		if (error)
1788			goto out_free_irq_be_rx;
1789		error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
1790				      ndev, dev, "ch1:rx_nc");
1791		if (error)
1792			goto out_free_irq_be_tx;
1793		error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
1794				      ndev, dev, "ch19:tx_nc");
1795		if (error)
1796			goto out_free_irq_nc_rx;
1797
1798		if (info->err_mgmt_irqs) {
1799			error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
1800					      ndev, dev, "err_a");
1801			if (error)
1802				goto out_free_irq_nc_tx;
1803			error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
1804					      ndev, dev, "mgmt_a");
1805			if (error)
1806				goto out_free_irq_erra;
1807		}
1808	}
1809
1810	/* Device init */
1811	error = ravb_dmac_init(ndev);
1812	if (error)
1813		goto out_free_irq_mgmta;
1814	ravb_emac_init(ndev);
1815
1816	/* Initialise PTP Clock driver */
1817	if (info->gptp)
1818		ravb_ptp_init(ndev, priv->pdev);
1819
1820	/* PHY control start */
1821	error = ravb_phy_start(ndev);
1822	if (error)
1823		goto out_ptp_stop;
1824
1825	netif_tx_start_all_queues(ndev);
1826
1827	return 0;
1828
1829out_ptp_stop:
1830	/* Stop PTP Clock driver */
1831	if (info->gptp)
1832		ravb_ptp_stop(ndev);
1833	ravb_stop_dma(ndev);
1834out_free_irq_mgmta:
1835	if (!info->multi_irqs)
1836		goto out_free_irq;
1837	if (info->err_mgmt_irqs)
1838		free_irq(priv->mgmta_irq, ndev);
1839out_free_irq_erra:
1840	if (info->err_mgmt_irqs)
1841		free_irq(priv->erra_irq, ndev);
1842out_free_irq_nc_tx:
1843	free_irq(priv->tx_irqs[RAVB_NC], ndev);
1844out_free_irq_nc_rx:
1845	free_irq(priv->rx_irqs[RAVB_NC], ndev);
1846out_free_irq_be_tx:
1847	free_irq(priv->tx_irqs[RAVB_BE], ndev);
1848out_free_irq_be_rx:
1849	free_irq(priv->rx_irqs[RAVB_BE], ndev);
1850out_free_irq_emac:
1851	free_irq(priv->emac_irq, ndev);
1852out_free_irq:
1853	free_irq(ndev->irq, ndev);
1854out_napi_off:
1855	if (info->nc_queues)
1856		napi_disable(&priv->napi[RAVB_NC]);
1857	napi_disable(&priv->napi[RAVB_BE]);
1858	return error;
1859}
1860
1861/* Timeout function for Ethernet AVB */
1862static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1863{
1864	struct ravb_private *priv = netdev_priv(ndev);
1865
1866	netif_err(priv, tx_err, ndev,
1867		  "transmit timed out, status %08x, resetting...\n",
1868		  ravb_read(ndev, ISS));
1869
1870	/* Count this as a TX error */
1871	ndev->stats.tx_errors++;
1872
1873	schedule_work(&priv->work);
1874}
1875
1876static void ravb_tx_timeout_work(struct work_struct *work)
1877{
1878	struct ravb_private *priv = container_of(work, struct ravb_private,
1879						 work);
1880	const struct ravb_hw_info *info = priv->info;
1881	struct net_device *ndev = priv->ndev;
1882	int error;
1883
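	/* A plain rtnl_lock() here could deadlock: ravb_close() runs under
	 * the RTNL and cancel_work_sync()s this work, so the handler must
	 * not block on the lock. It backs off and reschedules itself
	 * instead.
	 */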
1884	if (!rtnl_trylock()) {
1885		usleep_range(1000, 2000);
1886		schedule_work(&priv->work);
1887		return;
1888	}
1889
1890	netif_tx_stop_all_queues(ndev);
1891
1892	/* Stop PTP Clock driver */
1893	if (info->gptp)
1894		ravb_ptp_stop(ndev);
1895
1896	/* Wait for DMA stopping */
1897	if (ravb_stop_dma(ndev)) {
1898		/* If ravb_stop_dma() fails, the hardware is still operating
1899		 * for TX and/or RX. So, this should not call the following
1900		 * functions because ravb_dmac_init() might fail too. Also,
1901		 * this should not retry ravb_stop_dma() again and again here
1902		 * because that could end up waiting forever. So, this just
1903		 * re-enables TX and RX and skips the following
1904		 * re-initialization procedure.
1905		 */
1906		ravb_rcv_snd_enable(ndev);
1907		goto out;
1908	}
1909
1910	ravb_ring_free(ndev, RAVB_BE);
1911	if (info->nc_queues)
1912		ravb_ring_free(ndev, RAVB_NC);
1913
1914	/* Device init */
1915	error = ravb_dmac_init(ndev);
1916	if (error) {
1917		/* If ravb_dmac_init() fails, the descriptors have been freed,
1918		 * so this should return here to avoid re-enabling TX and RX
1919		 * in ravb_emac_init().
1920		 */
1921		netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
1922			   __func__, error);
1923		goto out_unlock;
1924	}
1925	ravb_emac_init(ndev);
1926
1927out:
1928	/* Initialise PTP Clock driver */
1929	if (info->gptp)
1930		ravb_ptp_init(ndev, priv->pdev);
1931
1932	netif_tx_start_all_queues(ndev);
1933
1934out_unlock:
1935	rtnl_unlock();
1936}
1937
1938/* Packet transmit function for Ethernet AVB */
1939static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1940{
1941	struct ravb_private *priv = netdev_priv(ndev);
1942	const struct ravb_hw_info *info = priv->info;
1943	unsigned int num_tx_desc = priv->num_tx_desc;
1944	u16 q = skb_get_queue_mapping(skb);
1945	struct ravb_tstamp_skb *ts_skb;
1946	struct ravb_tx_desc *desc;
1947	unsigned long flags;
1948	dma_addr_t dma_addr;
1949	void *buffer;
1950	u32 entry;
1951	u32 len;
1952
1952
1953	spin_lock_irqsave(&priv->lock, flags);
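	/* One skb's worth of descriptors is always kept free so that a
	 * full ring can be told apart from an empty one. With, say, a
	 * 64-entry ring and two descriptors per skb (aligned TX), at most
	 * 63 * 2 = 126 descriptors may be outstanding at once.
	 */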
1954	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
1955	    num_tx_desc) {
1956		netif_err(priv, tx_queued, ndev,
1957			  "still transmitting with the full ring!\n");
1958		netif_stop_subqueue(ndev, q);
1959		spin_unlock_irqrestore(&priv->lock, flags);
1960		return NETDEV_TX_BUSY;
1961	}
1962
1963	if (skb_put_padto(skb, ETH_ZLEN))
1964		goto exit;
1965
1966	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
1967	priv->tx_skb[q][entry / num_tx_desc] = skb;
1968
1969	if (num_tx_desc > 1) {
1970		buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
1971			 entry / num_tx_desc * DPTR_ALIGN;
1972		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
1973
1974		/* Zero length DMA descriptors are problematic as they seem
1975		 * to terminate DMA transfers. Avoid them by simply using a
1976		 * length of DPTR_ALIGN (4) when skb data is aligned to
1977		 * DPTR_ALIGN.
1978		 *
1979		 * As skb is guaranteed to have at least ETH_ZLEN (60)
1980		 * bytes of data by the call to skb_put_padto() above this
1981		 * is safe with respect to both the length of the first DMA
1982		 * descriptor (len) overflowing the available data and the
1983		 * length of the second DMA descriptor (skb->len - len)
1984		 * being negative.
1985		 */
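		/* For example, if skb->data is already 4-byte aligned, the
		 * first descriptor carries the first DPTR_ALIGN (4) bytes
		 * and the second the remaining skb->len - 4 bytes, so
		 * neither descriptor ends up with a zero length.
		 */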
1986		if (len == 0)
1987			len = DPTR_ALIGN;
1988
1989		memcpy(buffer, skb->data, len);
1990		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1991					  DMA_TO_DEVICE);
1992		if (dma_mapping_error(ndev->dev.parent, dma_addr))
1993			goto drop;
1994
1995		desc = &priv->tx_ring[q][entry];
1996		desc->ds_tagl = cpu_to_le16(len);
1997		desc->dptr = cpu_to_le32(dma_addr);
1998
1999		buffer = skb->data + len;
2000		len = skb->len - len;
2001		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
2002					  DMA_TO_DEVICE);
2003		if (dma_mapping_error(ndev->dev.parent, dma_addr))
2004			goto unmap;
2005
2006		desc++;
2007	} else {
2008		desc = &priv->tx_ring[q][entry];
2009		len = skb->len;
2010		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
2011					  DMA_TO_DEVICE);
2012		if (dma_mapping_error(ndev->dev.parent, dma_addr))
2013			goto drop;
2014	}
2015	desc->ds_tagl = cpu_to_le16(len);
2016	desc->dptr = cpu_to_le32(dma_addr);
2017
2018	/* TX timestamp required */
2019	if (info->gptp || info->ccc_gac) {
2020		if (q == RAVB_NC) {
2021			ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
2022			if (!ts_skb) {
2023				if (num_tx_desc > 1) {
2024					desc--;
2025					dma_unmap_single(ndev->dev.parent, dma_addr,
2026							 len, DMA_TO_DEVICE);
2027				}
2028				goto unmap;
2029			}
2030			ts_skb->skb = skb_get(skb);
2031			ts_skb->tag = priv->ts_skb_tag++;
2032			priv->ts_skb_tag &= 0x3ff;
2033			list_add_tail(&ts_skb->list, &priv->ts_skb_list);
2034
2035			/* TAG and timestamp required flag */
2036			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
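			/* The 10-bit tag is split across two descriptor
			 * fields: bits 9:4 into tagh_tsr (alongside TX_TSR)
			 * and bits 3:0 into the top nibble of ds_tagl. E.g.
			 * tag 0x25b gives tag >> 4 = 0x25 and a ds_tagl
			 * OR-mask of 0xb000.
			 */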
2037			desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
2038			desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
2039		}
2040
2041		skb_tx_timestamp(skb);
2042	}
2043	/* Descriptor type must be set after all the above writes */
2044	dma_wmb();
2045	if (num_tx_desc > 1) {
2046		desc->die_dt = DT_FEND;
2047		desc--;
2048		desc->die_dt = DT_FSTART;
2049	} else {
2050		desc->die_dt = DT_FSINGLE;
2051	}
2052	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
2053
2054	priv->cur_tx[q] += num_tx_desc;
2055	if (priv->cur_tx[q] - priv->dirty_tx[q] >
2056	    (priv->num_tx_ring[q] - 1) * num_tx_desc &&
2057	    !ravb_tx_free(ndev, q, true))
2058		netif_stop_subqueue(ndev, q);
2059
2060exit:
2061	spin_unlock_irqrestore(&priv->lock, flags);
2062	return NETDEV_TX_OK;
2063
2064unmap:
2065	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
2066			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
2067drop:
2068	dev_kfree_skb_any(skb);
2069	priv->tx_skb[q][entry / num_tx_desc] = NULL;
2070	goto exit;
2071}
2072
2073static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
2074			     struct net_device *sb_dev)
2075{
2076	/* If skb needs TX timestamp, it is handled in network control queue */
2077	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
2078							       RAVB_BE;
2079
2080}
2081
2082static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
2083{
2084	struct ravb_private *priv = netdev_priv(ndev);
2085	const struct ravb_hw_info *info = priv->info;
2086	struct net_device_stats *nstats, *stats0, *stats1;
2087
2088	nstats = &ndev->stats;
2089	stats0 = &priv->stats[RAVB_BE];
2090
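	/* The hardware counters below are read-and-clear: each accumulated
	 * value is folded into the software stats and the register is then
	 * written to zero so that the next read starts from scratch.
	 */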
2091	if (info->tx_counters) {
2092		nstats->tx_dropped += ravb_read(ndev, TROCR);
2093		ravb_write(ndev, 0, TROCR);	/* (write clear) */
2094	}
2095
2096	if (info->carrier_counters) {
2097		nstats->collisions += ravb_read(ndev, CXR41);
2098		ravb_write(ndev, 0, CXR41);	/* (write clear) */
2099		nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
2100		ravb_write(ndev, 0, CXR42);	/* (write clear) */
2101	}
2102
2103	nstats->rx_packets = stats0->rx_packets;
2104	nstats->tx_packets = stats0->tx_packets;
2105	nstats->rx_bytes = stats0->rx_bytes;
2106	nstats->tx_bytes = stats0->tx_bytes;
2107	nstats->multicast = stats0->multicast;
2108	nstats->rx_errors = stats0->rx_errors;
2109	nstats->rx_crc_errors = stats0->rx_crc_errors;
2110	nstats->rx_frame_errors = stats0->rx_frame_errors;
2111	nstats->rx_length_errors = stats0->rx_length_errors;
2112	nstats->rx_missed_errors = stats0->rx_missed_errors;
2113	nstats->rx_over_errors = stats0->rx_over_errors;
2114	if (info->nc_queues) {
2115		stats1 = &priv->stats[RAVB_NC];
2116
2117		nstats->rx_packets += stats1->rx_packets;
2118		nstats->tx_packets += stats1->tx_packets;
2119		nstats->rx_bytes += stats1->rx_bytes;
2120		nstats->tx_bytes += stats1->tx_bytes;
2121		nstats->multicast += stats1->multicast;
2122		nstats->rx_errors += stats1->rx_errors;
2123		nstats->rx_crc_errors += stats1->rx_crc_errors;
2124		nstats->rx_frame_errors += stats1->rx_frame_errors;
2125		nstats->rx_length_errors += stats1->rx_length_errors;
2126		nstats->rx_missed_errors += stats1->rx_missed_errors;
2127		nstats->rx_over_errors += stats1->rx_over_errors;
2128	}
2129
2130	return nstats;
2131}
2132
2133/* Update promiscuous bit */
2134static void ravb_set_rx_mode(struct net_device *ndev)
2135{
2136	struct ravb_private *priv = netdev_priv(ndev);
2137	unsigned long flags;
2138
2139	spin_lock_irqsave(&priv->lock, flags);
2140	ravb_modify(ndev, ECMR, ECMR_PRM,
2141		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
2142	spin_unlock_irqrestore(&priv->lock, flags);
2143}
2144
2145/* Device close function for Ethernet AVB */
2146static int ravb_close(struct net_device *ndev)
2147{
2148	struct device_node *np = ndev->dev.parent->of_node;
2149	struct ravb_private *priv = netdev_priv(ndev);
2150	const struct ravb_hw_info *info = priv->info;
2151	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
2152
2153	netif_tx_stop_all_queues(ndev);
2154
2155	/* Disable interrupts by clearing the interrupt masks. */
2156	ravb_write(ndev, 0, RIC0);
2157	ravb_write(ndev, 0, RIC2);
2158	ravb_write(ndev, 0, TIC);
2159
2160	/* Stop PTP Clock driver */
2161	if (info->gptp)
2162		ravb_ptp_stop(ndev);
2163
2164	/* Set the config mode to stop the AVB-DMAC's processes */
2165	if (ravb_stop_dma(ndev) < 0)
2166		netdev_err(ndev,
2167			   "device will be stopped after h/w processes are done.\n");
2168
2169	/* Clear the timestamp list */
2170	if (info->gptp || info->ccc_gac) {
2171		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
2172			list_del(&ts_skb->list);
2173			kfree_skb(ts_skb->skb);
2174			kfree(ts_skb);
2175		}
2176	}
2177
2178	/* PHY disconnect */
2179	if (ndev->phydev) {
2180		phy_stop(ndev->phydev);
2181		phy_disconnect(ndev->phydev);
2182		if (of_phy_is_fixed_link(np))
2183			of_phy_deregister_fixed_link(np);
2184	}
2185
2186	cancel_work_sync(&priv->work);
2187
2188	if (info->multi_irqs) {
2189		free_irq(priv->tx_irqs[RAVB_NC], ndev);
2190		free_irq(priv->rx_irqs[RAVB_NC], ndev);
2191		free_irq(priv->tx_irqs[RAVB_BE], ndev);
2192		free_irq(priv->rx_irqs[RAVB_BE], ndev);
2193		free_irq(priv->emac_irq, ndev);
2194		if (info->err_mgmt_irqs) {
2195			free_irq(priv->erra_irq, ndev);
2196			free_irq(priv->mgmta_irq, ndev);
2197		}
2198	}
2199	free_irq(ndev->irq, ndev);
2200
2201	if (info->nc_queues)
2202		napi_disable(&priv->napi[RAVB_NC]);
2203	napi_disable(&priv->napi[RAVB_BE]);
2204
2205	/* Free all the skb's in the RX queue and the DMA buffers. */
2206	ravb_ring_free(ndev, RAVB_BE);
2207	if (info->nc_queues)
2208		ravb_ring_free(ndev, RAVB_NC);
2209
2210	return 0;
2211}
2212
2213static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
2214{
2215	struct ravb_private *priv = netdev_priv(ndev);
2216	struct hwtstamp_config config;
2217
2218	config.flags = 0;
2219	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
2220						HWTSTAMP_TX_OFF;
2221	switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
2222	case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
2223		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
2224		break;
2225	case RAVB_RXTSTAMP_TYPE_ALL:
2226		config.rx_filter = HWTSTAMP_FILTER_ALL;
2227		break;
2228	default:
2229		config.rx_filter = HWTSTAMP_FILTER_NONE;
2230	}
2231
2232	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
2233		-EFAULT : 0;
2234}
2235
2236/* Control hardware time stamping */
2237static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
2238{
2239	struct ravb_private *priv = netdev_priv(ndev);
2240	struct hwtstamp_config config;
2241	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
2242	u32 tstamp_tx_ctrl;
2243
2244	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
2245		return -EFAULT;
2246
2247	switch (config.tx_type) {
2248	case HWTSTAMP_TX_OFF:
2249		tstamp_tx_ctrl = 0;
2250		break;
2251	case HWTSTAMP_TX_ON:
2252		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
2253		break;
2254	default:
2255		return -ERANGE;
2256	}
2257
2258	switch (config.rx_filter) {
2259	case HWTSTAMP_FILTER_NONE:
2260		tstamp_rx_ctrl = 0;
2261		break;
2262	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2263		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
2264		break;
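	/* Any other requested filter is widened to timestamping all
	 * packets; the effective configuration is copied back to user
	 * space below, as the SIOCSHWTSTAMP contract expects.
	 */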
2265	default:
2266		config.rx_filter = HWTSTAMP_FILTER_ALL;
2267		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
2268	}
2269
2270	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
2271	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
2272
2273	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
2274		-EFAULT : 0;
2275}
2276
2277/* ioctl to device function */
2278static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
2279{
2280	struct phy_device *phydev = ndev->phydev;
2281
2282	if (!netif_running(ndev))
2283		return -EINVAL;
2284
2285	if (!phydev)
2286		return -ENODEV;
2287
2288	switch (cmd) {
2289	case SIOCGHWTSTAMP:
2290		return ravb_hwtstamp_get(ndev, req);
2291	case SIOCSHWTSTAMP:
2292		return ravb_hwtstamp_set(ndev, req);
2293	}
2294
2295	return phy_mii_ioctl(phydev, req, cmd);
2296}
2297
2298static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
2299{
2300	struct ravb_private *priv = netdev_priv(ndev);
2301
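	/* The networking core has already validated new_mtu against the
	 * min_mtu/max_mtu limits set up in ravb_probe(), so the value can
	 * be applied directly here.
	 */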
2302	ndev->mtu = new_mtu;
2303
2304	if (netif_running(ndev)) {
2305		synchronize_irq(priv->emac_irq);
2306		ravb_emac_init(ndev);
2307	}
2308
2309	netdev_update_features(ndev);
2310
2311	return 0;
2312}
2313
2314static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
2315{
2316	struct ravb_private *priv = netdev_priv(ndev);
2317	unsigned long flags;
2318
2319	spin_lock_irqsave(&priv->lock, flags);
2320
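	/* ECMR settings are presumably only safe to change while the
	 * transmitter and receiver are halted, hence traffic is briefly
	 * stopped around the RCSC update below.
	 */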
2321	/* Disable TX and RX */
2322	ravb_rcv_snd_disable(ndev);
2323
2324	/* Modify RX Checksum setting */
2325	ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
2326
2327	/* Enable TX and RX */
2328	ravb_rcv_snd_enable(ndev);
2329
2330	spin_unlock_irqrestore(&priv->lock, flags);
2331}
2332
2333static int ravb_set_features_gbeth(struct net_device *ndev,
2334				   netdev_features_t features)
2335{
2336	/* Placeholder */
2337	return 0;
2338}
2339
2340static int ravb_set_features_rcar(struct net_device *ndev,
2341				  netdev_features_t features)
2342{
2343	netdev_features_t changed = ndev->features ^ features;
2344
2345	if (changed & NETIF_F_RXCSUM)
2346		ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
2347
2348	ndev->features = features;
2349
2350	return 0;
2351}
2352
2353static int ravb_set_features(struct net_device *ndev,
2354			     netdev_features_t features)
2355{
2356	struct ravb_private *priv = netdev_priv(ndev);
2357	const struct ravb_hw_info *info = priv->info;
2358
2359	return info->set_feature(ndev, features);
2360}
2361
2362static const struct net_device_ops ravb_netdev_ops = {
2363	.ndo_open		= ravb_open,
2364	.ndo_stop		= ravb_close,
2365	.ndo_start_xmit		= ravb_start_xmit,
2366	.ndo_select_queue	= ravb_select_queue,
2367	.ndo_get_stats		= ravb_get_stats,
2368	.ndo_set_rx_mode	= ravb_set_rx_mode,
2369	.ndo_tx_timeout		= ravb_tx_timeout,
2370	.ndo_eth_ioctl		= ravb_do_ioctl,
2371	.ndo_change_mtu		= ravb_change_mtu,
2372	.ndo_validate_addr	= eth_validate_addr,
2373	.ndo_set_mac_address	= eth_mac_addr,
2374	.ndo_set_features	= ravb_set_features,
2375};
2376
2377/* MDIO bus init function */
2378static int ravb_mdio_init(struct ravb_private *priv)
2379{
2380	struct platform_device *pdev = priv->pdev;
2381	struct device *dev = &pdev->dev;
2382	struct phy_device *phydev;
2383	struct device_node *pn;
2384	int error;
2385
2386	/* Bitbang init */
2387	priv->mdiobb.ops = &bb_ops;
2388
2389	/* MII controller setting */
2390	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
2391	if (!priv->mii_bus)
2392		return -ENOMEM;
2393
2394	/* Hook up MII support for ethtool */
2395	priv->mii_bus->name = "ravb_mii";
2396	priv->mii_bus->parent = dev;
2397	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2398		 pdev->name, pdev->id);
2399
2400	/* Register MDIO bus */
2401	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
2402	if (error)
2403		goto out_free_bus;
2404
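	/* Tell the generic mdio_bus PM ops to leave the attached PHY alone:
	 * this driver stops and restarts the PHY itself across suspend and
	 * resume, so the PHY must not be suspended a second time.
	 */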
2405	pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
2406	phydev = of_phy_find_device(pn);
2407	if (phydev) {
2408		phydev->mac_managed_pm = true;
2409		put_device(&phydev->mdio.dev);
2410	}
2411	of_node_put(pn);
2412
2413	return 0;
2414
2415out_free_bus:
2416	free_mdio_bitbang(priv->mii_bus);
2417	return error;
2418}
2419
2420/* MDIO bus release function */
2421static int ravb_mdio_release(struct ravb_private *priv)
2422{
2423	/* Unregister mdio bus */
2424	mdiobus_unregister(priv->mii_bus);
2425
2426	/* Free bitbang info */
2427	free_mdio_bitbang(priv->mii_bus);
2428
2429	return 0;
2430}
2431
2432static const struct ravb_hw_info ravb_gen3_hw_info = {
2433	.rx_ring_free = ravb_rx_ring_free_rcar,
2434	.rx_ring_format = ravb_rx_ring_format_rcar,
2435	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
2436	.receive = ravb_rx_rcar,
2437	.set_rate = ravb_set_rate_rcar,
2438	.set_feature = ravb_set_features_rcar,
2439	.dmac_init = ravb_dmac_init_rcar,
2440	.emac_init = ravb_emac_init_rcar,
2441	.gstrings_stats = ravb_gstrings_stats,
2442	.gstrings_size = sizeof(ravb_gstrings_stats),
2443	.net_hw_features = NETIF_F_RXCSUM,
2444	.net_features = NETIF_F_RXCSUM,
2445	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2446	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2447	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2448	.rx_max_buf_size = SZ_2K,
2449	.internal_delay = 1,
2450	.tx_counters = 1,
2451	.multi_irqs = 1,
2452	.irq_en_dis = 1,
2453	.ccc_gac = 1,
2454	.nc_queues = 1,
2455	.magic_pkt = 1,
2456};
2457
2458static const struct ravb_hw_info ravb_gen2_hw_info = {
2459	.rx_ring_free = ravb_rx_ring_free_rcar,
2460	.rx_ring_format = ravb_rx_ring_format_rcar,
2461	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
2462	.receive = ravb_rx_rcar,
2463	.set_rate = ravb_set_rate_rcar,
2464	.set_feature = ravb_set_features_rcar,
2465	.dmac_init = ravb_dmac_init_rcar,
2466	.emac_init = ravb_emac_init_rcar,
2467	.gstrings_stats = ravb_gstrings_stats,
2468	.gstrings_size = sizeof(ravb_gstrings_stats),
2469	.net_hw_features = NETIF_F_RXCSUM,
2470	.net_features = NETIF_F_RXCSUM,
2471	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2472	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2473	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2474	.rx_max_buf_size = SZ_2K,
2475	.aligned_tx = 1,
2476	.gptp = 1,
2477	.nc_queues = 1,
2478	.magic_pkt = 1,
2479};
2480
2481static const struct ravb_hw_info ravb_rzv2m_hw_info = {
2482	.rx_ring_free = ravb_rx_ring_free_rcar,
2483	.rx_ring_format = ravb_rx_ring_format_rcar,
2484	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
2485	.receive = ravb_rx_rcar,
2486	.set_rate = ravb_set_rate_rcar,
2487	.set_feature = ravb_set_features_rcar,
2488	.dmac_init = ravb_dmac_init_rcar,
2489	.emac_init = ravb_emac_init_rcar,
2490	.gstrings_stats = ravb_gstrings_stats,
2491	.gstrings_size = sizeof(ravb_gstrings_stats),
2492	.net_hw_features = NETIF_F_RXCSUM,
2493	.net_features = NETIF_F_RXCSUM,
2494	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2495	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2496	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2497	.rx_max_buf_size = SZ_2K,
2498	.multi_irqs = 1,
2499	.err_mgmt_irqs = 1,
2500	.gptp = 1,
2501	.gptp_ref_clk = 1,
2502	.nc_queues = 1,
2503	.magic_pkt = 1,
2504};
2505
2506static const struct ravb_hw_info gbeth_hw_info = {
2507	.rx_ring_free = ravb_rx_ring_free_gbeth,
2508	.rx_ring_format = ravb_rx_ring_format_gbeth,
2509	.alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
2510	.receive = ravb_rx_gbeth,
2511	.set_rate = ravb_set_rate_gbeth,
2512	.set_feature = ravb_set_features_gbeth,
2513	.dmac_init = ravb_dmac_init_gbeth,
2514	.emac_init = ravb_emac_init_gbeth,
2515	.gstrings_stats = ravb_gstrings_stats_gbeth,
2516	.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
2517	.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
2518	.max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
2519	.tccr_mask = TCCR_TSRQ0,
2520	.rx_max_buf_size = SZ_8K,
2521	.aligned_tx = 1,
2522	.tx_counters = 1,
2523	.carrier_counters = 1,
2524	.half_duplex = 1,
2525};
2526
2527static const struct of_device_id ravb_match_table[] = {
2528	{ .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
2529	{ .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
2530	{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
2531	{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
2532	{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
2533	{ .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
2534	{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
2535	{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
2536	{ }
2537};
2538MODULE_DEVICE_TABLE(of, ravb_match_table);
2539
2540static int ravb_set_gti(struct net_device *ndev)
2541{
2542	struct ravb_private *priv = netdev_priv(ndev);
2543	const struct ravb_hw_info *info = priv->info;
2544	struct device *dev = ndev->dev.parent;
2545	unsigned long rate;
2546	uint64_t inc;
2547
2548	if (info->gptp_ref_clk)
2549		rate = clk_get_rate(priv->gptp_clk);
2550	else
2551		rate = clk_get_rate(priv->clk);
2552	if (!rate)
2553		return -EINVAL;
2554
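	/* GTI.TIV is the gPTP timer increment per clock cycle in units of
	 * 2^-20 ns. For example, a 125 MHz clock has an 8 ns period, so
	 * inc = (10^9 << 20) / 125000000 = 8 << 20 = 0x800000.
	 */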
2555	inc = div64_ul(1000000000ULL << 20, rate);
2556
2557	if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
2558		dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
2559			inc, GTI_TIV_MIN, GTI_TIV_MAX);
2560		return -EINVAL;
2561	}
2562
2563	ravb_write(ndev, inc, GTI);
2564
2565	return 0;
2566}
2567
2568static int ravb_set_config_mode(struct net_device *ndev)
2569{
2570	struct ravb_private *priv = netdev_priv(ndev);
2571	const struct ravb_hw_info *info = priv->info;
2572	int error;
2573
2574	if (info->gptp) {
2575		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
2576		if (error)
2577			return error;
2578		/* Set CSEL value */
2579		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
2580	} else if (info->ccc_gac) {
2581		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
2582	} else {
2583		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
2584	}
2585
2586	return error;
2587}
2588
2589/* Set tx and rx clock internal delay modes */
2590static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
2591{
2592	struct ravb_private *priv = netdev_priv(ndev);
2593	bool explicit_delay = false;
2594	u32 delay;
2595
2596	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
2597		/* Valid values are 0 and 1800, according to DT bindings */
2598		priv->rxcidm = !!delay;
2599		explicit_delay = true;
2600	}
2601	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
2602		/* Valid values are 0 and 2000, according to DT bindings */
2603		priv->txcidm = !!delay;
2604		explicit_delay = true;
2605	}
2606
2607	if (explicit_delay)
2608		return;
2609
2610	/* Fall back to legacy rgmii-*id behavior */
2611	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
2612	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
2613		priv->rxcidm = 1;
2614		priv->rgmii_override = 1;
2615	}
2616
2617	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
2618	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
2619		priv->txcidm = 1;
2620		priv->rgmii_override = 1;
2621	}
2622}
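/* An illustrative device tree fragment (node name and values are only an
 * example; the valid values come from the DT bindings cited above):
 *
 *	&avb {
 *		phy-mode = "rgmii";
 *		rx-internal-delay-ps = <1800>;
 *		tx-internal-delay-ps = <2000>;
 *	};
 *
 * With both properties present, the legacy rgmii-*id fallback above is
 * skipped entirely.
 */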
2623
2624static void ravb_set_delay_mode(struct net_device *ndev)
2625{
2626	struct ravb_private *priv = netdev_priv(ndev);
2627	u32 set = 0;
2628
2629	if (priv->rxcidm)
2630		set |= APSR_RDM;
2631	if (priv->txcidm)
2632		set |= APSR_TDM;
2633	ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
2634}
2635
2636static int ravb_probe(struct platform_device *pdev)
2637{
2638	struct device_node *np = pdev->dev.of_node;
2639	const struct ravb_hw_info *info;
2640	struct reset_control *rstc;
2641	struct ravb_private *priv;
2642	struct net_device *ndev;
2643	int error, irq, q;
2644	struct resource *res;
2645	int i;
2646
2647	if (!np) {
2648		dev_err(&pdev->dev,
2649			"this driver is required to be instantiated from device tree\n");
2650		return -EINVAL;
2651	}
2652
2653	rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
2654	if (IS_ERR(rstc))
2655		return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
2656				     "failed to get cpg reset\n");
2657
2658	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
2659				  NUM_TX_QUEUE, NUM_RX_QUEUE);
2660	if (!ndev)
2661		return -ENOMEM;
2662
2663	info = of_device_get_match_data(&pdev->dev);
2664
2665	ndev->features = info->net_features;
2666	ndev->hw_features = info->net_hw_features;
2667
2668	error = reset_control_deassert(rstc);
2669	if (error)
2670		goto out_free_netdev;
2671
2672	pm_runtime_enable(&pdev->dev);
2673	error = pm_runtime_resume_and_get(&pdev->dev);
2674	if (error < 0)
2675		goto out_rpm_disable;
2676
2677	if (info->multi_irqs) {
2678		if (info->err_mgmt_irqs)
2679			irq = platform_get_irq_byname(pdev, "dia");
2680		else
2681			irq = platform_get_irq_byname(pdev, "ch22");
2682	} else {
2683		irq = platform_get_irq(pdev, 0);
2684	}
2685	if (irq < 0) {
2686		error = irq;
2687		goto out_release;
2688	}
2689	ndev->irq = irq;
2690
2691	SET_NETDEV_DEV(ndev, &pdev->dev);
2692
2693	priv = netdev_priv(ndev);
2694	priv->info = info;
2695	priv->rstc = rstc;
2696	priv->ndev = ndev;
2697	priv->pdev = pdev;
2698	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
2699	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
2700	if (info->nc_queues) {
2701		priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
2702		priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
2703	}
2704
2705	priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2706	if (IS_ERR(priv->addr)) {
2707		error = PTR_ERR(priv->addr);
2708		goto out_release;
2709	}
2710
2711	/* The Ether-specific entries in the device structure. */
2712	ndev->base_addr = res->start;
2713
2714	spin_lock_init(&priv->lock);
2715	INIT_WORK(&priv->work, ravb_tx_timeout_work);
2716
2717	error = of_get_phy_mode(np, &priv->phy_interface);
2718	if (error && error != -ENODEV)
2719		goto out_release;
2720
2721	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
2722	priv->avb_link_active_low =
2723		of_property_read_bool(np, "renesas,ether-link-active-low");
2724
2725	if (info->multi_irqs) {
2726		if (info->err_mgmt_irqs)
2727			irq = platform_get_irq_byname(pdev, "line3");
2728		else
2729			irq = platform_get_irq_byname(pdev, "ch24");
2730		if (irq < 0) {
2731			error = irq;
2732			goto out_release;
2733		}
2734		priv->emac_irq = irq;
2735		for (i = 0; i < NUM_RX_QUEUE; i++) {
2736			irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
2737			if (irq < 0) {
2738				error = irq;
2739				goto out_release;
2740			}
2741			priv->rx_irqs[i] = irq;
2742		}
2743		for (i = 0; i < NUM_TX_QUEUE; i++) {
2744			irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
2745			if (irq < 0) {
2746				error = irq;
2747				goto out_release;
2748			}
2749			priv->tx_irqs[i] = irq;
2750		}
2751
2752		if (info->err_mgmt_irqs) {
2753			irq = platform_get_irq_byname(pdev, "err_a");
2754			if (irq < 0) {
2755				error = irq;
2756				goto out_release;
2757			}
2758			priv->erra_irq = irq;
2759
2760			irq = platform_get_irq_byname(pdev, "mgmt_a");
2761			if (irq < 0) {
2762				error = irq;
2763				goto out_release;
2764			}
2765			priv->mgmta_irq = irq;
2766		}
2767	}
2768
2769	priv->clk = devm_clk_get(&pdev->dev, NULL);
2770	if (IS_ERR(priv->clk)) {
2771		error = PTR_ERR(priv->clk);
2772		goto out_release;
2773	}
2774
2775	priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
2776	if (IS_ERR(priv->refclk)) {
2777		error = PTR_ERR(priv->refclk);
2778		goto out_release;
2779	}
2780	clk_prepare_enable(priv->refclk);
2781
2782	if (info->gptp_ref_clk) {
2783		priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
2784		if (IS_ERR(priv->gptp_clk)) {
2785			error = PTR_ERR(priv->gptp_clk);
2786			goto out_disable_refclk;
2787		}
2788		clk_prepare_enable(priv->gptp_clk);
2789	}
2790
2791	ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
2792	ndev->min_mtu = ETH_MIN_MTU;
2793
2794	/* FIXME: R-Car Gen2 has a 4-byte alignment restriction for the TX
2795	 * buffer. Use two descriptors to handle this: the first covers the
2796	 * aligned part of the data buffer and the second covers the
2797	 * overflow caused by the alignment.
2798	 */
2799	priv->num_tx_desc = info->aligned_tx ? 2 : 1;
2800
2801	/* Set function */
2802	ndev->netdev_ops = &ravb_netdev_ops;
2803	ndev->ethtool_ops = &ravb_ethtool_ops;
2804
2805	/* Set AVB config mode */
2806	error = ravb_set_config_mode(ndev);
2807	if (error)
2808		goto out_disable_gptp_clk;
2809
2810	if (info->gptp || info->ccc_gac) {
2811		/* Set GTI value */
2812		error = ravb_set_gti(ndev);
2813		if (error)
2814			goto out_disable_gptp_clk;
2815
2816		/* Request GTI loading */
2817		ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
2818	}
2819
2820	if (info->internal_delay) {
2821		ravb_parse_delay_mode(np, ndev);
2822		ravb_set_delay_mode(ndev);
2823	}
2824
2825	/* Allocate descriptor base address table */
2826	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
2827	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
2828					    &priv->desc_bat_dma, GFP_KERNEL);
2829	if (!priv->desc_bat) {
2830		dev_err(&pdev->dev,
2831			"Cannot allocate desc base address table (size %d bytes)\n",
2832			priv->desc_bat_size);
2833		error = -ENOMEM;
2834		goto out_disable_gptp_clk;
2835	}
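	/* Each entry in the descriptor base address table stands for one
	 * RX or TX queue. All entries start out as DT_EOS (end of set) and
	 * are pointed at real descriptor rings later, once the queues'
	 * rings are formatted.
	 */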
2836	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
2837		priv->desc_bat[q].die_dt = DT_EOS;
2838	ravb_write(ndev, priv->desc_bat_dma, DBAT);
2839
2840	/* Initialise HW timestamp list */
2841	INIT_LIST_HEAD(&priv->ts_skb_list);
2842
2843	/* Initialise PTP Clock driver */
2844	if (info->ccc_gac)
2845		ravb_ptp_init(ndev, pdev);
2846
2847	/* Debug message level */
2848	priv->msg_enable = RAVB_DEF_MSG_ENABLE;
2849
2850	/* Read and set MAC address */
2851	ravb_read_mac_address(np, ndev);
2852	if (!is_valid_ether_addr(ndev->dev_addr)) {
2853		dev_warn(&pdev->dev,
2854			 "no valid MAC address supplied, using a random one\n");
2855		eth_hw_addr_random(ndev);
2856	}
2857
2858	/* MDIO bus init */
2859	error = ravb_mdio_init(priv);
2860	if (error) {
2861		dev_err(&pdev->dev, "failed to initialize MDIO\n");
2862		goto out_dma_free;
2863	}
2864
2865	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
2866	if (info->nc_queues)
2867		netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
2868
2869	/* Network device register */
2870	error = register_netdev(ndev);
2871	if (error)
2872		goto out_napi_del;
2873
2874	device_set_wakeup_capable(&pdev->dev, 1);
2875
2876	/* Print device information */
2877	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
2878		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2879
2880	platform_set_drvdata(pdev, ndev);
2881
2882	return 0;
2883
2884out_napi_del:
2885	if (info->nc_queues)
2886		netif_napi_del(&priv->napi[RAVB_NC]);
2887
2888	netif_napi_del(&priv->napi[RAVB_BE]);
2889	ravb_mdio_release(priv);
2890out_dma_free:
2891	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
2892			  priv->desc_bat_dma);
2893
2894	/* Stop PTP Clock driver */
2895	if (info->ccc_gac)
2896		ravb_ptp_stop(ndev);
2897out_disable_gptp_clk:
2898	clk_disable_unprepare(priv->gptp_clk);
2899out_disable_refclk:
2900	clk_disable_unprepare(priv->refclk);
2901out_release:
2902	pm_runtime_put(&pdev->dev);
2903out_rpm_disable:
2904	pm_runtime_disable(&pdev->dev);
2905	reset_control_assert(rstc);
2906out_free_netdev:
2907	free_netdev(ndev);
2908	return error;
2909}
2910
2911static void ravb_remove(struct platform_device *pdev)
2912{
2913	struct net_device *ndev = platform_get_drvdata(pdev);
2914	struct ravb_private *priv = netdev_priv(ndev);
2915	const struct ravb_hw_info *info = priv->info;
2916
2917	unregister_netdev(ndev);
2918	if (info->nc_queues)
2919		netif_napi_del(&priv->napi[RAVB_NC]);
2920	netif_napi_del(&priv->napi[RAVB_BE]);
2921
2922	ravb_mdio_release(priv);
2923
2924	/* Stop PTP Clock driver */
2925	if (info->ccc_gac)
2926		ravb_ptp_stop(ndev);
2927
2928	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
2929			  priv->desc_bat_dma);
2930
2931	ravb_set_opmode(ndev, CCC_OPC_RESET);
2932
2933	clk_disable_unprepare(priv->gptp_clk);
2934	clk_disable_unprepare(priv->refclk);
2935
2936	pm_runtime_put_sync(&pdev->dev);
2937	pm_runtime_disable(&pdev->dev);
2938	reset_control_assert(priv->rstc);
2939	free_netdev(ndev);
2940	platform_set_drvdata(pdev, NULL);
2941}
2942
2943static int ravb_wol_setup(struct net_device *ndev)
2944{
2945	struct ravb_private *priv = netdev_priv(ndev);
2946	const struct ravb_hw_info *info = priv->info;
2947
2948	/* Disable interrupts by clearing the interrupt masks. */
2949	ravb_write(ndev, 0, RIC0);
2950	ravb_write(ndev, 0, RIC2);
2951	ravb_write(ndev, 0, TIC);
2952
2953	/* Only allow ECI interrupts */
2954	synchronize_irq(priv->emac_irq);
2955	if (info->nc_queues)
2956		napi_disable(&priv->napi[RAVB_NC]);
2957	napi_disable(&priv->napi[RAVB_BE]);
2958	ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
2959
2960	/* Enable MagicPacket */
2961	ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
2962
2963	return enable_irq_wake(priv->emac_irq);
2964}
2965
2966static int ravb_wol_restore(struct net_device *ndev)
2967{
2968	struct ravb_private *priv = netdev_priv(ndev);
2969	const struct ravb_hw_info *info = priv->info;
2970
2971	if (info->nc_queues)
2972		napi_enable(&priv->napi[RAVB_NC]);
2973	napi_enable(&priv->napi[RAVB_BE]);
2974
2975	/* Disable MagicPacket */
2976	ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
2977
2978	ravb_close(ndev);
2979
2980	return disable_irq_wake(priv->emac_irq);
2981}
2982
2983static int __maybe_unused ravb_suspend(struct device *dev)
2984{
2985	struct net_device *ndev = dev_get_drvdata(dev);
2986	struct ravb_private *priv = netdev_priv(ndev);
2987	int ret;
2988
2989	if (!netif_running(ndev))
2990		return 0;
2991
2992	netif_device_detach(ndev);
2993
2994	if (priv->wol_enabled)
2995		ret = ravb_wol_setup(ndev);
2996	else
2997		ret = ravb_close(ndev);
2998
2999	if (priv->info->ccc_gac)
3000		ravb_ptp_stop(ndev);
3001
3002	return ret;
3003}
3004
3005static int __maybe_unused ravb_resume(struct device *dev)
3006{
3007	struct net_device *ndev = dev_get_drvdata(dev);
3008	struct ravb_private *priv = netdev_priv(ndev);
3009	const struct ravb_hw_info *info = priv->info;
3010	int ret = 0;
3011
3012	/* If WoL is enabled, set reset mode to rearm the WoL logic */
3013	if (priv->wol_enabled) {
3014		ret = ravb_set_opmode(ndev, CCC_OPC_RESET);
3015		if (ret)
3016			return ret;
3017	}
3018
3019	/* All registers have been reset to their default values. Restore
3020	 * all registers that were set up at probe time and reopen the
3021	 * device if it was running before the system was suspended.
3022	 */
3023
3024	/* Set AVB config mode */
3025	ret = ravb_set_config_mode(ndev);
3026	if (ret)
3027		return ret;
3028
3029	if (info->gptp || info->ccc_gac) {
3030		/* Set GTI value */
3031		ret = ravb_set_gti(ndev);
3032		if (ret)
3033			return ret;
3034
3035		/* Request GTI loading */
3036		ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
3037	}
3038
3039	if (info->internal_delay)
3040		ravb_set_delay_mode(ndev);
3041
3042	/* Restore descriptor base address table */
3043	ravb_write(ndev, priv->desc_bat_dma, DBAT);
3044
3045	if (priv->info->ccc_gac)
3046		ravb_ptp_init(ndev, priv->pdev);
3047
3048	if (netif_running(ndev)) {
3049		if (priv->wol_enabled) {
3050			ret = ravb_wol_restore(ndev);
3051			if (ret)
3052				return ret;
3053		}
3054		ret = ravb_open(ndev);
3055		if (ret < 0)
3056			return ret;
3057		ravb_set_rx_mode(ndev);
3058		netif_device_attach(ndev);
3059	}
3060
3061	return ret;
3062}
3063
3064static int __maybe_unused ravb_runtime_nop(struct device *dev)
3065{
3066	/* Runtime PM callback shared between ->runtime_suspend()
3067	 * and ->runtime_resume(). Simply returns success.
3068	 *
3069	 * This driver re-initializes all registers after
3070	 * pm_runtime_get_sync() anyway so there is no need
3071	 * to save and restore registers here.
3072	 */
3073	return 0;
3074}
3075
3076static const struct dev_pm_ops ravb_dev_pm_ops = {
3077	SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
3078	SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
3079};
3080
3081static struct platform_driver ravb_driver = {
3082	.probe		= ravb_probe,
3083	.remove_new	= ravb_remove,
3084	.driver = {
3085		.name	= "ravb",
3086		.pm	= &ravb_dev_pm_ops,
3087		.of_match_table = ravb_match_table,
3088	},
3089};
3090
3091module_platform_driver(ravb_driver);
3092
3093MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
3094MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
3095MODULE_LICENSE("GPL v2");
 115
 116static struct sk_buff *
 117ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info,
 118	       gfp_t gfp_mask)
 119{
 120	struct sk_buff *skb;
 121	u32 reserve;
 122
 123	skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1,
 124				 gfp_mask);
 125	if (!skb)
 126		return NULL;
 127
 128	reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
 129	if (reserve)
 130		skb_reserve(skb, RAVB_ALIGN - reserve);
 131
 132	return skb;
 133}
 134
 135/* Get MAC address from the MAC address registers
 136 *
 137 * Ethernet AVB device doesn't have ROM for MAC address.
 138 * This function gets the MAC address that was used by a bootloader.
 139 */
 140static void ravb_read_mac_address(struct device_node *np,
 141				  struct net_device *ndev)
 142{
 143	int ret;
 144
 145	ret = of_get_ethdev_address(np, ndev);
 146	if (ret) {
 147		u32 mahr = ravb_read(ndev, MAHR);
 148		u32 malr = ravb_read(ndev, MALR);
 149		u8 addr[ETH_ALEN];
 150
 151		addr[0] = (mahr >> 24) & 0xFF;
 152		addr[1] = (mahr >> 16) & 0xFF;
 153		addr[2] = (mahr >>  8) & 0xFF;
 154		addr[3] = (mahr >>  0) & 0xFF;
 155		addr[4] = (malr >>  8) & 0xFF;
 156		addr[5] = (malr >>  0) & 0xFF;
 157		eth_hw_addr_set(ndev, addr);
 158	}
 159}
 160
 161static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
 162{
 163	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
 164						 mdiobb);
 165
 166	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
 167}
 168
 169/* MDC pin control */
 170static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
 171{
 172	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
 173}
 174
 175/* Data I/O pin control */
 176static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
 177{
 178	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
 179}
 180
 181/* Set data bit */
 182static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
 183{
 184	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
 185}
 186
 187/* Get data bit */
 188static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
 189{
 190	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
 191						 mdiobb);
 192
 193	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
 194}
 195
 196/* MDIO bus control struct */
 197static const struct mdiobb_ops bb_ops = {
 198	.owner = THIS_MODULE,
 199	.set_mdc = ravb_set_mdc,
 200	.set_mdio_dir = ravb_set_mdio_dir,
 201	.set_mdio_data = ravb_set_mdio_data,
 202	.get_mdio_data = ravb_get_mdio_data,
 203};
 204
 205static struct ravb_rx_desc *
 206ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
 207		 unsigned int i)
 208{
 209	return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
 210}
 211
 212/* Free TX skb function for AVB-IP */
 213static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 214{
 215	struct ravb_private *priv = netdev_priv(ndev);
 216	struct net_device_stats *stats = &priv->stats[q];
 217	unsigned int num_tx_desc = priv->num_tx_desc;
 218	struct ravb_tx_desc *desc;
 219	unsigned int entry;
 220	int free_num = 0;
 221	u32 size;
 222
 223	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
 224		bool txed;
 225
 226		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
 227					     num_tx_desc);
 228		desc = &priv->tx_ring[q][entry];
 229		txed = desc->die_dt == DT_FEMPTY;
 230		if (free_txed_only && !txed)
 231			break;
 232		/* Descriptor type must be checked before all other reads */
 233		dma_rmb();
 234		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 235		/* Free the original skb. */
 236		if (priv->tx_skb[q][entry / num_tx_desc]) {
 237			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 238					 size, DMA_TO_DEVICE);
 239			/* Last packet descriptor? */
 240			if (entry % num_tx_desc == num_tx_desc - 1) {
 241				entry /= num_tx_desc;
 242				dev_kfree_skb_any(priv->tx_skb[q][entry]);
 243				priv->tx_skb[q][entry] = NULL;
 244				if (txed)
 245					stats->tx_packets++;
 246			}
 247			free_num++;
 248		}
 249		if (txed)
 250			stats->tx_bytes += size;
 251		desc->die_dt = DT_EEMPTY;
 252	}
 253	return free_num;
 254}
 255
 256static void ravb_rx_ring_free(struct net_device *ndev, int q)
 257{
 258	struct ravb_private *priv = netdev_priv(ndev);
 259	unsigned int ring_size;
 260	unsigned int i;
 261
 262	if (!priv->rx_ring[q].raw)
 263		return;
 264
 265	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 266		struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i);
 267
 268		if (!dma_mapping_error(ndev->dev.parent,
 269				       le32_to_cpu(desc->dptr)))
 270			dma_unmap_single(ndev->dev.parent,
 271					 le32_to_cpu(desc->dptr),
 272					 priv->info->rx_max_frame_size,
 273					 DMA_FROM_DEVICE);
 274	}
 275	ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
 276	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
 277			  priv->rx_desc_dma[q]);
 278	priv->rx_ring[q].raw = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 279}
 280
 281/* Free skb's and DMA buffers for Ethernet AVB */
 282static void ravb_ring_free(struct net_device *ndev, int q)
 283{
 284	struct ravb_private *priv = netdev_priv(ndev);
 
 285	unsigned int num_tx_desc = priv->num_tx_desc;
 286	unsigned int ring_size;
 287	unsigned int i;
 288
 289	ravb_rx_ring_free(ndev, q);
 290
 291	if (priv->tx_ring[q]) {
 292		ravb_tx_free(ndev, q, false);
 293
 294		ring_size = sizeof(struct ravb_tx_desc) *
 295			    (priv->num_tx_ring[q] * num_tx_desc + 1);
 296		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 297				  priv->tx_desc_dma[q]);
 298		priv->tx_ring[q] = NULL;
 299	}
 300
 301	/* Free RX skb ringbuffer */
 302	if (priv->rx_skb[q]) {
 303		for (i = 0; i < priv->num_rx_ring[q]; i++)
 304			dev_kfree_skb(priv->rx_skb[q][i]);
 305	}
 306	kfree(priv->rx_skb[q]);
 307	priv->rx_skb[q] = NULL;
 308
 309	/* Free aligned TX buffers */
 310	kfree(priv->tx_align[q]);
 311	priv->tx_align[q] = NULL;
 312
 313	/* Free TX skb ringbuffer.
 314	 * SKBs are freed by ravb_tx_free() call above.
 315	 */
 316	kfree(priv->tx_skb[q]);
 317	priv->tx_skb[q] = NULL;
 318}
 319
 320static void ravb_rx_ring_format(struct net_device *ndev, int q)
 321{
 322	struct ravb_private *priv = netdev_priv(ndev);
 323	struct ravb_rx_desc *rx_desc;
 324	unsigned int rx_ring_size;
 325	dma_addr_t dma_addr;
 326	unsigned int i;
 327
 328	rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
 329	memset(priv->rx_ring[q].raw, 0, rx_ring_size);
 330	/* Build RX ring buffer */
 331	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 332		/* RX descriptor */
 333		rx_desc = ravb_rx_get_desc(priv, q, i);
 334		rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
 335		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 336					  priv->info->rx_max_frame_size,
 337					  DMA_FROM_DEVICE);
 338		/* We just set the data size to 0 for a failed mapping which
 339		 * should prevent DMA from happening...
 340		 */
 341		if (dma_mapping_error(ndev->dev.parent, dma_addr))
 342			rx_desc->ds_cc = cpu_to_le16(0);
 343		rx_desc->dptr = cpu_to_le32(dma_addr);
 344		rx_desc->die_dt = DT_FEMPTY;
 345	}
 346	rx_desc = ravb_rx_get_desc(priv, q, i);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 347	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 348	rx_desc->die_dt = DT_LINKFIX; /* type */
 349}
 350
 351/* Format skb and descriptor buffer for Ethernet AVB */
 352static void ravb_ring_format(struct net_device *ndev, int q)
 353{
 354	struct ravb_private *priv = netdev_priv(ndev);
 
 355	unsigned int num_tx_desc = priv->num_tx_desc;
 356	struct ravb_tx_desc *tx_desc;
 357	struct ravb_desc *desc;
 358	unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
 359				    num_tx_desc;
 360	unsigned int i;
 361
 362	priv->cur_rx[q] = 0;
 363	priv->cur_tx[q] = 0;
 364	priv->dirty_rx[q] = 0;
 365	priv->dirty_tx[q] = 0;
 366
 367	ravb_rx_ring_format(ndev, q);
 368
 369	memset(priv->tx_ring[q], 0, tx_ring_size);
 370	/* Build TX ring buffer */
 371	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
 372	     i++, tx_desc++) {
 373		tx_desc->die_dt = DT_EEMPTY;
 374		if (num_tx_desc > 1) {
 375			tx_desc++;
 376			tx_desc->die_dt = DT_EEMPTY;
 377		}
 378	}
 379	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 380	tx_desc->die_dt = DT_LINKFIX; /* type */
 381
 382	/* RX descriptor base address for best effort */
 383	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
 384	desc->die_dt = DT_LINKFIX; /* type */
 385	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 386
 387	/* TX descriptor base address for best effort */
 388	desc = &priv->desc_bat[q];
 389	desc->die_dt = DT_LINKFIX; /* type */
 390	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 391}
 392
 393static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
 394{
 395	struct ravb_private *priv = netdev_priv(ndev);
 396	unsigned int ring_size;
 397
 398	ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
 
 
 
 
 
 
 
 
 
 
 
 399
 400	priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
 401						  &priv->rx_desc_dma[q],
 402						  GFP_KERNEL);
 403
 404	return priv->rx_ring[q].raw;
 
 
 
 405}
 406
 407/* Init skb and descriptor buffer for Ethernet AVB */
 408static int ravb_ring_init(struct net_device *ndev, int q)
 409{
 410	struct ravb_private *priv = netdev_priv(ndev);
 411	const struct ravb_hw_info *info = priv->info;
 412	unsigned int num_tx_desc = priv->num_tx_desc;
 413	unsigned int ring_size;
 414	struct sk_buff *skb;
 415	unsigned int i;
 416
 417	/* Allocate RX and TX skb rings */
 418	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
 419				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
 420	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
 421				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
 422	if (!priv->rx_skb[q] || !priv->tx_skb[q])
 423		goto error;
 424
 425	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 426		skb = ravb_alloc_skb(ndev, info, GFP_KERNEL);
 427		if (!skb)
 428			goto error;
 
 429		priv->rx_skb[q][i] = skb;
 430	}
 431
 432	if (num_tx_desc > 1) {
 433		/* Allocate rings for the aligned buffers */
 434		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
 435					    DPTR_ALIGN - 1, GFP_KERNEL);
 436		if (!priv->tx_align[q])
 437			goto error;
 438	}
 439
 440	/* Allocate all RX descriptors. */
 441	if (!ravb_alloc_rx_desc(ndev, q))
 442		goto error;
 443
 444	priv->dirty_rx[q] = 0;
 445
 446	/* Allocate all TX descriptors. */
 447	ring_size = sizeof(struct ravb_tx_desc) *
 448		    (priv->num_tx_ring[q] * num_tx_desc + 1);
 449	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 450					      &priv->tx_desc_dma[q],
 451					      GFP_KERNEL);
 452	if (!priv->tx_ring[q])
 453		goto error;
 454
 455	return 0;
 456
 457error:
 458	ravb_ring_free(ndev, q);
 459
 460	return -ENOMEM;
 461}
 462
 463static void ravb_csum_init_gbeth(struct net_device *ndev)
 464{
 465	bool tx_enable = ndev->features & NETIF_F_HW_CSUM;
 466	bool rx_enable = ndev->features & NETIF_F_RXCSUM;
 467
 468	if (!(tx_enable || rx_enable))
 469		goto done;
 470
 471	ravb_write(ndev, 0, CSR0);
 472	if (ravb_wait(ndev, CSR0, CSR0_TPE | CSR0_RPE, 0)) {
 473		netdev_err(ndev, "Timeout enabling hardware checksum\n");
 474
 475		if (tx_enable)
 476			ndev->features &= ~NETIF_F_HW_CSUM;
 477
 478		if (rx_enable)
 479			ndev->features &= ~NETIF_F_RXCSUM;
 480	} else {
 481		if (tx_enable)
 482			ravb_write(ndev, CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4, CSR1);
 483
 484		if (rx_enable)
 485			ravb_write(ndev, CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4,
 486				   CSR2);
 487	}
 488
 489done:
 490	ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
 491}
 492
 493static void ravb_emac_init_gbeth(struct net_device *ndev)
 494{
 495	struct ravb_private *priv = netdev_priv(ndev);
 496
 497	if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
 498		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
 499		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
 500	} else {
 501		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
 502		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
 503			    CXR31_SEL_LINK0);
 504	}
 505
 506	/* Receive frame limit set register */
 507	ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
 508
 509	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
 510	ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
 511			 ECMR_TE | ECMR_RE | ECMR_RCPT |
 512			 ECMR_TXF | ECMR_RXF, ECMR);
 513
 514	ravb_set_rate_gbeth(ndev);
 515
 516	/* Set MAC address */
 517	ravb_write(ndev,
 518		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
 519		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
 520	ravb_write(ndev, (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);
 521
 522	/* E-MAC status register clear */
 523	ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
 524
 525	ravb_csum_init_gbeth(ndev);
 526
 527	/* E-MAC interrupt enable register */
 528	ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
 529}
 530
 531static void ravb_emac_init_rcar(struct net_device *ndev)
 532{
 533	/* Receive frame limit set register */
 534	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
 535
 536	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
 537	ravb_write(ndev, ECMR_ZPF | ECMR_DM |
 538		   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
 539		   ECMR_TE | ECMR_RE, ECMR);
 540
 541	ravb_set_rate_rcar(ndev);
 542
 543	/* Set MAC address */
 544	ravb_write(ndev,
 545		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
 546		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
 547	ravb_write(ndev,
 548		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);
 549
 550	/* E-MAC status register clear */
 551	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
 552
 553	/* E-MAC interrupt enable register */
 554	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
 555}
 556
 557/* E-MAC init function */
 558static void ravb_emac_init(struct net_device *ndev)
 559{
 560	struct ravb_private *priv = netdev_priv(ndev);
 561	const struct ravb_hw_info *info = priv->info;
 562
 563	info->emac_init(ndev);
 564}
 565
 566static int ravb_dmac_init_gbeth(struct net_device *ndev)
 567{
 568	struct ravb_private *priv = netdev_priv(ndev);
 569	int error;
 570
 571	error = ravb_ring_init(ndev, RAVB_BE);
 572	if (error)
 573		return error;
 574
 575	/* Descriptor format */
 576	ravb_ring_format(ndev, RAVB_BE);
 577
 578	/* Set DMAC RX */
 579	ravb_write(ndev, 0x60000000, RCR);
 580
 581	/* Set Max Frame Length (RTC) */
 582	ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC);
 583
 584	/* Set FIFO size */
 585	ravb_write(ndev, 0x00222200, TGC);
 586
 587	ravb_write(ndev, 0, TCCR);
 588
 589	/* Frame receive */
 590	ravb_write(ndev, RIC0_FRE0, RIC0);
 591	/* Disable FIFO full warning */
 592	ravb_write(ndev, 0x0, RIC1);
 593	/* Receive FIFO full error, descriptor empty */
 594	ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);
 595
 596	ravb_write(ndev, TIC_FTE0, TIC);
 597
 598	return 0;
 599}
 600
 601static int ravb_dmac_init_rcar(struct net_device *ndev)
 602{
 603	struct ravb_private *priv = netdev_priv(ndev);
 604	const struct ravb_hw_info *info = priv->info;
 605	int error;
 606
 607	error = ravb_ring_init(ndev, RAVB_BE);
 608	if (error)
 609		return error;
 610	error = ravb_ring_init(ndev, RAVB_NC);
 611	if (error) {
 612		ravb_ring_free(ndev, RAVB_BE);
 613		return error;
 614	}
 615
 616	/* Descriptor format */
 617	ravb_ring_format(ndev, RAVB_BE);
 618	ravb_ring_format(ndev, RAVB_NC);
 619
 620	/* Set AVB RX */
 621	ravb_write(ndev,
 622		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
 623
 624	/* Set FIFO size */
 625	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
 626
 627	/* Timestamp enable */
 628	ravb_write(ndev, TCCR_TFEN, TCCR);
 629
 630	/* Interrupt init: */
 631	if (info->multi_irqs) {
 632		/* Clear DIL.DPLx */
 633		ravb_write(ndev, 0, DIL);
 634		/* Set queue specific interrupt */
 635		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
 636	}
 637	/* Frame receive */
 638	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
 639	/* Disable FIFO full warning */
 640	ravb_write(ndev, 0, RIC1);
 641	/* Receive FIFO full error, descriptor empty */
 642	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
 643	/* Frame transmitted, timestamp FIFO updated */
 644	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
 645
 646	return 0;
 647}
 648
 649/* Device init function for Ethernet AVB */
 650static int ravb_dmac_init(struct net_device *ndev)
 651{
 652	struct ravb_private *priv = netdev_priv(ndev);
 653	const struct ravb_hw_info *info = priv->info;
 654	int error;
 655
 656	/* Set CONFIG mode */
 657	error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
 658	if (error)
 659		return error;
 660
 661	error = info->dmac_init(ndev);
 662	if (error)
 663		return error;
 664
 665	/* Setting the control will start the AVB-DMAC process. */
 666	return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
 667}
 668
 669static void ravb_get_tx_tstamp(struct net_device *ndev)
 670{
 671	struct ravb_private *priv = netdev_priv(ndev);
 672	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
 673	struct skb_shared_hwtstamps shhwtstamps;
 674	struct sk_buff *skb;
 675	struct timespec64 ts;
 676	u16 tag, tfa_tag;
 677	int count;
 678	u32 tfa2;
 679
 680	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
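	/* TSR_TFFL holds the TX timestamp FIFO fill level; drain that many
	 * entries, matching each FIFO tag against the skbs queued by
	 * ravb_start_xmit() and freeing stale entries along the way.
	 */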
 681	while (count--) {
 682		tfa2 = ravb_read(ndev, TFA2);
 683		tfa_tag = (tfa2 & TFA2_TST) >> 16;
 684		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
 685		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
 686			    ravb_read(ndev, TFA1);
 687		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 688		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
 689		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
 690					 list) {
 691			skb = ts_skb->skb;
 692			tag = ts_skb->tag;
 693			list_del(&ts_skb->list);
 694			kfree(ts_skb);
 695			if (tag == tfa_tag) {
 696				skb_tstamp_tx(skb, &shhwtstamps);
 697				dev_consume_skb_any(skb);
 698				break;
 699			} else {
 700				dev_kfree_skb_any(skb);
 701			}
 702		}
 703		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
 704	}
 705}
 706
 707static void ravb_rx_csum_gbeth(struct sk_buff *skb)
 708{
 709	__wsum csum_ip_hdr, csum_proto;
 710	u8 *hw_csum;
 711
 712	/* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4
 713	 * bytes appended to the packet data. The first 2 bytes are the IP
 714	 * header checksum and the last 2 bytes are the protocol checksum.
 715	 */
 716	if (unlikely(skb->len < sizeof(__sum16) * 2))
 717		return;
 718
 719	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
 720	csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
 721
 722	hw_csum -= sizeof(__sum16);
 723	csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
 724	skb_trim(skb, skb->len - 2 * sizeof(__sum16));
 725
 726	/* TODO: IPV6 Rx checksum */
 727	if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto)
 728		skb->ip_summed = CHECKSUM_UNNECESSARY;
 729}
 730
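/* Unlike the GbEth variant above, which reports a pass/fail status, the
 * R-Car E-MAC appends the raw checksum of the received payload, so it is
 * handed to the stack as CHECKSUM_COMPLETE for software verification.
 */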
 731static void ravb_rx_csum(struct sk_buff *skb)
 732{
 733	u8 *hw_csum;
 734
 735	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
 736	 * appended to packet data
 737	 */
 738	if (unlikely(skb->len < sizeof(__sum16)))
 739		return;
 740	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
 741	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
 742	skb->ip_summed = CHECKSUM_COMPLETE;
 743	skb_trim(skb, skb->len - sizeof(__sum16));
 744}
 745
 746static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
 747					  struct ravb_rx_desc *desc)
 748{
 749	struct ravb_private *priv = netdev_priv(ndev);
 750	struct sk_buff *skb;
 751
 752	skb = priv->rx_skb[RAVB_BE][entry];
 753	priv->rx_skb[RAVB_BE][entry] = NULL;
 754	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 755			 ALIGN(priv->info->rx_max_frame_size, 16),
 756			 DMA_FROM_DEVICE);
 757
 758	return skb;
 759}
 760
 761/* Packet receive function for Gigabit Ethernet */
 762static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 763{
 764	struct ravb_private *priv = netdev_priv(ndev);
 765	const struct ravb_hw_info *info = priv->info;
 766	struct net_device_stats *stats;
 767	struct ravb_rx_desc *desc;
 768	struct sk_buff *skb;
 769	dma_addr_t dma_addr;
 770	int rx_packets = 0;
 771	u8  desc_status;
 772	u16 desc_len;
 773	u8  die_dt;
 774	int entry;
 775	int limit;
 776	int i;
 777
 778	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
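	/* limit is one ring's worth of descriptors minus the entries that
	 * have been consumed but not yet refilled.
	 */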
 779	stats = &priv->stats[q];
 780
 781	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
 782		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
 783		desc = &priv->rx_ring[q].desc[entry];
 784		if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
 785			break;
 786
 787		/* Descriptor type must be checked before all other reads */
 788		dma_rmb();
 789		desc_status = desc->msc;
 790		desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 791
 792		/* We use 0-byte descriptors to mark the DMA mapping errors */
 793		if (!desc_len)
 794			continue;
 795
 796		if (desc_status & MSC_MC)
 797			stats->multicast++;
 798
 799		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
 800			stats->rx_errors++;
 801			if (desc_status & MSC_CRC)
 802				stats->rx_crc_errors++;
 803			if (desc_status & MSC_RFE)
 804				stats->rx_frame_errors++;
 805			if (desc_status & (MSC_RTLF | MSC_RTSF))
 806				stats->rx_length_errors++;
 807			if (desc_status & MSC_CEEF)
 808				stats->rx_missed_errors++;
 809		} else {
 810			die_dt = desc->die_dt & 0xF0;
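			/* The upper nibble of die_dt distinguishes single-
			 * descriptor frames (FSINGLE) from multi-descriptor
			 * chains (FSTART/FMID/FEND) that get linearized into
			 * rx_1st_skb below.
			 */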
 811			switch (die_dt) {
 812			case DT_FSINGLE:
 813				skb = ravb_get_skb_gbeth(ndev, entry, desc);
 814				skb_put(skb, desc_len);
 815				skb->protocol = eth_type_trans(skb, ndev);
 816				if (ndev->features & NETIF_F_RXCSUM)
 817					ravb_rx_csum_gbeth(skb);
 818				napi_gro_receive(&priv->napi[q], skb);
 819				rx_packets++;
 820				stats->rx_bytes += desc_len;
 821				break;
 822			case DT_FSTART:
 823				priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
 824				skb_put(priv->rx_1st_skb, desc_len);
 825				break;
 826			case DT_FMID:
 827				skb = ravb_get_skb_gbeth(ndev, entry, desc);
 828				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
 829							       priv->rx_1st_skb->len,
 830							       skb->data,
 831							       desc_len);
 832				skb_put(priv->rx_1st_skb, desc_len);
 833				dev_kfree_skb(skb);
 834				break;
 835			case DT_FEND:
 836				skb = ravb_get_skb_gbeth(ndev, entry, desc);
 837				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
 838							       priv->rx_1st_skb->len,
 839							       skb->data,
 840							       desc_len);
 841				skb_put(priv->rx_1st_skb, desc_len);
 842				dev_kfree_skb(skb);
 843				priv->rx_1st_skb->protocol =
 844					eth_type_trans(priv->rx_1st_skb, ndev);
 845				if (ndev->features & NETIF_F_RXCSUM)
 846					ravb_rx_csum_gbeth(priv->rx_1st_skb);
 847				stats->rx_bytes += priv->rx_1st_skb->len;
 848				napi_gro_receive(&priv->napi[q],
 849						 priv->rx_1st_skb);
 850				rx_packets++;
 851				break;
 852			}
 853		}
 854	}
 855
 856	/* Refill the RX ring buffers. */
 857	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 858		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 859		desc = &priv->rx_ring[q].desc[entry];
 860		desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
 861
 862		if (!priv->rx_skb[q][entry]) {
 863			skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
 864			if (!skb)
 865				break;
 866			dma_addr = dma_map_single(ndev->dev.parent,
 867						  skb->data,
 868						  priv->info->rx_max_frame_size,
 869						  DMA_FROM_DEVICE);
 870			skb_checksum_none_assert(skb);
 871			/* We just set the data size to 0 for a failed mapping
 872			 * which should prevent DMA from happening...
 873			 */
 874			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 875				desc->ds_cc = cpu_to_le16(0);
 876			desc->dptr = cpu_to_le32(dma_addr);
 877			priv->rx_skb[q][entry] = skb;
 878		}
 879		/* Descriptor type must be set after all the above writes */
 880		dma_wmb();
 881		desc->die_dt = DT_FEMPTY;
 882	}
 883
 884	stats->rx_packets += rx_packets;
 885	*quota -= rx_packets;
 886	return *quota == 0;
 887}
 888
 889/* Packet receive function for Ethernet AVB */
 890static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 891{
 892	struct ravb_private *priv = netdev_priv(ndev);
 893	const struct ravb_hw_info *info = priv->info;
 894	struct net_device_stats *stats = &priv->stats[q];
 895	struct ravb_ex_rx_desc *desc;
 896	unsigned int limit, i;
 897	struct sk_buff *skb;
 898	dma_addr_t dma_addr;
 899	struct timespec64 ts;
 900	int rx_packets = 0;
 901	u8  desc_status;
 902	u16 pkt_len;
 903	int entry;
 904
 905	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
 906	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
 907		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
 908		desc = &priv->rx_ring[q].ex_desc[entry];
 909		if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
 910			break;
 911
 912		/* Descriptor type must be checked before all other reads */
 913		dma_rmb();
 914		desc_status = desc->msc;
 915		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 916
 917		/* We use 0-byte descriptors to mark the DMA mapping errors */
 918		if (!pkt_len)
 919			continue;
 920
 921		if (desc_status & MSC_MC)
 922			stats->multicast++;
 923
 924		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
 925				   MSC_CEEF)) {
 926			stats->rx_errors++;
 927			if (desc_status & MSC_CRC)
 928				stats->rx_crc_errors++;
 929			if (desc_status & MSC_RFE)
 930				stats->rx_frame_errors++;
 931			if (desc_status & (MSC_RTLF | MSC_RTSF))
 932				stats->rx_length_errors++;
 933			if (desc_status & MSC_CEEF)
 934				stats->rx_missed_errors++;
 935		} else {
 936			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
 937
 938			skb = priv->rx_skb[q][entry];
 939			priv->rx_skb[q][entry] = NULL;
 940			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 941					 priv->info->rx_max_frame_size,
 942					 DMA_FROM_DEVICE);
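			/* PTPv2 L2 event frames are received on the NC queue,
			 * so stamp NC traffic when the V2_L2_EVENT filter (or
			 * FILTER_ALL) is active, and BE traffic only in the
			 * FILTER_ALL mode.
			 */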
 943			get_ts &= (q == RAVB_NC) ?
 944					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
 945					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
 946			if (get_ts) {
 947				struct skb_shared_hwtstamps *shhwtstamps;
 948
 949				shhwtstamps = skb_hwtstamps(skb);
 950				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 951				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
 952					     32) | le32_to_cpu(desc->ts_sl);
 953				ts.tv_nsec = le32_to_cpu(desc->ts_n);
 954				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
 955			}
 956
 957			skb_put(skb, pkt_len);
 958			skb->protocol = eth_type_trans(skb, ndev);
 959			if (ndev->features & NETIF_F_RXCSUM)
 960				ravb_rx_csum(skb);
 961			napi_gro_receive(&priv->napi[q], skb);
 962			rx_packets++;
 963			stats->rx_bytes += pkt_len;
 964		}
 965	}
 966
 967	/* Refill the RX ring buffers. */
 968	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 969		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 970		desc = &priv->rx_ring[q].ex_desc[entry];
 971		desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
 972
 973		if (!priv->rx_skb[q][entry]) {
 974			skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
 975			if (!skb)
 976				break;	/* Better luck next round. */
 977			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 978						  priv->info->rx_max_frame_size,
 979						  DMA_FROM_DEVICE);
 980			skb_checksum_none_assert(skb);
 981			/* We just set the data size to 0 for a failed mapping
 982			 * which should prevent DMA from happening...
 983			 */
 984			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 985				desc->ds_cc = cpu_to_le16(0);
 986			desc->dptr = cpu_to_le32(dma_addr);
 987			priv->rx_skb[q][entry] = skb;
 988		}
 989		/* Descriptor type must be set after all the above writes */
 990		dma_wmb();
 991		desc->die_dt = DT_FEMPTY;
 992	}
 993
 994	stats->rx_packets += rx_packets;
 995	*quota -= rx_packets;
 996	return *quota == 0;
 997}
 998
 999/* Packet receive function for Ethernet AVB */
1000static bool ravb_rx(struct net_device *ndev, int *quota, int q)
1001{
1002	struct ravb_private *priv = netdev_priv(ndev);
1003	const struct ravb_hw_info *info = priv->info;
1004
1005	return info->receive(ndev, quota, q);
1006}
1007
1008static void ravb_rcv_snd_disable(struct net_device *ndev)
1009{
1010	/* Disable TX and RX */
1011	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
1012}
1013
1014static void ravb_rcv_snd_enable(struct net_device *ndev)
1015{
1016	/* Enable TX and RX */
1017	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
1018}
1019
1020	/* Function to wait until the DMA processes have finished */
1021static int ravb_stop_dma(struct net_device *ndev)
1022{
1023	struct ravb_private *priv = netdev_priv(ndev);
1024	const struct ravb_hw_info *info = priv->info;
1025	int error;
1026
1027	/* Wait for stopping the hardware TX process */
1028	error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);
1029
1030	if (error)
1031		return error;
1032
1033	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
1034			  0);
1035	if (error)
1036		return error;
1037
1038	/* Stop the E-MAC's RX/TX processes. */
1039	ravb_rcv_snd_disable(ndev);
1040
1041	/* Wait for stopping the RX DMA process */
1042	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
1043	if (error)
1044		return error;
1045
1046	/* Stop AVB-DMAC process */
1047	return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
1048}
1049
1050/* E-MAC interrupt handler */
1051static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
1052{
1053	struct ravb_private *priv = netdev_priv(ndev);
1054	u32 ecsr, psr;
1055
1056	ecsr = ravb_read(ndev, ECSR);
1057	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */
1058
1059	if (ecsr & ECSR_MPD)
1060		pm_wakeup_event(&priv->pdev->dev, 0);
1061	if (ecsr & ECSR_ICD)
1062		ndev->stats.tx_carrier_errors++;
1063	if (ecsr & ECSR_LCHNG) {
1064		/* Link changed */
1065		if (priv->no_avb_link)
1066			return;
1067		psr = ravb_read(ndev, PSR);
1068		if (priv->avb_link_active_low)
1069			psr ^= PSR_LMON;
1070		if (!(psr & PSR_LMON)) {
1071			/* Disable RX and TX */
1072			ravb_rcv_snd_disable(ndev);
1073		} else {
1074			/* Enable RX and TX */
1075			ravb_rcv_snd_enable(ndev);
1076		}
1077	}
1078}
1079
1080static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
1081{
1082	struct net_device *ndev = dev_id;
1083	struct ravb_private *priv = netdev_priv(ndev);
1084	struct device *dev = &priv->pdev->dev;
1085	irqreturn_t result = IRQ_HANDLED;
1086
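	/* Take a runtime PM reference without resuming: if the device is not
	 * already active, this interrupt cannot have come from it.
	 */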
1087	pm_runtime_get_noresume(dev);
1088
1089	if (unlikely(!pm_runtime_active(dev))) {
1090		result = IRQ_NONE;
1091		goto out_rpm_put;
1092	}
1093
1094	spin_lock(&priv->lock);
1095	ravb_emac_interrupt_unlocked(ndev);
1096	spin_unlock(&priv->lock);
1097
1098out_rpm_put:
1099	pm_runtime_put_noidle(dev);
1100	return result;
1101}
1102
1103/* Error interrupt handler */
1104static void ravb_error_interrupt(struct net_device *ndev)
1105{
1106	struct ravb_private *priv = netdev_priv(ndev);
1107	u32 eis, ris2;
1108
1109	eis = ravb_read(ndev, EIS);
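	/* The status bits are cleared by writing 0, so write 1 to every bit
	 * to be preserved and 0 to EIS_QFS and the reserved bits.
	 */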
1110	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
1111	if (eis & EIS_QFS) {
1112		ris2 = ravb_read(ndev, RIS2);
1113		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
1114			   RIS2);
1115
1116		/* Receive Descriptor Empty int (best-effort queue) */
1117		if (ris2 & RIS2_QFF0)
1118			priv->stats[RAVB_BE].rx_over_errors++;
1119
1120		/* Receive Descriptor Empty int (network control queue) */
1121		if (ris2 & RIS2_QFF1)
1122			priv->stats[RAVB_NC].rx_over_errors++;
1123
1124		/* Receive FIFO Overflow int */
1125		if (ris2 & RIS2_RFFF)
1126			priv->rx_fifo_errors++;
1127	}
1128}
1129
1130static bool ravb_queue_interrupt(struct net_device *ndev, int q)
1131{
1132	struct ravb_private *priv = netdev_priv(ndev);
1133	const struct ravb_hw_info *info = priv->info;
1134	u32 ris0 = ravb_read(ndev, RIS0);
1135	u32 ric0 = ravb_read(ndev, RIC0);
1136	u32 tis  = ravb_read(ndev, TIS);
1137	u32 tic  = ravb_read(ndev, TIC);
1138
1139	if (((ris0 & ric0) & BIT(q)) || ((tis  & tic)  & BIT(q))) {
1140		if (napi_schedule_prep(&priv->napi[q])) {
1141			/* Mask RX and TX interrupts */
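			/* SoCs without irq_en_dis mask via read-modify-write
			 * of RIC0/TIC; parts that have it provide dedicated
			 * disable (RID0/TID) and enable (RIE0/TIE) registers.
			 */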
1142			if (!info->irq_en_dis) {
1143				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
1144				ravb_write(ndev, tic & ~BIT(q), TIC);
1145			} else {
1146				ravb_write(ndev, BIT(q), RID0);
1147				ravb_write(ndev, BIT(q), TID);
1148			}
1149			__napi_schedule(&priv->napi[q]);
1150		} else {
1151			netdev_warn(ndev,
1152				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
1153				    ris0, ric0);
1154			netdev_warn(ndev,
1155				    "                    tx status 0x%08x, tx mask 0x%08x.\n",
1156				    tis, tic);
1157		}
1158		return true;
1159	}
1160	return false;
1161}
1162
1163static bool ravb_timestamp_interrupt(struct net_device *ndev)
1164{
1165	u32 tis = ravb_read(ndev, TIS);
1166
1167	if (tis & TIS_TFUF) {
1168		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
1169		ravb_get_tx_tstamp(ndev);
1170		return true;
1171	}
1172	return false;
1173}
1174
1175static irqreturn_t ravb_interrupt(int irq, void *dev_id)
1176{
1177	struct net_device *ndev = dev_id;
1178	struct ravb_private *priv = netdev_priv(ndev);
1179	const struct ravb_hw_info *info = priv->info;
1180	struct device *dev = &priv->pdev->dev;
1181	irqreturn_t result = IRQ_NONE;
1182	u32 iss;
1183
1184	pm_runtime_get_noresume(dev);
1185
1186	if (unlikely(!pm_runtime_active(dev)))
1187		goto out_rpm_put;
1188
1189	spin_lock(&priv->lock);
1190	/* Get interrupt status */
1191	iss = ravb_read(ndev, ISS);
1192
1193	/* Received and transmitted interrupts */
1194	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
1195		int q;
1196
1197		/* Timestamp updated */
1198		if (ravb_timestamp_interrupt(ndev))
1199			result = IRQ_HANDLED;
1200
1201		/* Network control and best effort queue RX/TX */
1202		if (info->nc_queues) {
1203			for (q = RAVB_NC; q >= RAVB_BE; q--) {
1204				if (ravb_queue_interrupt(ndev, q))
1205					result = IRQ_HANDLED;
1206			}
1207		} else {
1208			if (ravb_queue_interrupt(ndev, RAVB_BE))
1209				result = IRQ_HANDLED;
1210		}
1211	}
1212
1213	/* E-MAC status summary */
1214	if (iss & ISS_MS) {
1215		ravb_emac_interrupt_unlocked(ndev);
1216		result = IRQ_HANDLED;
1217	}
1218
1219	/* Error status summary */
1220	if (iss & ISS_ES) {
1221		ravb_error_interrupt(ndev);
1222		result = IRQ_HANDLED;
1223	}
1224
1225	/* gPTP interrupt status summary */
1226	if (iss & ISS_CGIS) {
1227		ravb_ptp_interrupt(ndev);
1228		result = IRQ_HANDLED;
1229	}
1230
1231	spin_unlock(&priv->lock);
1232
1233out_rpm_put:
1234	pm_runtime_put_noidle(dev);
1235	return result;
1236}
1237
1238/* Timestamp/Error/gPTP interrupt handler */
1239static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
1240{
1241	struct net_device *ndev = dev_id;
1242	struct ravb_private *priv = netdev_priv(ndev);
1243	struct device *dev = &priv->pdev->dev;
1244	irqreturn_t result = IRQ_NONE;
1245	u32 iss;
1246
1247	pm_runtime_get_noresume(dev);
1248
1249	if (unlikely(!pm_runtime_active(dev)))
1250		goto out_rpm_put;
1251
1252	spin_lock(&priv->lock);
1253	/* Get interrupt status */
1254	iss = ravb_read(ndev, ISS);
1255
1256	/* Timestamp updated */
1257	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
1258		result = IRQ_HANDLED;
1259
1260	/* Error status summary */
1261	if (iss & ISS_ES) {
1262		ravb_error_interrupt(ndev);
1263		result = IRQ_HANDLED;
1264	}
1265
1266	/* gPTP interrupt status summary */
1267	if (iss & ISS_CGIS) {
1268		ravb_ptp_interrupt(ndev);
1269		result = IRQ_HANDLED;
1270	}
1271
1272	spin_unlock(&priv->lock);
1273
1274out_rpm_put:
1275	pm_runtime_put_noidle(dev);
1276	return result;
1277}
1278
1279static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
1280{
1281	struct net_device *ndev = dev_id;
1282	struct ravb_private *priv = netdev_priv(ndev);
1283	struct device *dev = &priv->pdev->dev;
1284	irqreturn_t result = IRQ_NONE;
1285
1286	pm_runtime_get_noresume(dev);
1287
1288	if (unlikely(!pm_runtime_active(dev)))
1289		goto out_rpm_put;
1290
1291	spin_lock(&priv->lock);
1292
1293	/* Network control/Best effort queue RX/TX */
1294	if (ravb_queue_interrupt(ndev, q))
1295		result = IRQ_HANDLED;
1296
1297	spin_unlock(&priv->lock);
1298
1299out_rpm_put:
1300	pm_runtime_put_noidle(dev);
1301	return result;
1302}
1303
1304static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
1305{
1306	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
1307}
1308
1309static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
1310{
1311	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
1312}
1313
1314static int ravb_poll(struct napi_struct *napi, int budget)
1315{
1316	struct net_device *ndev = napi->dev;
1317	struct ravb_private *priv = netdev_priv(ndev);
1318	const struct ravb_hw_info *info = priv->info;
1319	unsigned long flags;
1320	int q = napi - priv->napi;
1321	int mask = BIT(q);
1322	int quota = budget;
1323	bool unmask;
1324
1325	/* Processing RX Descriptor Ring */
1326	/* Clear RX interrupt */
1327	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
1328	unmask = !ravb_rx(ndev, &quota, q);
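	/* ravb_rx() returns true when the quota was exhausted; only re-enable
	 * the interrupts below once all pending RX work has been completed.
	 */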
1329
1330	/* Processing TX Descriptor Ring */
1331	spin_lock_irqsave(&priv->lock, flags);
1332	/* Clear TX interrupt */
1333	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
1334	ravb_tx_free(ndev, q, true);
1335	netif_wake_subqueue(ndev, q);
1336	spin_unlock_irqrestore(&priv->lock, flags);
1337
1338	/* Propagate the receive error counters to the net_device stats */
1339	priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
1340	if (info->nc_queues)
1341		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
1342	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
1343		ndev->stats.rx_over_errors = priv->rx_over_errors;
1344	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
1345		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
1346
1347	if (!unmask)
1348		goto out;
1349
1350	napi_complete(napi);
1351
1352	/* Re-enable RX/TX interrupts */
1353	spin_lock_irqsave(&priv->lock, flags);
1354	if (!info->irq_en_dis) {
1355		ravb_modify(ndev, RIC0, mask, mask);
1356		ravb_modify(ndev, TIC,  mask, mask);
1357	} else {
1358		ravb_write(ndev, mask, RIE0);
1359		ravb_write(ndev, mask, TIE);
1360	}
1361	spin_unlock_irqrestore(&priv->lock, flags);
1362
1363out:
1364	return budget - quota;
1365}
1366
1367static void ravb_set_duplex_gbeth(struct net_device *ndev)
1368{
1369	struct ravb_private *priv = netdev_priv(ndev);
1370
1371	ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
1372}
1373
1374/* PHY state control function */
1375static void ravb_adjust_link(struct net_device *ndev)
1376{
1377	struct ravb_private *priv = netdev_priv(ndev);
1378	const struct ravb_hw_info *info = priv->info;
1379	struct phy_device *phydev = ndev->phydev;
1380	bool new_state = false;
1381	unsigned long flags;
1382
1383	spin_lock_irqsave(&priv->lock, flags);
1384
1385	/* Disable TX and RX right away if the E-MAC link change is ignored */
1386	if (priv->no_avb_link)
1387		ravb_rcv_snd_disable(ndev);
1388
1389	if (phydev->link) {
1390		if (info->half_duplex && phydev->duplex != priv->duplex) {
1391			new_state = true;
1392			priv->duplex = phydev->duplex;
1393			ravb_set_duplex_gbeth(ndev);
1394		}
1395
1396		if (phydev->speed != priv->speed) {
1397			new_state = true;
1398			priv->speed = phydev->speed;
1399			info->set_rate(ndev);
1400		}
1401		if (!priv->link) {
1402			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
1403			new_state = true;
1404			priv->link = phydev->link;
1405		}
1406	} else if (priv->link) {
1407		new_state = true;
1408		priv->link = 0;
1409		priv->speed = 0;
1410		if (info->half_duplex)
1411			priv->duplex = -1;
1412	}
1413
1414	/* Enable TX and RX right away if the E-MAC link change is ignored */
1415	if (priv->no_avb_link && phydev->link)
1416		ravb_rcv_snd_enable(ndev);
1417
1418	spin_unlock_irqrestore(&priv->lock, flags);
1419
1420	if (new_state && netif_msg_link(priv))
1421		phy_print_status(phydev);
1422}
1423
1424/* PHY init function */
1425static int ravb_phy_init(struct net_device *ndev)
1426{
1427	struct device_node *np = ndev->dev.parent->of_node;
1428	struct ravb_private *priv = netdev_priv(ndev);
1429	const struct ravb_hw_info *info = priv->info;
1430	struct phy_device *phydev;
1431	struct device_node *pn;
1432	phy_interface_t iface;
1433	int err;
1434
1435	priv->link = 0;
1436	priv->speed = 0;
1437	priv->duplex = -1;
1438
1439	/* Try connecting to PHY */
1440	pn = of_parse_phandle(np, "phy-handle", 0);
1441	if (!pn) {
1442		/* In the case of a fixed PHY, the DT node associated
1443		 * to the PHY is the Ethernet MAC DT node.
1444		 */
1445		if (of_phy_is_fixed_link(np)) {
1446			err = of_phy_register_fixed_link(np);
1447			if (err)
1448				return err;
1449		}
1450		pn = of_node_get(np);
1451	}
1452
1453	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
1454				     : priv->phy_interface;
1455	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
1456	of_node_put(pn);
1457	if (!phydev) {
1458		netdev_err(ndev, "failed to connect PHY\n");
1459		err = -ENOENT;
1460		goto err_deregister_fixed_link;
1461	}
1462
1463	if (!info->half_duplex) {
1464		/* 10BASE, Pause and Asym Pause are not supported */
1465		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1466		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
1467		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
1468		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
1469
1470		/* Half Duplex is not supported */
1471		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1472		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1473	}
1474
1475	phy_attached_info(phydev);
1476
1477	return 0;
1478
1479err_deregister_fixed_link:
1480	if (of_phy_is_fixed_link(np))
1481		of_phy_deregister_fixed_link(np);
1482
1483	return err;
1484}
1485
1486/* PHY control start function */
1487static int ravb_phy_start(struct net_device *ndev)
1488{
1489	int error;
1490
1491	error = ravb_phy_init(ndev);
1492	if (error)
1493		return error;
1494
1495	phy_start(ndev->phydev);
1496
1497	return 0;
1498}
1499
1500static u32 ravb_get_msglevel(struct net_device *ndev)
1501{
1502	struct ravb_private *priv = netdev_priv(ndev);
1503
1504	return priv->msg_enable;
1505}
1506
1507static void ravb_set_msglevel(struct net_device *ndev, u32 value)
1508{
1509	struct ravb_private *priv = netdev_priv(ndev);
1510
1511	priv->msg_enable = value;
1512}
1513
1514static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
1515	"rx_queue_0_current",
1516	"tx_queue_0_current",
1517	"rx_queue_0_dirty",
1518	"tx_queue_0_dirty",
1519	"rx_queue_0_packets",
1520	"tx_queue_0_packets",
1521	"rx_queue_0_bytes",
1522	"tx_queue_0_bytes",
1523	"rx_queue_0_mcast_packets",
1524	"rx_queue_0_errors",
1525	"rx_queue_0_crc_errors",
1526	"rx_queue_0_frame_errors",
1527	"rx_queue_0_length_errors",
1528	"rx_queue_0_csum_offload_errors",
1529	"rx_queue_0_over_errors",
1530};
1531
1532static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1533	"rx_queue_0_current",
1534	"tx_queue_0_current",
1535	"rx_queue_0_dirty",
1536	"tx_queue_0_dirty",
1537	"rx_queue_0_packets",
1538	"tx_queue_0_packets",
1539	"rx_queue_0_bytes",
1540	"tx_queue_0_bytes",
1541	"rx_queue_0_mcast_packets",
1542	"rx_queue_0_errors",
1543	"rx_queue_0_crc_errors",
1544	"rx_queue_0_frame_errors",
1545	"rx_queue_0_length_errors",
1546	"rx_queue_0_missed_errors",
1547	"rx_queue_0_over_errors",
1548
1549	"rx_queue_1_current",
1550	"tx_queue_1_current",
1551	"rx_queue_1_dirty",
1552	"tx_queue_1_dirty",
1553	"rx_queue_1_packets",
1554	"tx_queue_1_packets",
1555	"rx_queue_1_bytes",
1556	"tx_queue_1_bytes",
1557	"rx_queue_1_mcast_packets",
1558	"rx_queue_1_errors",
1559	"rx_queue_1_crc_errors",
1560	"rx_queue_1_frame_errors",
1561	"rx_queue_1_length_errors",
1562	"rx_queue_1_missed_errors",
1563	"rx_queue_1_over_errors",
1564};
1565
1566static int ravb_get_sset_count(struct net_device *netdev, int sset)
1567{
1568	struct ravb_private *priv = netdev_priv(netdev);
1569	const struct ravb_hw_info *info = priv->info;
1570
1571	switch (sset) {
1572	case ETH_SS_STATS:
1573		return info->stats_len;
1574	default:
1575		return -EOPNOTSUPP;
1576	}
1577}
1578
1579static void ravb_get_ethtool_stats(struct net_device *ndev,
1580				   struct ethtool_stats *estats, u64 *data)
1581{
1582	struct ravb_private *priv = netdev_priv(ndev);
1583	const struct ravb_hw_info *info = priv->info;
1584	int num_rx_q;
1585	int i = 0;
1586	int q;
1587
1588	num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
1589	/* Device-specific stats */
1590	for (q = RAVB_BE; q < num_rx_q; q++) {
1591		struct net_device_stats *stats = &priv->stats[q];
1592
1593		data[i++] = priv->cur_rx[q];
1594		data[i++] = priv->cur_tx[q];
1595		data[i++] = priv->dirty_rx[q];
1596		data[i++] = priv->dirty_tx[q];
1597		data[i++] = stats->rx_packets;
1598		data[i++] = stats->tx_packets;
1599		data[i++] = stats->rx_bytes;
1600		data[i++] = stats->tx_bytes;
1601		data[i++] = stats->multicast;
1602		data[i++] = stats->rx_errors;
1603		data[i++] = stats->rx_crc_errors;
1604		data[i++] = stats->rx_frame_errors;
1605		data[i++] = stats->rx_length_errors;
1606		data[i++] = stats->rx_missed_errors;
1607		data[i++] = stats->rx_over_errors;
1608	}
1609}
1610
1611static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1612{
1613	struct ravb_private *priv = netdev_priv(ndev);
1614	const struct ravb_hw_info *info = priv->info;
1615
1616	switch (stringset) {
1617	case ETH_SS_STATS:
1618		memcpy(data, info->gstrings_stats, info->gstrings_size);
1619		break;
1620	}
1621}
1622
1623static void ravb_get_ringparam(struct net_device *ndev,
1624			       struct ethtool_ringparam *ring,
1625			       struct kernel_ethtool_ringparam *kernel_ring,
1626			       struct netlink_ext_ack *extack)
1627{
1628	struct ravb_private *priv = netdev_priv(ndev);
1629
1630	ring->rx_max_pending = BE_RX_RING_MAX;
1631	ring->tx_max_pending = BE_TX_RING_MAX;
1632	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
1633	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
1634}
1635
1636static int ravb_set_ringparam(struct net_device *ndev,
1637			      struct ethtool_ringparam *ring,
1638			      struct kernel_ethtool_ringparam *kernel_ring,
1639			      struct netlink_ext_ack *extack)
1640{
1641	struct ravb_private *priv = netdev_priv(ndev);
1642	const struct ravb_hw_info *info = priv->info;
1643	int error;
1644
1645	if (ring->tx_pending > BE_TX_RING_MAX ||
1646	    ring->rx_pending > BE_RX_RING_MAX ||
1647	    ring->tx_pending < BE_TX_RING_MIN ||
1648	    ring->rx_pending < BE_RX_RING_MIN)
1649		return -EINVAL;
1650	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1651		return -EINVAL;
1652
1653	if (netif_running(ndev)) {
1654		netif_device_detach(ndev);
1655		/* Stop PTP Clock driver */
1656		if (info->gptp)
1657			ravb_ptp_stop(ndev);
1658		/* Wait for DMA stopping */
1659		error = ravb_stop_dma(ndev);
1660		if (error) {
1661			netdev_err(ndev,
1662				   "cannot set ringparam! Any AVB processes are still running?\n");
1663			return error;
1664		}
1665		synchronize_irq(ndev->irq);
1666
1667		/* Free all the skb's in the RX queue and the DMA buffers. */
1668		ravb_ring_free(ndev, RAVB_BE);
1669		if (info->nc_queues)
1670			ravb_ring_free(ndev, RAVB_NC);
1671	}
1672
1673	/* Set new parameters */
1674	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
1675	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
1676
1677	if (netif_running(ndev)) {
1678		error = ravb_dmac_init(ndev);
1679		if (error) {
1680			netdev_err(ndev,
1681				   "%s: ravb_dmac_init() failed, error %d\n",
1682				   __func__, error);
1683			return error;
1684		}
1685
1686		ravb_emac_init(ndev);
1687
1688		/* Initialise PTP Clock driver */
1689		if (info->gptp)
1690			ravb_ptp_init(ndev, priv->pdev);
1691
1692		netif_device_attach(ndev);
1693	}
1694
1695	return 0;
1696}
1697
1698static int ravb_get_ts_info(struct net_device *ndev,
1699			    struct ethtool_ts_info *info)
1700{
1701	struct ravb_private *priv = netdev_priv(ndev);
1702	const struct ravb_hw_info *hw_info = priv->info;
1703
1704	info->so_timestamping =
1705		SOF_TIMESTAMPING_TX_SOFTWARE |
1706		SOF_TIMESTAMPING_RX_SOFTWARE |
1707		SOF_TIMESTAMPING_SOFTWARE |
1708		SOF_TIMESTAMPING_TX_HARDWARE |
1709		SOF_TIMESTAMPING_RX_HARDWARE |
1710		SOF_TIMESTAMPING_RAW_HARDWARE;
1711	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1712	info->rx_filters =
1713		(1 << HWTSTAMP_FILTER_NONE) |
1714		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1715		(1 << HWTSTAMP_FILTER_ALL);
1716	if (hw_info->gptp || hw_info->ccc_gac)
1717		info->phc_index = ptp_clock_index(priv->ptp.clock);
1718
1719	return 0;
1720}
1721
1722static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1723{
1724	struct ravb_private *priv = netdev_priv(ndev);
1725
1726	wol->supported = WAKE_MAGIC;
1727	wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
1728}
1729
1730static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1731{
1732	struct ravb_private *priv = netdev_priv(ndev);
1733	const struct ravb_hw_info *info = priv->info;
1734
1735	if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
1736		return -EOPNOTSUPP;
1737
1738	priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
1739
1740	device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);
1741
1742	return 0;
1743}
1744
1745static const struct ethtool_ops ravb_ethtool_ops = {
1746	.nway_reset		= phy_ethtool_nway_reset,
1747	.get_msglevel		= ravb_get_msglevel,
1748	.set_msglevel		= ravb_set_msglevel,
1749	.get_link		= ethtool_op_get_link,
1750	.get_strings		= ravb_get_strings,
1751	.get_ethtool_stats	= ravb_get_ethtool_stats,
1752	.get_sset_count		= ravb_get_sset_count,
1753	.get_ringparam		= ravb_get_ringparam,
1754	.set_ringparam		= ravb_set_ringparam,
1755	.get_ts_info		= ravb_get_ts_info,
1756	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
1757	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
1758	.get_wol		= ravb_get_wol,
1759	.set_wol		= ravb_set_wol,
1760};
1761
1762static int ravb_set_config_mode(struct net_device *ndev)
1763{
1764	struct ravb_private *priv = netdev_priv(ndev);
1765	const struct ravb_hw_info *info = priv->info;
1766	int error;
1767
1768	if (info->gptp) {
1769		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
1770		if (error)
1771			return error;
1772		/* Set CSEL value */
1773		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
1774	} else if (info->ccc_gac) {
1775		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
1776	} else {
1777		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
1778	}
1779
1780	return error;
1781}
1782
1783static void ravb_set_gti(struct net_device *ndev)
1784{
1785	struct ravb_private *priv = netdev_priv(ndev);
1786	const struct ravb_hw_info *info = priv->info;
1787
1788	if (!(info->gptp || info->ccc_gac))
1789		return;
1790
1791	ravb_write(ndev, priv->gti_tiv, GTI);
1792
1793	/* Request GTI loading */
1794	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
1795}
1796
1797static int ravb_compute_gti(struct net_device *ndev)
1798{
1799	struct ravb_private *priv = netdev_priv(ndev);
1800	const struct ravb_hw_info *info = priv->info;
1801	struct device *dev = ndev->dev.parent;
1802	unsigned long rate;
1803	u64 inc;
1804
1805	if (!(info->gptp || info->ccc_gac))
1806		return 0;
1807
1808	if (info->gptp_ref_clk)
1809		rate = clk_get_rate(priv->gptp_clk);
1810	else
1811		rate = clk_get_rate(priv->clk);
1812	if (!rate)
1813		return -EINVAL;
1814
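	/* GTI.TIV is the gPTP timer increment per clock cycle, expressed in
	 * nanoseconds with 20 fractional bits: inc = 10^9 * 2^20 / rate.
	 */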
1815	inc = div64_ul(1000000000ULL << 20, rate);
1816
1817	if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
1818		dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
1819			inc, GTI_TIV_MIN, GTI_TIV_MAX);
1820		return -EINVAL;
1821	}
1822	priv->gti_tiv = inc;
1823
1824	return 0;
1825}
1826
1827/* Set tx and rx clock internal delay modes */
1828static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
1829{
1830	struct ravb_private *priv = netdev_priv(ndev);
1831	bool explicit_delay = false;
1832	u32 delay;
1833
1834	if (!priv->info->internal_delay)
1835		return;
1836
1837	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
1838		/* Valid values are 0 and 1800, according to DT bindings */
1839		priv->rxcidm = !!delay;
1840		explicit_delay = true;
1841	}
1842	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
1843		/* Valid values are 0 and 2000, according to DT bindings */
1844		priv->txcidm = !!delay;
1845		explicit_delay = true;
1846	}
1847
1848	if (explicit_delay)
1849		return;
1850
1851	/* Fall back to legacy rgmii-*id behavior */
1852	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1853	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1854		priv->rxcidm = 1;
1855		priv->rgmii_override = 1;
1856	}
1857
1858	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1859	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1860		priv->txcidm = 1;
1861		priv->rgmii_override = 1;
1862	}
1863}
1864
1865static void ravb_set_delay_mode(struct net_device *ndev)
1866{
1867	struct ravb_private *priv = netdev_priv(ndev);
1868	u32 set = 0;
1869
1870	if (!priv->info->internal_delay)
1871		return;
1872
1873	if (priv->rxcidm)
1874		set |= APSR_RDM;
1875	if (priv->txcidm)
1876		set |= APSR_TDM;
1877	ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
1878}
1879
1880/* Network device open function for Ethernet AVB */
1881static int ravb_open(struct net_device *ndev)
1882{
1883	struct ravb_private *priv = netdev_priv(ndev);
1884	const struct ravb_hw_info *info = priv->info;
1885	struct device *dev = &priv->pdev->dev;
1886	int error;
1887
1888	napi_enable(&priv->napi[RAVB_BE]);
1889	if (info->nc_queues)
1890		napi_enable(&priv->napi[RAVB_NC]);
1891
1892	error = pm_runtime_resume_and_get(dev);
1893	if (error < 0)
1894		goto out_napi_off;
1895
1896	/* Set AVB config mode */
1897	error = ravb_set_config_mode(ndev);
1898	if (error)
1899		goto out_rpm_put;
1900
1901	ravb_set_delay_mode(ndev);
1902	ravb_write(ndev, priv->desc_bat_dma, DBAT);
1903
1904	/* Device init */
1905	error = ravb_dmac_init(ndev);
1906	if (error)
1907		goto out_set_reset;
1908
1909	ravb_emac_init(ndev);
1910
1911	ravb_set_gti(ndev);
1912
1913	/* Initialise PTP Clock driver */
1914	if (info->gptp || info->ccc_gac)
1915		ravb_ptp_init(ndev, priv->pdev);
1916
1917	/* PHY control start */
1918	error = ravb_phy_start(ndev);
1919	if (error)
1920		goto out_ptp_stop;
1921
1922	netif_tx_start_all_queues(ndev);
1923
1924	return 0;
1925
1926out_ptp_stop:
1927	/* Stop PTP Clock driver */
1928	if (info->gptp || info->ccc_gac)
1929		ravb_ptp_stop(ndev);
1930	ravb_stop_dma(ndev);
1931out_set_reset:
1932	ravb_set_opmode(ndev, CCC_OPC_RESET);
1933out_rpm_put:
1934	pm_runtime_mark_last_busy(dev);
1935	pm_runtime_put_autosuspend(dev);
1936out_napi_off:
1937	if (info->nc_queues)
1938		napi_disable(&priv->napi[RAVB_NC]);
1939	napi_disable(&priv->napi[RAVB_BE]);
1940	return error;
1941}
1942
1943/* Timeout function for Ethernet AVB */
1944static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1945{
1946	struct ravb_private *priv = netdev_priv(ndev);
1947
1948	netif_err(priv, tx_err, ndev,
1949		  "transmit timed out, status %08x, resetting...\n",
1950		  ravb_read(ndev, ISS));
1951
1952	/* tx_errors count up */
1953	ndev->stats.tx_errors++;
1954
1955	schedule_work(&priv->work);
1956}
1957
1958static void ravb_tx_timeout_work(struct work_struct *work)
1959{
1960	struct ravb_private *priv = container_of(work, struct ravb_private,
1961						 work);
1962	const struct ravb_hw_info *info = priv->info;
1963	struct net_device *ndev = priv->ndev;
1964	int error;
1965
1966	if (!rtnl_trylock()) {
1967		usleep_range(1000, 2000);
1968		schedule_work(&priv->work);
1969		return;
1970	}
1971
1972	netif_tx_stop_all_queues(ndev);
1973
1974	/* Stop PTP Clock driver */
1975	if (info->gptp)
1976		ravb_ptp_stop(ndev);
1977
1978	/* Wait for DMA stopping */
1979	if (ravb_stop_dma(ndev)) {
1980		/* If ravb_stop_dma() fails, the hardware is still operating
1981		 * for TX and/or RX. So, this should not call the following
1982		 * functions because ravb_dmac_init() could fail too.
1983		 * Also, this should not retry ravb_stop_dma() again and again
1984		 * here because that could wait forever. So, this just
1985		 * re-enables TX and RX and skips the following
1986		 * re-initialization procedure.
1987		 */
1988		ravb_rcv_snd_enable(ndev);
1989		goto out;
1990	}
1991
1992	ravb_ring_free(ndev, RAVB_BE);
1993	if (info->nc_queues)
1994		ravb_ring_free(ndev, RAVB_NC);
1995
1996	/* Device init */
1997	error = ravb_dmac_init(ndev);
1998	if (error) {
1999		/* If ravb_dmac_init() fails, descriptors are freed. So, this
2000		 * should return here to avoid re-enabling the TX and RX in
2001		 * ravb_emac_init().
2002		 */
2003		netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
2004			   __func__, error);
2005		goto out_unlock;
2006	}
2007	ravb_emac_init(ndev);
2008
2009out:
2010	/* Initialise PTP Clock driver */
2011	if (info->gptp)
2012		ravb_ptp_init(ndev, priv->pdev);
2013
2014	netif_tx_start_all_queues(ndev);
2015
2016out_unlock:
2017	rtnl_unlock();
2018}
2019
2020static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
2021{
2022	struct iphdr *ip = ip_hdr(skb);
2023
2024	/* TODO: Need to add support for VLAN tag 802.1Q */
2025	if (skb_vlan_tag_present(skb))
2026		return false;
2027
2028	/* TODO: Need to add hardware checksum for IPv6 */
2029	if (skb->protocol != htons(ETH_P_IP))
2030		return false;
2031
2032	switch (ip->protocol) {
2033	case IPPROTO_TCP:
2034		break;
2035	case IPPROTO_UDP:
2036		/* If the checksum value in the UDP header field is 0, the TOE
2037		 * does not calculate a checksum for the UDP part of this
2038		 * frame, as that is an optional function per the standards.
2039		 */
2040		if (udp_hdr(skb)->check == 0)
2041			return false;
2042		break;
2043	default:
2044		return false;
2045	}
2046
2047	return true;
2048}
2049
2050/* Packet transmit function for Ethernet AVB */
2051static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2052{
2053	struct ravb_private *priv = netdev_priv(ndev);
2054	const struct ravb_hw_info *info = priv->info;
2055	unsigned int num_tx_desc = priv->num_tx_desc;
2056	u16 q = skb_get_queue_mapping(skb);
2057	struct ravb_tstamp_skb *ts_skb;
2058	struct ravb_tx_desc *desc;
2059	unsigned long flags;
2060	dma_addr_t dma_addr;
2061	void *buffer;
2062	u32 entry;
2063	u32 len;
2064
2065	if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb))
2066		skb_checksum_help(skb);
2067
2068	spin_lock_irqsave(&priv->lock, flags);
2069	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
2070	    num_tx_desc) {
2071		netif_err(priv, tx_queued, ndev,
2072			  "still transmitting with the full ring!\n");
2073		netif_stop_subqueue(ndev, q);
2074		spin_unlock_irqrestore(&priv->lock, flags);
2075		return NETDEV_TX_BUSY;
2076	}
2077
2078	if (skb_put_padto(skb, ETH_ZLEN))
2079		goto exit;
2080
2081	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
2082	priv->tx_skb[q][entry / num_tx_desc] = skb;
2083
2084	if (num_tx_desc > 1) {
2085		buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
2086			 entry / num_tx_desc * DPTR_ALIGN;
2087		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
2088
2089		/* Zero length DMA descriptors are problematic as they seem
2090		 * to terminate DMA transfers. Avoid them by simply using a
2091		 * length of DPTR_ALIGN (4) when skb data is aligned to
2092		 * DPTR_ALIGN.
2093		 *
2094		 * As skb is guaranteed to have at least ETH_ZLEN (60)
2095		 * bytes of data by the call to skb_put_padto() above this
2096		 * is safe with respect to both the length of the first DMA
2097		 * descriptor (len) overflowing the available data and the
2098		 * length of the second DMA descriptor (skb->len - len)
2099		 * being negative.
2100		 */
2101		if (len == 0)
2102			len = DPTR_ALIGN;
2103
2104		memcpy(buffer, skb->data, len);
2105		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
2106					  DMA_TO_DEVICE);
2107		if (dma_mapping_error(ndev->dev.parent, dma_addr))
2108			goto drop;
2109
2110		desc = &priv->tx_ring[q][entry];
2111		desc->ds_tagl = cpu_to_le16(len);
2112		desc->dptr = cpu_to_le32(dma_addr);
2113
2114		buffer = skb->data + len;
2115		len = skb->len - len;
2116		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
2117					  DMA_TO_DEVICE);
2118		if (dma_mapping_error(ndev->dev.parent, dma_addr))
2119			goto unmap;
2120
2121		desc++;
2122	} else {
2123		desc = &priv->tx_ring[q][entry];
2124		len = skb->len;
2125		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
2126					  DMA_TO_DEVICE);
2127		if (dma_mapping_error(ndev->dev.parent, dma_addr))
2128			goto drop;
2129	}
2130	desc->ds_tagl = cpu_to_le16(len);
2131	desc->dptr = cpu_to_le32(dma_addr);
2132
2133	/* TX timestamp required */
2134	if (info->gptp || info->ccc_gac) {
2135		if (q == RAVB_NC) {
2136			ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
2137			if (!ts_skb) {
2138				if (num_tx_desc > 1) {
2139					desc--;
2140					dma_unmap_single(ndev->dev.parent, dma_addr,
2141							 len, DMA_TO_DEVICE);
2142				}
2143				goto unmap;
2144			}
2145			ts_skb->skb = skb_get(skb);
2146			ts_skb->tag = priv->ts_skb_tag++;
2147			priv->ts_skb_tag &= 0x3ff;
2148			list_add_tail(&ts_skb->list, &priv->ts_skb_list);
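			/* The hardware echoes this tag back in TFA2 together
			 * with the timestamp, and ravb_get_tx_tstamp() uses it
			 * to find the matching skb. The low 4 tag bits land in
			 * DS_TAGL[15:12], the upper 6 bits in TAGH.
			 */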
2149
2150			/* TAG and timestamp required flag */
2151			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2152			desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
2153			desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
2154		}
2155
2156		skb_tx_timestamp(skb);
2157	}
2158	/* Descriptor type must be set after all the above writes */
2159	dma_wmb();
2160	if (num_tx_desc > 1) {
2161		desc->die_dt = DT_FEND;
2162		desc--;
2163		desc->die_dt = DT_FSTART;
2164	} else {
2165		desc->die_dt = DT_FSINGLE;
2166	}
2167	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
2168
2169	priv->cur_tx[q] += num_tx_desc;
2170	if (priv->cur_tx[q] - priv->dirty_tx[q] >
2171	    (priv->num_tx_ring[q] - 1) * num_tx_desc &&
2172	    !ravb_tx_free(ndev, q, true))
2173		netif_stop_subqueue(ndev, q);
2174
2175exit:
2176	spin_unlock_irqrestore(&priv->lock, flags);
2177	return NETDEV_TX_OK;
2178
2179unmap:
2180	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
2181			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
2182drop:
2183	dev_kfree_skb_any(skb);
2184	priv->tx_skb[q][entry / num_tx_desc] = NULL;
2185	goto exit;
2186}
2187
2188static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
2189			     struct net_device *sb_dev)
2190{
2191	/* An skb that needs a TX timestamp is handled in the network control queue */
2192	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
2193							       RAVB_BE;
2195}
2196
2197static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
2198{
2199	struct ravb_private *priv = netdev_priv(ndev);
2200	const struct ravb_hw_info *info = priv->info;
2201	struct net_device_stats *nstats, *stats0, *stats1;
2202	struct device *dev = &priv->pdev->dev;
2203
2204	nstats = &ndev->stats;
2205
2206	pm_runtime_get_noresume(dev);
2207
2208	if (!pm_runtime_active(dev))
2209		goto out_rpm_put;
2210
2211	stats0 = &priv->stats[RAVB_BE];
2212
2213	if (info->tx_counters) {
2214		nstats->tx_dropped += ravb_read(ndev, TROCR);
2215		ravb_write(ndev, 0, TROCR);	/* (write clear) */
2216	}
2217
2218	if (info->carrier_counters) {
2219		nstats->collisions += ravb_read(ndev, CXR41);
2220		ravb_write(ndev, 0, CXR41);	/* (write clear) */
2221		nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
2222		ravb_write(ndev, 0, CXR42);	/* (write clear) */
2223	}
2224
2225	nstats->rx_packets = stats0->rx_packets;
2226	nstats->tx_packets = stats0->tx_packets;
2227	nstats->rx_bytes = stats0->rx_bytes;
2228	nstats->tx_bytes = stats0->tx_bytes;
2229	nstats->multicast = stats0->multicast;
2230	nstats->rx_errors = stats0->rx_errors;
2231	nstats->rx_crc_errors = stats0->rx_crc_errors;
2232	nstats->rx_frame_errors = stats0->rx_frame_errors;
2233	nstats->rx_length_errors = stats0->rx_length_errors;
2234	nstats->rx_missed_errors = stats0->rx_missed_errors;
2235	nstats->rx_over_errors = stats0->rx_over_errors;
2236	if (info->nc_queues) {
2237		stats1 = &priv->stats[RAVB_NC];
2238
2239		nstats->rx_packets += stats1->rx_packets;
2240		nstats->tx_packets += stats1->tx_packets;
2241		nstats->rx_bytes += stats1->rx_bytes;
2242		nstats->tx_bytes += stats1->tx_bytes;
2243		nstats->multicast += stats1->multicast;
2244		nstats->rx_errors += stats1->rx_errors;
2245		nstats->rx_crc_errors += stats1->rx_crc_errors;
2246		nstats->rx_frame_errors += stats1->rx_frame_errors;
2247		nstats->rx_length_errors += stats1->rx_length_errors;
2248		nstats->rx_missed_errors += stats1->rx_missed_errors;
2249		nstats->rx_over_errors += stats1->rx_over_errors;
2250	}
2251
2252out_rpm_put:
2253	pm_runtime_put_noidle(dev);
2254	return nstats;
2255}
2256
2257/* Update promiscuous bit */
2258static void ravb_set_rx_mode(struct net_device *ndev)
2259{
2260	struct ravb_private *priv = netdev_priv(ndev);
2261	unsigned long flags;
2262
2263	spin_lock_irqsave(&priv->lock, flags);
2264	ravb_modify(ndev, ECMR, ECMR_PRM,
2265		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
2266	spin_unlock_irqrestore(&priv->lock, flags);
2267}
2268
2269/* Device close function for Ethernet AVB */
2270static int ravb_close(struct net_device *ndev)
2271{
2272	struct device_node *np = ndev->dev.parent->of_node;
2273	struct ravb_private *priv = netdev_priv(ndev);
2274	const struct ravb_hw_info *info = priv->info;
2275	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
2276	struct device *dev = &priv->pdev->dev;
2277	int error;
2278
2279	netif_tx_stop_all_queues(ndev);
2280
2281	/* Disable interrupts by clearing the interrupt masks. */
2282	ravb_write(ndev, 0, RIC0);
2283	ravb_write(ndev, 0, RIC2);
2284	ravb_write(ndev, 0, TIC);
2285
2286	/* PHY disconnect */
2287	if (ndev->phydev) {
2288		phy_stop(ndev->phydev);
2289		phy_disconnect(ndev->phydev);
2290		if (of_phy_is_fixed_link(np))
2291			of_phy_deregister_fixed_link(np);
2292	}
2293
2294	/* Stop PTP Clock driver */
2295	if (info->gptp || info->ccc_gac)
2296		ravb_ptp_stop(ndev);
2297
2298	/* Set the config mode to stop the AVB-DMAC's processes */
2299	if (ravb_stop_dma(ndev) < 0)
2300		netdev_err(ndev,
2301			   "device will be stopped after h/w processes are done.\n");
2302
2303	/* Clear the timestamp list */
2304	if (info->gptp || info->ccc_gac) {
2305		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
2306			list_del(&ts_skb->list);
2307			kfree_skb(ts_skb->skb);
2308			kfree(ts_skb);
2309		}
2310	}
2311
2312	cancel_work_sync(&priv->work);
2313
2314	if (info->nc_queues)
2315		napi_disable(&priv->napi[RAVB_NC]);
2316	napi_disable(&priv->napi[RAVB_BE]);
2317
2318	/* Free all the skb's in the RX queue and the DMA buffers. */
2319	ravb_ring_free(ndev, RAVB_BE);
2320	if (info->nc_queues)
2321		ravb_ring_free(ndev, RAVB_NC);
2322
2323	/* Update statistics. */
2324	ravb_get_stats(ndev);
2325
2326	/* Set reset mode. */
2327	error = ravb_set_opmode(ndev, CCC_OPC_RESET);
2328	if (error)
2329		return error;
2330
2331	pm_runtime_mark_last_busy(dev);
2332	pm_runtime_put_autosuspend(dev);
2333
2334	return 0;
2335}
2336
2337static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
2338{
2339	struct ravb_private *priv = netdev_priv(ndev);
2340	struct hwtstamp_config config;
2341
2342	config.flags = 0;
2343	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
2344						HWTSTAMP_TX_OFF;
2345	switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
2346	case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
2347		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
2348		break;
2349	case RAVB_RXTSTAMP_TYPE_ALL:
2350		config.rx_filter = HWTSTAMP_FILTER_ALL;
2351		break;
2352	default:
2353		config.rx_filter = HWTSTAMP_FILTER_NONE;
2354	}
2355
2356	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
2357		-EFAULT : 0;
2358}
2359
2360/* Control hardware time stamping */
2361static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
2362{
2363	struct ravb_private *priv = netdev_priv(ndev);
2364	struct hwtstamp_config config;
2365	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
2366	u32 tstamp_tx_ctrl;
2367
2368	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
2369		return -EFAULT;
2370
2371	switch (config.tx_type) {
2372	case HWTSTAMP_TX_OFF:
2373		tstamp_tx_ctrl = 0;
2374		break;
2375	case HWTSTAMP_TX_ON:
2376		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
2377		break;
2378	default:
2379		return -ERANGE;
2380	}
2381
2382	switch (config.rx_filter) {
2383	case HWTSTAMP_FILTER_NONE:
2384		tstamp_rx_ctrl = 0;
2385		break;
2386	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2387		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
2388		break;
2389	default:
2390		config.rx_filter = HWTSTAMP_FILTER_ALL;
2391		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
2392	}
2393
2394	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
2395	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
2396
2397	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
2398		-EFAULT : 0;
2399}
2400
2401/* ioctl to device function */
2402static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
2403{
2404	struct phy_device *phydev = ndev->phydev;
2405
2406	if (!netif_running(ndev))
2407		return -EINVAL;
2408
2409	if (!phydev)
2410		return -ENODEV;
2411
2412	switch (cmd) {
2413	case SIOCGHWTSTAMP:
2414		return ravb_hwtstamp_get(ndev, req);
2415	case SIOCSHWTSTAMP:
2416		return ravb_hwtstamp_set(ndev, req);
2417	}
2418
2419	return phy_mii_ioctl(phydev, req, cmd);
2420}
2421
2422static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
2423{
2424	struct ravb_private *priv = netdev_priv(ndev);
2425
2426	ndev->mtu = new_mtu;
2427
2428	if (netif_running(ndev)) {
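		/* On the R-Car variant, re-initializing the E-MAC also
		 * re-programs the receive frame length limit (RFLR) from the
		 * new MTU.
		 */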
2429		synchronize_irq(priv->emac_irq);
2430		ravb_emac_init(ndev);
2431	}
2432
2433	netdev_update_features(ndev);
2434
2435	return 0;
2436}
2437
2438static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
2439{
2440	struct ravb_private *priv = netdev_priv(ndev);
2441	unsigned long flags;
2442
2443	spin_lock_irqsave(&priv->lock, flags);
2444
2445	/* Disable TX and RX */
2446	ravb_rcv_snd_disable(ndev);
2447
2448	/* Modify RX Checksum setting */
2449	ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
2450
2451	/* Enable TX and RX */
2452	ravb_rcv_snd_enable(ndev);
2453
2454	spin_unlock_irqrestore(&priv->lock, flags);
2455}
2456
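/* The GbEth TOE configuration registers may only be updated while the
 * corresponding engine is halted: drop the enable bit in CSR0, wait for the
 * hardware to acknowledge, write the new value, then restore CSR0.
 */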
2457static int ravb_endisable_csum_gbeth(struct net_device *ndev, enum ravb_reg reg,
2458				     u32 val, u32 mask)
2459{
2460	u32 csr0 = CSR0_TPE | CSR0_RPE;
2461	int ret;
2462
2463	ravb_write(ndev, csr0 & ~mask, CSR0);
2464	ret = ravb_wait(ndev, CSR0, mask, 0);
2465	if (!ret)
2466		ravb_write(ndev, val, reg);
2467
2468	ravb_write(ndev, csr0, CSR0);
2469
2470	return ret;
2471}
2472
2473static int ravb_set_features_gbeth(struct net_device *ndev,
2474				   netdev_features_t features)
2475{
2476	netdev_features_t changed = ndev->features ^ features;
2477	struct ravb_private *priv = netdev_priv(ndev);
2478	unsigned long flags;
2479	int ret = 0;
2480	u32 val;
2481
2482	spin_lock_irqsave(&priv->lock, flags);
2483	if (changed & NETIF_F_RXCSUM) {
2484		if (features & NETIF_F_RXCSUM)
2485			val = CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4;
2486		else
2487			val = 0;
2488
2489		ret = ravb_endisable_csum_gbeth(ndev, CSR2, val, CSR0_RPE);
2490		if (ret)
2491			goto done;
2492	}
2493
2494	if (changed & NETIF_F_HW_CSUM) {
2495		if (features & NETIF_F_HW_CSUM)
2496			val = CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4;
2497		else
2498			val = 0;
2499
2500		ret = ravb_endisable_csum_gbeth(ndev, CSR1, val, CSR0_TPE);
2501		if (ret)
2502			goto done;
2503	}
2504
2505done:
2506	spin_unlock_irqrestore(&priv->lock, flags);
2507
2508	return ret;
2509}
2510
2511static int ravb_set_features_rcar(struct net_device *ndev,
2512				  netdev_features_t features)
2513{
2514	netdev_features_t changed = ndev->features ^ features;
2515
2516	if (changed & NETIF_F_RXCSUM)
2517		ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
2518
2519	return 0;
2520}
2521
2522static int ravb_set_features(struct net_device *ndev,
2523			     netdev_features_t features)
2524{
2525	struct ravb_private *priv = netdev_priv(ndev);
2526	const struct ravb_hw_info *info = priv->info;
2527	struct device *dev = &priv->pdev->dev;
2528	int ret;
2529
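	/* Only touch the hardware when the device is powered; otherwise just
	 * record the requested features in ndev->features, and the next
	 * hardware (re)initialization applies them. The noresume/noidle pair
	 * pins the runtime PM state so the check below cannot race with a
	 * concurrent suspend.
	 */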
2530	pm_runtime_get_noresume(dev);
2531
2532	if (pm_runtime_active(dev))
2533		ret = info->set_feature(ndev, features);
2534	else
2535		ret = 0;
2536
2537	pm_runtime_put_noidle(dev);
2538
2539	if (ret)
2540		return ret;
2541
2542	ndev->features = features;
2543
2544	return 0;
2545}
2546
2547static const struct net_device_ops ravb_netdev_ops = {
2548	.ndo_open		= ravb_open,
2549	.ndo_stop		= ravb_close,
2550	.ndo_start_xmit		= ravb_start_xmit,
2551	.ndo_select_queue	= ravb_select_queue,
2552	.ndo_get_stats		= ravb_get_stats,
2553	.ndo_set_rx_mode	= ravb_set_rx_mode,
2554	.ndo_tx_timeout		= ravb_tx_timeout,
2555	.ndo_eth_ioctl		= ravb_do_ioctl,
2556	.ndo_change_mtu		= ravb_change_mtu,
2557	.ndo_validate_addr	= eth_validate_addr,
2558	.ndo_set_mac_address	= eth_mac_addr,
2559	.ndo_set_features	= ravb_set_features,
2560};
2561
2562/* MDIO bus init function */
2563static int ravb_mdio_init(struct ravb_private *priv)
2564{
2565	struct platform_device *pdev = priv->pdev;
2566	struct device *dev = &pdev->dev;
2567	struct phy_device *phydev;
2568	struct device_node *pn;
2569	int error;
2570
2571	/* Bitbang init */
2572	priv->mdiobb.ops = &bb_ops;
2573
2574	/* MII controller setting */
2575	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
2576	if (!priv->mii_bus)
2577		return -ENOMEM;
2578
2579	/* Hook up MII support for ethtool */
2580	priv->mii_bus->name = "ravb_mii";
2581	priv->mii_bus->parent = dev;
2582	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2583		 pdev->name, pdev->id);
2584
2585	/* Register MDIO bus */
2586	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
2587	if (error)
2588		goto out_free_bus;
2589
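	/* This driver manages the PHY's power state itself across
	 * suspend/resume, so tell the PHY core not to do it again through
	 * the MDIO bus PM callbacks.
	 */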
2590	pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
2591	phydev = of_phy_find_device(pn);
2592	if (phydev) {
2593		phydev->mac_managed_pm = true;
2594		put_device(&phydev->mdio.dev);
2595	}
2596	of_node_put(pn);
2597
2598	return 0;
2599
2600out_free_bus:
2601	free_mdio_bitbang(priv->mii_bus);
2602	return error;
2603}
2604
2605/* MDIO bus release function */
2606static int ravb_mdio_release(struct ravb_private *priv)
2607{
2608	/* Unregister mdio bus */
2609	mdiobus_unregister(priv->mii_bus);
2610
2611	/* Free bitbang info */
2612	free_mdio_bitbang(priv->mii_bus);
2613
2614	return 0;
2615}
2616
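/* Per-SoC capability and ops tables. The OF match table below selects one
 * of these at probe time, and the rest of the driver dispatches through
 * priv->info instead of scattering SoC checks around.
 */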
2617static const struct ravb_hw_info ravb_gen3_hw_info = {
2618	.receive = ravb_rx_rcar,
2619	.set_rate = ravb_set_rate_rcar,
2620	.set_feature = ravb_set_features_rcar,
2621	.dmac_init = ravb_dmac_init_rcar,
2622	.emac_init = ravb_emac_init_rcar,
2623	.gstrings_stats = ravb_gstrings_stats,
2624	.gstrings_size = sizeof(ravb_gstrings_stats),
2625	.net_hw_features = NETIF_F_RXCSUM,
2626	.net_features = NETIF_F_RXCSUM,
2627	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2628	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2629	.rx_max_frame_size = SZ_2K,
2630	.rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
2631	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
2632	.internal_delay = 1,
2633	.tx_counters = 1,
2634	.multi_irqs = 1,
2635	.irq_en_dis = 1,
2636	.ccc_gac = 1,
2637	.nc_queues = 1,
2638	.magic_pkt = 1,
2639};
2640
2641static const struct ravb_hw_info ravb_gen2_hw_info = {
2642	.receive = ravb_rx_rcar,
2643	.set_rate = ravb_set_rate_rcar,
2644	.set_feature = ravb_set_features_rcar,
2645	.dmac_init = ravb_dmac_init_rcar,
2646	.emac_init = ravb_emac_init_rcar,
2647	.gstrings_stats = ravb_gstrings_stats,
2648	.gstrings_size = sizeof(ravb_gstrings_stats),
2649	.net_hw_features = NETIF_F_RXCSUM,
2650	.net_features = NETIF_F_RXCSUM,
2651	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2652	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2653	.rx_max_frame_size = SZ_2K,
2654	.rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
2655	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
2656	.aligned_tx = 1,
2657	.gptp = 1,
2658	.nc_queues = 1,
2659	.magic_pkt = 1,
2660};
2661
2662static const struct ravb_hw_info ravb_rzv2m_hw_info = {
2663	.receive = ravb_rx_rcar,
2664	.set_rate = ravb_set_rate_rcar,
2665	.set_feature = ravb_set_features_rcar,
2666	.dmac_init = ravb_dmac_init_rcar,
2667	.emac_init = ravb_emac_init_rcar,
2668	.gstrings_stats = ravb_gstrings_stats,
2669	.gstrings_size = sizeof(ravb_gstrings_stats),
2670	.net_hw_features = NETIF_F_RXCSUM,
2671	.net_features = NETIF_F_RXCSUM,
2672	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2673	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2674	.rx_max_frame_size = SZ_2K,
2675	.rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
2676	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
2677	.multi_irqs = 1,
2678	.err_mgmt_irqs = 1,
2679	.gptp = 1,
2680	.gptp_ref_clk = 1,
2681	.nc_queues = 1,
2682	.magic_pkt = 1,
2683};
2684
2685static const struct ravb_hw_info gbeth_hw_info = {
2686	.receive = ravb_rx_gbeth,
2687	.set_rate = ravb_set_rate_gbeth,
2688	.set_feature = ravb_set_features_gbeth,
2689	.dmac_init = ravb_dmac_init_gbeth,
2690	.emac_init = ravb_emac_init_gbeth,
2691	.gstrings_stats = ravb_gstrings_stats_gbeth,
2692	.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
2693	.net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
2694	.net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
2695	.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
2696	.tccr_mask = TCCR_TSRQ0,
2697	.rx_max_frame_size = SZ_8K,
2698	.rx_max_desc_use = 4080,
2699	.rx_desc_size = sizeof(struct ravb_rx_desc),
2700	.aligned_tx = 1,
2701	.tx_counters = 1,
2702	.carrier_counters = 1,
2703	.half_duplex = 1,
2704};
2705
2706static const struct of_device_id ravb_match_table[] = {
2707	{ .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
2708	{ .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
2709	{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
2710	{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
2711	{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
2712	{ .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
2713	{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
2714	{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
2715	{ }
2716};
2717MODULE_DEVICE_TABLE(of, ravb_match_table);
2718
2719static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
2720			  const char *ch, int *irq, irq_handler_t handler)
2721{
2722	struct platform_device *pdev = priv->pdev;
2723	struct net_device *ndev = priv->ndev;
2724	struct device *dev = &pdev->dev;
2725	const char *devname = dev_name(dev);
2726	unsigned long flags;
2727	int error, irq_num;
2728
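	/* With per-channel interrupts, each request gets a unique
	 * "<device>:<channel>" name so the lines can be told apart in
	 * /proc/interrupts; the single-interrupt fallback shares one line
	 * for the whole controller.
	 */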
2729	if (irq_name) {
2730		devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
2731		if (!devname)
2732			return -ENOMEM;
2733
2734		irq_num = platform_get_irq_byname(pdev, irq_name);
2735		flags = 0;
2736	} else {
2737		irq_num = platform_get_irq(pdev, 0);
2738		flags = IRQF_SHARED;
2739	}
2740	if (irq_num < 0)
2741		return irq_num;
2742
2743	if (irq)
2744		*irq = irq_num;
2745
2746	error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);
2747	if (error)
2748		netdev_err(ndev, "cannot request IRQ %s\n", devname);
2749
2750	return error;
2751}
2752
2753static int ravb_setup_irqs(struct ravb_private *priv)
2754{
2755	const struct ravb_hw_info *info = priv->info;
2756	struct net_device *ndev = priv->ndev;
2757	const char *irq_name, *emac_irq_name;
2758	int error;
2759
2760	if (!info->multi_irqs)
2761		return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt);
2762
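	/* R-Car class parts name the multi and EMAC lines "ch22"/"ch24";
	 * parts with separate error/management interrupts (RZ/V2M) use
	 * "dia"/"line3" instead.
	 */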
2763	if (info->err_mgmt_irqs) {
2764		irq_name = "dia";
2765		emac_irq_name = "line3";
2766	} else {
2767		irq_name = "ch22";
2768		emac_irq_name = "ch24";
2769	}
2770
2771	error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt);
2772	if (error)
2773		return error;
2774
2775	error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq,
2776			       ravb_emac_interrupt);
2777	if (error)
2778		return error;
2779
2780	if (info->err_mgmt_irqs) {
2781		error = ravb_setup_irq(priv, "err_a", "err_a", NULL, ravb_multi_interrupt);
2782		if (error)
2783			return error;
2784
2785		error = ravb_setup_irq(priv, "mgmt_a", "mgmt_a", NULL, ravb_multi_interrupt);
2786		if (error)
2787			return error;
2788	}
2789
2790	error = ravb_setup_irq(priv, "ch0", "ch0:rx_be", NULL, ravb_be_interrupt);
2791	if (error)
2792		return error;
2793
2794	error = ravb_setup_irq(priv, "ch1", "ch1:rx_nc", NULL, ravb_nc_interrupt);
2795	if (error)
2796		return error;
2797
2798	error = ravb_setup_irq(priv, "ch18", "ch18:tx_be", NULL, ravb_be_interrupt);
2799	if (error)
2800		return error;
2801
2802	return ravb_setup_irq(priv, "ch19", "ch19:tx_nc", NULL, ravb_nc_interrupt);
2803}
2804
2805static int ravb_probe(struct platform_device *pdev)
2806{
2807	struct device_node *np = pdev->dev.of_node;
2808	const struct ravb_hw_info *info;
2809	struct reset_control *rstc;
2810	struct ravb_private *priv;
2811	struct net_device *ndev;
2812	struct resource *res;
2813	int error, q;
2814
2815	if (!np) {
2816		dev_err(&pdev->dev,
2817			"this driver is required to be instantiated from device tree\n");
2818		return -EINVAL;
2819	}
2820
2821	rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
2822	if (IS_ERR(rstc))
2823		return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
2824				     "failed to get cpg reset\n");
2825
2826	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
2827				  NUM_TX_QUEUE, NUM_RX_QUEUE);
2828	if (!ndev)
2829		return -ENOMEM;
2830
2831	info = of_device_get_match_data(&pdev->dev);
2832
2833	ndev->features = info->net_features;
2834	ndev->hw_features = info->net_hw_features;
2835
2836	error = reset_control_deassert(rstc);
2837	if (error)
2838		goto out_free_netdev;
2839
2840	SET_NETDEV_DEV(ndev, &pdev->dev);
2841
2842	priv = netdev_priv(ndev);
2843	priv->info = info;
2844	priv->rstc = rstc;
2845	priv->ndev = ndev;
2846	priv->pdev = pdev;
2847	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
2848	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
2849	if (info->nc_queues) {
2850		priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
2851		priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
2852	}
2853
2854	error = ravb_setup_irqs(priv);
2855	if (error)
2856		goto out_reset_assert;
2857
2858	priv->clk = devm_clk_get(&pdev->dev, NULL);
2859	if (IS_ERR(priv->clk)) {
2860		error = PTR_ERR(priv->clk);
2861		goto out_reset_assert;
2862	}
2863
2864	if (info->gptp_ref_clk) {
2865		priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
2866		if (IS_ERR(priv->gptp_clk)) {
2867			error = PTR_ERR(priv->gptp_clk);
2868			goto out_reset_assert;
2869		}
2870	}
2871
2872	priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
2873	if (IS_ERR(priv->refclk)) {
2874		error = PTR_ERR(priv->refclk);
2875		goto out_reset_assert;
2876	}
2877	clk_prepare(priv->refclk);
2878
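	/* Runtime PM with a 100 ms autosuspend delay: the reference taken
	 * here keeps the device powered for the rest of probe and is
	 * dropped via pm_runtime_put_autosuspend() on success.
	 */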
2879	platform_set_drvdata(pdev, ndev);
2880	pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
2881	pm_runtime_use_autosuspend(&pdev->dev);
2882	pm_runtime_enable(&pdev->dev);
2883	error = pm_runtime_resume_and_get(&pdev->dev);
2884	if (error < 0)
2885		goto out_rpm_disable;
2886
2887	priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2888	if (IS_ERR(priv->addr)) {
2889		error = PTR_ERR(priv->addr);
2890		goto out_rpm_put;
2891	}
2892
2893	/* Set the Ether-specific entries in the device structure. */
2894	ndev->base_addr = res->start;
2895
2896	spin_lock_init(&priv->lock);
2897	INIT_WORK(&priv->work, ravb_tx_timeout_work);
2898
2899	error = of_get_phy_mode(np, &priv->phy_interface);
2900	if (error && error != -ENODEV)
2901		goto out_rpm_put;
2902
2903	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
2904	priv->avb_link_active_low =
2905		of_property_read_bool(np, "renesas,ether-link-active-low");
2906
2907	ndev->max_mtu = info->rx_max_frame_size -
2908		(ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
2909	ndev->min_mtu = ETH_MIN_MTU;
2910
2911	/* FIXME: R-Car Gen2 has a 4-byte alignment restriction on the TX
2912	 * buffer. Use two descriptors in that case: the first covers the
2913	 * aligned part of the buffer and the second covers the overflow
2914	 * caused by the alignment.
2915	 */
2916	priv->num_tx_desc = info->aligned_tx ? 2 : 1;
2917
2918	/* Set function */
2919	ndev->netdev_ops = &ravb_netdev_ops;
2920	ndev->ethtool_ops = &ravb_ethtool_ops;
2921
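	/* Derive the gPTP timer increment (GTI) from the AVB clock rate;
	 * this fails if the rate cannot be expressed by the hardware, so
	 * probing stops early instead of running with a broken PTP clock.
	 */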
2922	error = ravb_compute_gti(ndev);
2923	if (error)
2924		goto out_rpm_put;
2925
2926	ravb_parse_delay_mode(np, ndev);
2927
2928	/* Allocate descriptor base address table */
2929	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
2930	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
2931					    &priv->desc_bat_dma, GFP_KERNEL);
2932	if (!priv->desc_bat) {
2933		dev_err(&pdev->dev,
2934			"Cannot allocate desc base address table (size %d bytes)\n",
2935			priv->desc_bat_size);
2936		error = -ENOMEM;
2937		goto out_rpm_put;
2938	}
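	/* Mark all descriptor base address table entries as unused; queues
	 * that are brought up later overwrite their entries when the
	 * descriptor rings are formatted.
	 */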
2939	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
2940		priv->desc_bat[q].die_dt = DT_EOS;
2941
2942	/* Initialise HW timestamp list */
2943	INIT_LIST_HEAD(&priv->ts_skb_list);
2944
2945	/* Debug message level */
2946	priv->msg_enable = RAVB_DEF_MSG_ENABLE;
2947
2948	/* Set config mode as this is needed for PHY initialization. */
2949	error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
2950	if (error)
2951		goto out_rpm_put;
2952
2953	/* Read and set MAC address */
2954	ravb_read_mac_address(np, ndev);
2955	if (!is_valid_ether_addr(ndev->dev_addr)) {
2956		dev_warn(&pdev->dev,
2957			 "no valid MAC address supplied, using a random one\n");
2958		eth_hw_addr_random(ndev);
2959	}
2960
2961	/* MDIO bus init */
2962	error = ravb_mdio_init(priv);
2963	if (error) {
2964		dev_err(&pdev->dev, "failed to initialize MDIO\n");
2965		goto out_reset_mode;
2966	}
2967
2968	/* Undo previous switch to config opmode. */
2969	error = ravb_set_opmode(ndev, CCC_OPC_RESET);
2970	if (error)
2971		goto out_mdio_release;
2972
2973	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
2974	if (info->nc_queues)
2975		netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
2976
2977	/* Network device register */
2978	error = register_netdev(ndev);
2979	if (error)
2980		goto out_napi_del;
2981
2982	device_set_wakeup_capable(&pdev->dev, 1);
2983
2984	/* Print device information */
2985	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
2986		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2987
2988	pm_runtime_mark_last_busy(&pdev->dev);
2989	pm_runtime_put_autosuspend(&pdev->dev);
2990
2991	return 0;
2992
2993out_napi_del:
2994	if (info->nc_queues)
2995		netif_napi_del(&priv->napi[RAVB_NC]);
2996
2997	netif_napi_del(&priv->napi[RAVB_BE]);
2998out_mdio_release:
2999	ravb_mdio_release(priv);
3000out_reset_mode:
3001	ravb_set_opmode(ndev, CCC_OPC_RESET);
3002	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
3003			  priv->desc_bat_dma);
3004out_rpm_put:
3005	pm_runtime_put(&pdev->dev);
3006out_rpm_disable:
3007	pm_runtime_disable(&pdev->dev);
3008	pm_runtime_dont_use_autosuspend(&pdev->dev);
3009	clk_unprepare(priv->refclk);
3010out_reset_assert:
3011	reset_control_assert(rstc);
3012out_free_netdev:
3013	free_netdev(ndev);
3014	return error;
3015}
3016
3017static void ravb_remove(struct platform_device *pdev)
3018{
3019	struct net_device *ndev = platform_get_drvdata(pdev);
3020	struct ravb_private *priv = netdev_priv(ndev);
3021	const struct ravb_hw_info *info = priv->info;
3022	struct device *dev = &priv->pdev->dev;
3023	int error;
3024
3025	error = pm_runtime_resume_and_get(dev);
3026	if (error < 0)
3027		return;
3028
3029	unregister_netdev(ndev);
3030	if (info->nc_queues)
3031		netif_napi_del(&priv->napi[RAVB_NC]);
3032	netif_napi_del(&priv->napi[RAVB_BE]);
3033
3034	ravb_mdio_release(priv);
3035
3036	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
3037			  priv->desc_bat_dma);
3038
3039	pm_runtime_put_sync_suspend(&pdev->dev);
3040	pm_runtime_disable(&pdev->dev);
3041	pm_runtime_dont_use_autosuspend(dev);
3042	clk_unprepare(priv->refclk);
3043	reset_control_assert(priv->rstc);
3044	free_netdev(ndev);
3045	platform_set_drvdata(pdev, NULL);
3046}
3047
3048static int ravb_wol_setup(struct net_device *ndev)
3049{
3050	struct ravb_private *priv = netdev_priv(ndev);
3051	const struct ravb_hw_info *info = priv->info;
3052
3053	/* Disable interrupts by clearing the interrupt masks. */
3054	ravb_write(ndev, 0, RIC0);
3055	ravb_write(ndev, 0, RIC2);
3056	ravb_write(ndev, 0, TIC);
3057
3058	/* Only allow ECI interrupts */
3059	synchronize_irq(priv->emac_irq);
3060	if (info->nc_queues)
3061		napi_disable(&priv->napi[RAVB_NC]);
3062	napi_disable(&priv->napi[RAVB_BE]);
3063	ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
3064
3065	/* Enable MagicPacket */
3066	ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
3067
3068	if (priv->info->ccc_gac)
3069		ravb_ptp_stop(ndev);
3070
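	/* The EMAC interrupt, restricted to Magic Packet detection via
	 * ECSIPR above, is the wake source from here on.
	 */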
3071	return enable_irq_wake(priv->emac_irq);
3072}
3073
3074static int ravb_wol_restore(struct net_device *ndev)
3075{
3076	struct ravb_private *priv = netdev_priv(ndev);
3077	const struct ravb_hw_info *info = priv->info;
3078	int error;
3079
3080	/* Set reset mode to rearm the WoL logic. */
3081	error = ravb_set_opmode(ndev, CCC_OPC_RESET);
3082	if (error)
3083		return error;
3084
3085	/* Set AVB config mode. */
3086	error = ravb_set_config_mode(ndev);
3087	if (error)
3088		return error;
3089
3090	if (priv->info->ccc_gac)
3091		ravb_ptp_init(ndev, priv->pdev);
3092
3093	if (info->nc_queues)
3094		napi_enable(&priv->napi[RAVB_NC]);
3095	napi_enable(&priv->napi[RAVB_BE]);
3096
3097	/* Disable MagicPacket */
3098	ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
3099
3100	ravb_close(ndev);
3101
3102	return disable_irq_wake(priv->emac_irq);
3103}
3104
3105static int ravb_suspend(struct device *dev)
3106{
3107	struct net_device *ndev = dev_get_drvdata(dev);
3108	struct ravb_private *priv = netdev_priv(ndev);
3109	int ret;
3110
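	/* Three cases: an interface that is down only needs its reset line
	 * asserted; a running interface with WoL armed stays powered with
	 * Magic Packet detection enabled; otherwise the interface is closed
	 * and the device is fully runtime-suspended.
	 */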
3111	if (!netif_running(ndev))
3112		goto reset_assert;
3113
3114	netif_device_detach(ndev);
3115
3116	if (priv->wol_enabled)
3117		return ravb_wol_setup(ndev);
3118
3119	ret = ravb_close(ndev);
3120	if (ret)
3121		return ret;
3122
3123	ret = pm_runtime_force_suspend(&priv->pdev->dev);
3124	if (ret)
3125		return ret;
3126
3127reset_assert:
3128	return reset_control_assert(priv->rstc);
3129}
3130
3131static int ravb_resume(struct device *dev)
3132{
3133	struct net_device *ndev = dev_get_drvdata(dev);
3134	struct ravb_private *priv = netdev_priv(ndev);
3135	int ret;
3136
3137	ret = reset_control_deassert(priv->rstc);
3138	if (ret)
3139		return ret;
3140
3141	if (!netif_running(ndev))
3142		return 0;
3143
3144	/* If WoL is enabled restore the interface. */
3145	if (priv->wol_enabled) {
3146		ret = ravb_wol_restore(ndev);
3147		if (ret)
3148			return ret;
3149	} else {
3150		ret = pm_runtime_force_resume(dev);
3151		if (ret)
3152			return ret;
3153	}
3154
3155	/* Reopening the interface will restore the device to the working state. */
3156	ret = ravb_open(ndev);
3157	if (ret < 0)
3158		goto out_rpm_put;
3159
3160	ravb_set_rx_mode(ndev);
3161	netif_device_attach(ndev);
3162
3163	return 0;
3164
3165out_rpm_put:
3166	if (!priv->wol_enabled) {
3167		pm_runtime_mark_last_busy(dev);
3168		pm_runtime_put_autosuspend(dev);
3169	}
3170
3171	return ret;
3172}
3173
3174static int ravb_runtime_suspend(struct device *dev)
3175{
3176	struct net_device *ndev = dev_get_drvdata(dev);
3177	struct ravb_private *priv = netdev_priv(ndev);
3178
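	/* Only the optional external reference clock is gated here; the
	 * module clock is expected to be handled by the SoC's clock/power
	 * domain as part of runtime PM.
	 */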
3179	clk_disable(priv->refclk);
3180
3181	return 0;
3182}
3183
3184static int ravb_runtime_resume(struct device *dev)
3185{
3186	struct net_device *ndev = dev_get_drvdata(dev);
3187	struct ravb_private *priv = netdev_priv(ndev);
3188
3189	return clk_enable(priv->refclk);
3190}
3191
3192static const struct dev_pm_ops ravb_dev_pm_ops = {
3193	SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
3194	RUNTIME_PM_OPS(ravb_runtime_suspend, ravb_runtime_resume, NULL)
3195};
3196
3197static struct platform_driver ravb_driver = {
3198	.probe		= ravb_probe,
3199	.remove_new	= ravb_remove,
3200	.driver = {
3201		.name	= "ravb",
3202		.pm	= pm_ptr(&ravb_dev_pm_ops),
3203		.of_match_table = ravb_match_table,
3204	},
3205};
3206
3207module_platform_driver(ravb_driver);
3208
3209MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
3210MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
3211MODULE_LICENSE("GPL v2");