// SPDX-License-Identifier: GPL-2.0
/*  SuperH Ethernet device driver
 *
 *  Copyright (C) 2014 Renesas Electronics Corporation
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
 *  Copyright (C) 2013-2017 Cogent Embedded, Inc.
 *  Copyright (C) 2014 Codethink Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR| \
		NETIF_MSG_TX_ERR)

#define SH_ETH_OFFSET_INVALID	((u16)~0)

#define SH_ETH_OFFSET_DEFAULTS			\
	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID

/* Use some intentionally tricky logic here to initialize the whole struct to
 * 0xffff, but then override certain fields, requiring us to indicate that we
 * "know" that there are overrides in this structure, and we'll need to
 * disable that warning from W=1 builds. GCC has supported this option since
 * 4.2.X, but the macros available to do this are only defined for GCC 8 and
 * later.
 */
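/* Each per-SoC table below maps this driver's generic register indices to
 * the MMIO offsets of one controller family; entries left at
 * SH_ETH_OFFSET_INVALID mark registers the family does not implement, which
 * the sh_eth_read()/sh_eth_write() accessors catch with a WARN_ON().
 */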
__diag_push();
__diag_ignore(GCC, 8, "-Woverride-init",
	      "logic to initialize all and then override some is OK");
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[ECMR]		= 0x0300,
	[RFLR]		= 0x0308,
	[ECSR]		= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]		= 0x0320,
	[PSR]		= 0x0328,
	[RDMLR]		= 0x0340,
	[IPGR]		= 0x0350,
	[APR]		= 0x0354,
	[MPR]		= 0x0358,
	[RFCF]		= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]		= 0x03c0,
	[MALR]		= 0x03c8,
	[TROCR]		= 0x03d0,
	[CDCR]		= 0x03d4,
	[LCCR]		= 0x03d8,
	[CNDCR]		= 0x03dc,
	[CEFCR]		= 0x03e4,
	[FRECR]		= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]		= 0x03f4,
	[MAFCR]		= 0x03f8,

	[EDMR]		= 0x0200,
	[EDTRR]		= 0x0208,
	[EDRRR]		= 0x0210,
	[TDLAR]		= 0x0218,
	[RDLAR]		= 0x0220,
	[EESR]		= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]		= 0x0240,
	[TFTR]		= 0x0248,
	[FDR]		= 0x0250,
	[RMCR]		= 0x0258,
	[TFUCR]		= 0x0264,
	[RFOCR]		= 0x0268,
	[RMIIMODE]      = 0x026c,
	[FCFTR]		= 0x0270,
	[TRIMD]		= 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0004,
	[EDRRR]		= 0x0008,
	[TDLAR]		= 0x000c,
	[RDLAR]		= 0x0010,
	[EESR]		= 0x0014,
	[EESIPR]	= 0x0018,
	[TRSCER]	= 0x001c,
	[RMFCR]		= 0x0020,
	[TFTR]		= 0x0024,
	[FDR]		= 0x0028,
	[RMCR]		= 0x002c,
	[EDOCR]		= 0x0030,
	[FCFTR]		= 0x0034,
	[RPADIR]	= 0x0038,
	[TRIMD]		= 0x003c,
	[RBWAR]		= 0x0040,
	[RDFAR]		= 0x0044,
	[TBRAR]		= 0x004c,
	[TDFAR]		= 0x0050,

	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
};
__diag_pop();

static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);

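/* Register accessors: translate a generic register index to this SoC's MMIO
 * offset via mdp->reg_offset[], and warn (WARN_ON) instead of touching the
 * bus when the register does not exist on the current controller family.
 */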
static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->addr + offset);
}

static u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->addr + offset);
}

static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
			  u32 set)
{
	sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
		     enum_index);
}

static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index)
{
	return mdp->reg_offset[enum_index];
}

static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
			     int enum_index)
{
	u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->tsu_addr + offset);
}

static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
{
	u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->tsu_addr + offset);
}

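/* Swap each 32-bit word of a packet buffer in place on little-endian CPUs;
 * controllers without the hw_swap capability cannot byte-swap DMA data
 * themselves, so the driver swaps in software (a no-op on big-endian
 * kernels).
 */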
static void sh_eth_soft_swap(char *src, int len)
{
#ifdef __LITTLE_ENDIAN
	u32 *p = (u32 *)src;
	u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32));

	for (; p < maxp; p++)
		*p = swab32(*p);
#endif
}

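/* Program the PHY interface type into the RMII_MII register; the encoding
 * (0x3 RGMII, 0x2 GMII, 0x1 MII, 0x0 RMII) matches the switch cases below,
 * with MII used as the fallback when no mode has been configured.
 */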
static void sh_eth_select_mii(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 value;

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID:
		value = 0x3;
		break;
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev,
			    "PHY interface mode was not setup. Set to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
}

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
	mdelay(1);
}

static int sh_eth_soft_reset(struct net_device *ndev)
{
	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
	mdelay(3);
	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);

	return 0;
}

static int sh_eth_check_soft_reset(struct net_device *ndev)
{
	int cnt;

	for (cnt = 100; cnt > 0; cnt--) {
		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
			return 0;
		mdelay(1);
	}

	netdev_err(ndev, "Device reset failed\n");
	return -ETIMEDOUT;
}

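/* GETHER cores use a self-clearing soft reset: assert EDMR_SRST_GETHER,
 * wait for the hardware to clear it, then reinitialize the descriptor
 * fetch registers and, where present, the HW CRC and MII mode settings.
 */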
static int sh_eth_soft_reset_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);

	ret = sh_eth_check_soft_reset(ndev);
	if (ret)
		return ret;

	/* Table Init */
	sh_eth_write(ndev, 0, TDLAR);
	sh_eth_write(ndev, 0, TDFAR);
	sh_eth_write(ndev, 0, TDFXR);
	sh_eth_write(ndev, 0, TDFFR);
	sh_eth_write(ndev, 0, RDLAR);
	sh_eth_write(ndev, 0, RDFAR);
	sh_eth_write(ndev, 0, RDFXR);
	sh_eth_write(ndev, 0, RDFFR);

	/* Reset HW CRC register */
	if (mdp->cd->csmr)
		sh_eth_write(ndev, 0, CSMR);

	/* Select MII mode */
	if (mdp->cd->select_mii)
		sh_eth_select_mii(ndev);

	return ret;
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (WARN_ON(!mdp->cd->gecmr))
		return;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	}
}

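/* Per-SoC capability descriptors: each sh_eth_cpu_data instance below picks
 * the register layout, reset/rate-setting callbacks, interrupt masks and
 * feature flags for one supported controller.
 */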
#ifdef CONFIG_OF
/* R7S72100 */
static struct sh_eth_cpu_data r7s72100_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
			  EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP |
			  EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000070f,

	.trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE,

	.no_psr		= 1,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.csmr		= 1,
	.rx_csum	= 1,
	.tsu		= 1,
	.no_tx_cntrs	= 1,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	sh_eth_chip_reset(ndev);

	sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000070f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.csmr		= 1,
	.rx_csum	= 1,
	.tsu		= 1,
	.select_mii	= 1,
	.magic		= 1,
	.cexcr		= 1,
};

/* There is CPU dependent code */
static void sh_eth_set_rate_rcar(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
		break;
	case 100:/* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
		break;
	}
}

/* R-Car Gen1 */
static struct sh_eth_cpu_data rcar_gen1_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
	.fdr_value	= 0x00000f0f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_xdfar	= 1,
};

/* R-Car Gen2 and RZ/G1 */
static struct sh_eth_cpu_data rcar_gen2_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
			  ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
	.fdr_value	= 0x00000f0f,

	.trscer_err_mask = TRSCER_RMAFCE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_xdfar	= 1,
	.rmiimode	= 1,
	.magic		= 1,
};

/* R8A77980 */
static struct sh_eth_cpu_data r8a77980_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type  = SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
			  ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check       = EESR_FTC | EESR_CD | EESR_TRO,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER |
			  EESR_TFE | EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.nbst		= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.csmr		= 1,
	.rx_csum	= 1,
	.select_mii	= 1,
	.magic		= 1,
	.cexcr		= 1,
};

/* R7S9210 */
static struct sh_eth_cpu_data r7s9210_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
			  EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
			  EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
			  EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
			  EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.fdr_value	= 0x0000070f,

	.trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
};
#endif /* CONFIG_OF */

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
		break;
	case 100:/* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rtrate		= 1,
	.dual_port	= 1,
};

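/* On the SH7757 GETHERC, the ARSTR reset clears the MAC address registers
 * of both ports, so MAHR/MALR are saved and restored around the chip reset
 * using the fixed per-port addresses defined below.
 */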
#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	u32 mahr[2], malr[2];
	int i;

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	sh_eth_chip_reset(ndev);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (WARN_ON(!mdp->cd->gecmr))
		return;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000072f,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.tsu		= 1,
	.cexcr		= 1,
	.dual_port	= 1,
};

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.tsu		= 1,
	.csmr		= 1,
	.rx_csum	= 1,
	.select_mii	= 1,
	.magic		= 1,
	.cexcr		= 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.tsu		= 1,
	.irq_flags	= IRQF_SHARED,
	.magic		= 1,
	.cexcr		= 1,
	.rx_csum	= 1,
	.dual_port	= 1,
};

static struct sh_eth_cpu_data sh7619_data = {
	.soft_reset	= sh_eth_soft_reset,

	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

static struct sh_eth_cpu_data sh771x_data = {
	.soft_reset	= sh_eth_soft_reset,

	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.trscer_err_mask = TRSCER_RMAFCE,

	.tsu		= 1,
	.dual_port	= 1,
};

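/* Fill in any capability fields that a sh_eth_cpu_data instance left at
 * zero with safe defaults, so the rest of the driver can use them without
 * further checks.
 */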
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->trscer_err_mask)
		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}

static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}

/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/* Get the MAC address from the SuperH MAC address registers
 *
 * SuperH's Ethernet device has no ROM for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g); to use this device, the MAC address must be
 * set in the bootloader beforehand.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		eth_hw_addr_set(ndev, mac);
	} else {
		u32 mahr = sh_eth_read(ndev, MAHR);
		u32 malr = sh_eth_read(ndev, MALR);
		u8 addr[ETH_ALEN];

		addr[0] = (mahr >> 24) & 0xFF;
		addr[1] = (mahr >> 16) & 0xFF;
		addr[2] = (mahr >>  8) & 0xFF;
		addr[3] = (mahr >>  0) & 0xFF;
		addr[4] = (malr >>  8) & 0xFF;
		addr[5] = (malr >>  0) & 0xFF;
		eth_hw_addr_set(ndev, addr);
	}
}

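/* The MDIO bus is bit-banged through the PIR register: the mdiobb_ops below
 * drive the clock (PIR_MDC), data direction (PIR_MMD) and data (PIR_MDO /
 * PIR_MDI) bits, with an optional set_gate() hook run before each access.
 */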
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
};

static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
	u32 pir;

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	pir = ioread32(bitbang->addr);
	if (set)
		pir |=  mask;
	else
		pir &= ~mask;
	iowrite32(pir, bitbang->addr);
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MMD, bit);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDO, bit);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return (ioread32(bitbang->addr) & PIR_MDI) != 0;
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDC, bit);
}

/* mdio bus control struct */
static const struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

/* free Tx skb function */
static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry;
	bool sent;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
		if (sent_only && !sent)
			break;
		/* TACT bit must be checked before all the following reads */
		dma_rmb();
		netif_info(mdp, tx_done, ndev,
			   "tx entry %d status 0x%08x\n",
			   entry, le32_to_cpu(txdesc->status));
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&mdp->pdev->dev,
					 le32_to_cpu(txdesc->addr),
					 le32_to_cpu(txdesc->len) >> 16,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_le32(TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_le32(TD_TDLE);

		if (sent) {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
		}
	}
	return free_num;
}

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize, i;

	if (mdp->rx_ring) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i]) {
				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];

				dma_unmap_single(&mdp->pdev->dev,
						 le32_to_cpu(rxdesc->addr),
						 ALIGN(mdp->rx_buf_sz, 32),
						 DMA_FROM_DEVICE);
			}
		}
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++)
			dev_kfree_skb(mdp->rx_skbuff[i]);
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	if (mdp->tx_ring) {
		sh_eth_tx_free(ndev, false);

		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}

	/* Free Tx skb ringbuffer */
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u32 buf_len;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, skbuff_size);
		if (skb == NULL)
			break;
		sh_eth_set_receive_align(skb);

		/* The size of the buffer is a multiple of 32 bytes. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
			kfree_skb(skb);
			break;
		}
		mdp->rx_skbuff[i] = skb;

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->len = cpu_to_le32(buf_len << 16);
		rxdesc->addr = cpu_to_le32(dma_addr);
		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);

		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (mdp->cd->xdfar_rw)
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	if (rxdesc)
		rxdesc->status |= cpu_to_le32(RD_RDLE);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_le32(TD_TFP);
		txdesc->len = cpu_to_le32(0);
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (mdp->cd->xdfar_rw)
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_le32(TD_TDLE);
}

/* Allocate the skb rings and descriptor buffers */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
				 GFP_KERNEL);
	if (!mdp->rx_skbuff)
		return -ENOMEM;

	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
				 GFP_KERNEL);
	if (!mdp->tx_skbuff)
		goto ring_free;

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
					  &mdp->rx_desc_dma, GFP_KERNEL);
	if (!mdp->rx_ring)
		goto ring_free;

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
					  &mdp->tx_desc_dma, GFP_KERNEL);
	if (!mdp->tx_ring)
		goto ring_free;
	return 0;

ring_free:
	/* Free Rx and Tx skb ring buffer and DMA buffer */
	sh_eth_ring_free(ndev);

	return -ENOMEM;
}

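/* Bring the controller out of reset and program the descriptor rings, FIFO
 * thresholds, interrupt masks, MAC address and E-MAC mode; on success the
 * Rx engine has been started via EDRRR_R and interrupts are enabled.
 */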
static int sh_eth_dev_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	/* Soft Reset */
	ret = mdp->cd->soft_reset(ndev);
	if (ret)
		return ret;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control (enable multiple-packets per rx irq) */
	sh_eth_write(ndev, RMCR_RNC, RMCR);

	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

	/* DMA transfer burst mode */
	if (mdp->cd->nbst)
		sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST);

	/* Burst cycle count upper-limit */
	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_modify(ndev, EESR, 0, 0);
	mdp->irq_enabled = true;
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
	sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
		     (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
		     ECMR_TE | ECMR_RE, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, 1, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, 1, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	return ret;
}

static void sh_eth_dev_exit(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Deactivate all TX descriptors, so DMA should stop at next
	 * packet boundary if it's currently running
	 */
	for (i = 0; i < mdp->num_tx_ring; i++)
		mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);

	/* Disable TX FIFO egress to MAC */
	sh_eth_rcv_snd_disable(ndev);

	/* Stop RX DMA at next packet boundary */
	sh_eth_write(ndev, 0, EDRRR);

	/* Aside from TX DMA, we can't tell when the hardware is
	 * really stopped, so we need to reset to make sure.
	 * Before doing that, wait for long enough to *probably*
	 * finish transmitting the last packet and poll stats.
	 */
	msleep(2); /* max frame time at 10 Mbps < 1250 us */
	sh_eth_get_stats(ndev);
	mdp->cd->soft_reset(ndev);

	/* Set the RMII mode again if required */
	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Set MAC address again */
	update_mac_address(ndev);
}

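/* Rx checksum offload: the hardware appends a 2-byte checksum to the packet
 * data, which is converted to CHECKSUM_COMPLETE here and then trimmed off
 * the skb.
 */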
static void sh_eth_rx_csum(struct sk_buff *skb)
{
	u8 *hw_csum;

	/* The hardware checksum is 2 bytes appended to packet data */
	if (unlikely(skb->len < sizeof(__sum16)))
		return;
	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - sizeof(__sum16));
}

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	int limit;
	struct sk_buff *skb;
	u32 desc_status;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 buf_len;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
		/* RACT bit must be checked before all the following reads */
		dma_rmb();
		desc_status = le32_to_cpu(rxdesc->status);
		pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL;

		if (--boguscnt < 0)
			break;

		netif_info(mdp, rx_status, ndev,
			   "rx entry %d status 0x%08x len %d\n",
			   entry, desc_status, pkt_len);

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/* On almost all GETHER/ETHER controllers, the Receive Frame
		 * State (RFS) bits in Receive Descriptor 0 occupy bits 9 to 0.
		 * On the R8A7740 and R7S72100, however, the RFS bits occupy
		 * bits 25 to 16, so the driver must shift them right by 16.
		 */
		if (mdp->cd->csmr)
			desc_status >>= 16;

		skb = mdp->rx_skbuff[entry];
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else	if (skb) {
			dma_addr = le32_to_cpu(rxdesc->addr);
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(dma_addr, 4)),
					pkt_len + 2);
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			dma_unmap_single(&mdp->pdev->dev, dma_addr,
					 ALIGN(mdp->rx_buf_sz, 32),
					 DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			if (ndev->features & NETIF_F_RXCSUM)
				sh_eth_rx_csum(skb);
			netif_receive_skb(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
			if (desc_status & RD_RFS8)
				ndev->stats.multicast++;
		}
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is 32 byte boundary. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		rxdesc->len = cpu_to_le32(buf_len << 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, skbuff_size);
			if (skb == NULL)
				break;	/* Better luck next round. */
			sh_eth_set_receive_align(skb);
			dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
						  buf_len, DMA_FROM_DEVICE);
			if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
				kfree_skb(skb);
				break;
			}
			mdp->rx_skbuff[entry] = skb;

			skb_checksum_none_assert(skb);
			rxdesc->addr = cpu_to_le32(dma_addr);
		}
		dma_wmb(); /* RACT bit must be set after all the above writes */
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE);
		else
			rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) {
			u32 count = (sh_eth_read(ndev, RDFAR) -
				     sh_eth_read(ndev, RDLAR)) >> 4;

			mdp->cur_rx = count;
			mdp->dirty_rx = count;
		}
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	*quota -= limit - boguscnt - 1;

	return *quota <= 0;
}

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}

/* E-MAC interrupt handler */
static void sh_eth_emac_interrupt(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;

	felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR);
	sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
	if (felic_stat & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (felic_stat & ECSR_MPD)
		pm_wakeup_event(&mdp->pdev->dev, 0);
	if (felic_stat & ECSR_LCHNG) {
		/* Link Changed */
		if (mdp->cd->no_psr || mdp->no_ether_link)
			return;
		link_stat = sh_eth_read(ndev, PSR);
		if (mdp->ether_link_active_low)
			link_stat = ~link_stat;
		if (!(link_stat & PSR_LMON)) {
			sh_eth_rcv_snd_disable(ndev);
		} else {
			/* Link Up */
			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0);
			/* clear int */
			sh_eth_modify(ndev, ECSR, 0, 0);
			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP);
			/* enable tx and rx */
			sh_eth_rcv_snd_enable(ndev);
		}
	}
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 mask;

	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		/* dmesg */
		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			   intr_status, mdp->cur_tx, mdp->dirty_tx,
			   (u32)ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_tx_free(ndev, true);

		/* SH7712 BUG */
		if (edtrr ^ mdp->cd->edtrr_trns) {
			/* tx dma start */
			sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

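/* Top-half interrupt handler: Rx events are handed off to NAPI with Rx
 * interrupts masked, Tx completions are freed inline, and E-MAC and error
 * conditions are dispatched to the handlers above.
 */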
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status, intr_enable;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing the ECI interrupt to be
	 * always enabled since it's the one that comes through regardless of
	 * the mask, and we need to fully handle it in sh_eth_emac_interrupt()
	 * in order to quench it, as it doesn't get cleared by just writing 1
	 * to the ECI bit...
	 */
	intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | EESIPR_ECIIP;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
			   cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto out;

	if (unlikely(!mdp->irq_enabled)) {
		sh_eth_write(ndev, 0, EESIPR);
		goto out;
	}

	if (intr_status & EESR_RX_CHECK) {
		if (napi_schedule_prep(&mdp->napi)) {
			/* Mask Rx interrupts */
			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
				     EESIPR);
			__napi_schedule(&mdp->napi);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
				    intr_status, intr_enable);
		}
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		/* Clear Tx interrupts */
		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

		sh_eth_tx_free(ndev, true);
		netif_wake_queue(ndev);
	}

	/* E-MAC interrupt */
	if (intr_status & EESR_ECI)
		sh_eth_emac_interrupt(ndev);

	if (intr_status & cd->eesr_err_check) {
		/* Clear error interrupts */
		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

		sh_eth_error(ndev, intr_status);
	}

out:
	spin_unlock(&mdp->lock);

	return ret;
}

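/* NAPI poll: drain Rx work until either the budget is exhausted or no Rx
 * events remain, then re-enable Rx interrupts through EESIPR.
 */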
static int sh_eth_poll(struct napi_struct *napi, int budget)
{
	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
						  napi);
	struct net_device *ndev = napi->dev;
	int quota = budget;
	u32 intr_status;

	for (;;) {
		intr_status = sh_eth_read(ndev, EESR);
		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Clear Rx interrupts */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;
	}

	napi_complete(napi);

	/* Reenable Rx interrupts */
	if (mdp->irq_enabled)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
	return budget - quota;
}

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	unsigned long flags;
	int new_state = 0;

	spin_lock_irqsave(&mdp->lock, flags);

	/* Disable TX and RX right here if the E-MAC link change is ignored */
1951	if (mdp->cd->no_psr || mdp->no_ether_link)
1952		sh_eth_rcv_snd_disable(ndev);
1953
1954	if (phydev->link) {
1955		if (phydev->duplex != mdp->duplex) {
1956			new_state = 1;
1957			mdp->duplex = phydev->duplex;
1958			if (mdp->cd->set_duplex)
1959				mdp->cd->set_duplex(ndev);
1960		}
1961
1962		if (phydev->speed != mdp->speed) {
1963			new_state = 1;
1964			mdp->speed = phydev->speed;
1965			if (mdp->cd->set_rate)
1966				mdp->cd->set_rate(ndev);
1967		}
1968		if (!mdp->link) {
1969			sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
1970			new_state = 1;
1971			mdp->link = phydev->link;
1972		}
1973	} else if (mdp->link) {
1974		new_state = 1;
1975		mdp->link = 0;
1976		mdp->speed = 0;
1977		mdp->duplex = -1;
1978	}
1979
1980	/* Enable TX and RX right here, if the E-MAC link change is ignored */
1981	if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
1982		sh_eth_rcv_snd_enable(ndev);
1983
1984	spin_unlock_irqrestore(&mdp->lock, flags);
1985
1986	if (new_state && netif_msg_link(mdp))
1987		phy_print_status(phydev);
1988}
1989
1990/* PHY init function */
1991static int sh_eth_phy_init(struct net_device *ndev)
1992{
1993	struct device_node *np = ndev->dev.parent->of_node;
1994	struct sh_eth_private *mdp = netdev_priv(ndev);
1995	struct phy_device *phydev;
1996
1997	mdp->link = 0;
1998	mdp->speed = 0;
1999	mdp->duplex = -1;
2000
2001	/* Try to connect to the PHY */
2002	if (np) {
2003		struct device_node *pn;
2004
2005		pn = of_parse_phandle(np, "phy-handle", 0);
2006		phydev = of_phy_connect(ndev, pn,
2007					sh_eth_adjust_link, 0,
2008					mdp->phy_interface);
2009
2010		of_node_put(pn);
2011		if (!phydev)
2012			phydev = ERR_PTR(-ENOENT);
2013	} else {
2014		char phy_id[MII_BUS_ID_SIZE + 3];
2015
2016		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
2017			 mdp->mii_bus->id, mdp->phy_id);
2018
2019		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
2020				     mdp->phy_interface);
2021	}
2022
2023	if (IS_ERR(phydev)) {
2024		netdev_err(ndev, "failed to connect PHY\n");
2025		return PTR_ERR(phydev);
2026	}
2027
2028	/* mask with MAC supported features */
2029	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
2030		phy_set_max_speed(phydev, SPEED_100);
2031
2032	/* Indicate that the MAC is responsible for managing PHY PM */
2033	phydev->mac_managed_pm = true;
2034	phy_attached_info(phydev);
2035
2036	return 0;
2037}
2038
2039/* PHY control start function */
2040static int sh_eth_phy_start(struct net_device *ndev)
2041{
2042	int ret;
2043
2044	ret = sh_eth_phy_init(ndev);
2045	if (ret)
2046		return ret;
2047
2048	phy_start(ndev->phydev);
2049
2050	return 0;
2051}
2052
2053/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
2054 * version must be bumped as well.  Just adding registers up to that
2055 * limit is fine, as long as the existing register indices don't
2056 * change.
2057 */
2058#define SH_ETH_REG_DUMP_VERSION		1
2059#define SH_ETH_REG_DUMP_MAX_REGS	256
2060
2061static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
2062{
2063	struct sh_eth_private *mdp = netdev_priv(ndev);
2064	struct sh_eth_cpu_data *cd = mdp->cd;
2065	u32 *valid_map;
2066	size_t len;
2067
2068	BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
2069
2070	/* Dump starts with a bitmap that tells ethtool which
2071	 * registers are defined for this chip.
2072	 */
2073	len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
2074	if (buf) {
2075		valid_map = buf;
2076		buf += len;
2077	} else {
2078		valid_map = NULL;
2079	}
2080
2081	/* Add a register to the dump, if it has a defined offset.
2082	 * This automatically skips most undefined registers, but for
2083	 * some it is also necessary to check a capability flag in
2084	 * struct sh_eth_cpu_data.
2085	 */
2086#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
2087#define add_reg_from(reg, read_expr) do {				\
2088		if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {	\
2089			if (buf) {					\
2090				mark_reg_valid(reg);			\
2091				*buf++ = read_expr;			\
2092			}						\
2093			++len;						\
2094		}							\
2095	} while (0)
2096#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
2097#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
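
/* Usage sketch (editorial, not in the original source): the dump is
 * built in two passes over the same add_reg()/add_tsu_reg() calls.
 * With buf == NULL nothing is stored and only the size is computed:
 *
 *	size = __sh_eth_get_regs(ndev, NULL);	// bytes, including the
 *						// 8-word validity bitmap
 *
 * With a real buffer, each defined register is read and flagged in the
 * leading bitmap.  The final "return len * 4" converts the word count
 * to the byte count ethtool expects.
 */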
2098
2099	add_reg(EDSR);
2100	add_reg(EDMR);
2101	add_reg(EDTRR);
2102	add_reg(EDRRR);
2103	add_reg(EESR);
2104	add_reg(EESIPR);
2105	add_reg(TDLAR);
2106	if (!cd->no_xdfar)
2107		add_reg(TDFAR);
2108	add_reg(TDFXR);
2109	add_reg(TDFFR);
2110	add_reg(RDLAR);
2111	if (!cd->no_xdfar)
2112		add_reg(RDFAR);
2113	add_reg(RDFXR);
2114	add_reg(RDFFR);
2115	add_reg(TRSCER);
2116	add_reg(RMFCR);
2117	add_reg(TFTR);
2118	add_reg(FDR);
2119	add_reg(RMCR);
2120	add_reg(TFUCR);
2121	add_reg(RFOCR);
2122	if (cd->rmiimode)
2123		add_reg(RMIIMODE);
2124	add_reg(FCFTR);
2125	if (cd->rpadir)
2126		add_reg(RPADIR);
2127	if (!cd->no_trimd)
2128		add_reg(TRIMD);
2129	add_reg(ECMR);
2130	add_reg(ECSR);
2131	add_reg(ECSIPR);
2132	add_reg(PIR);
2133	if (!cd->no_psr)
2134		add_reg(PSR);
2135	add_reg(RDMLR);
2136	add_reg(RFLR);
2137	add_reg(IPGR);
2138	if (cd->apr)
2139		add_reg(APR);
2140	if (cd->mpr)
2141		add_reg(MPR);
2142	add_reg(RFCR);
2143	add_reg(RFCF);
2144	if (cd->tpauser)
2145		add_reg(TPAUSER);
2146	add_reg(TPAUSECR);
2147	if (cd->gecmr)
2148		add_reg(GECMR);
2149	if (cd->bculr)
2150		add_reg(BCULR);
2151	add_reg(MAHR);
2152	add_reg(MALR);
2153	if (!cd->no_tx_cntrs) {
2154		add_reg(TROCR);
2155		add_reg(CDCR);
2156		add_reg(LCCR);
2157		add_reg(CNDCR);
2158	}
2159	add_reg(CEFCR);
2160	add_reg(FRECR);
2161	add_reg(TSFRCR);
2162	add_reg(TLFRCR);
2163	if (cd->cexcr) {
2164		add_reg(CERCR);
2165		add_reg(CEECR);
2166	}
2167	add_reg(MAFCR);
2168	if (cd->rtrate)
2169		add_reg(RTRATE);
2170	if (cd->csmr)
2171		add_reg(CSMR);
2172	if (cd->select_mii)
2173		add_reg(RMII_MII);
2174	if (cd->tsu) {
2175		add_tsu_reg(ARSTR);
2176		add_tsu_reg(TSU_CTRST);
2177		if (cd->dual_port) {
2178			add_tsu_reg(TSU_FWEN0);
2179			add_tsu_reg(TSU_FWEN1);
2180			add_tsu_reg(TSU_FCM);
2181			add_tsu_reg(TSU_BSYSL0);
2182			add_tsu_reg(TSU_BSYSL1);
2183			add_tsu_reg(TSU_PRISL0);
2184			add_tsu_reg(TSU_PRISL1);
2185			add_tsu_reg(TSU_FWSL0);
2186			add_tsu_reg(TSU_FWSL1);
2187		}
2188		add_tsu_reg(TSU_FWSLC);
2189		if (cd->dual_port) {
2190			add_tsu_reg(TSU_QTAGM0);
2191			add_tsu_reg(TSU_QTAGM1);
2192			add_tsu_reg(TSU_FWSR);
2193			add_tsu_reg(TSU_FWINMK);
2194			add_tsu_reg(TSU_ADQT0);
2195			add_tsu_reg(TSU_ADQT1);
2196			add_tsu_reg(TSU_VTAG0);
2197			add_tsu_reg(TSU_VTAG1);
2198		}
2199		add_tsu_reg(TSU_ADSBSY);
2200		add_tsu_reg(TSU_TEN);
2201		add_tsu_reg(TSU_POST1);
2202		add_tsu_reg(TSU_POST2);
2203		add_tsu_reg(TSU_POST3);
2204		add_tsu_reg(TSU_POST4);
2205		/* This is the start of a table, not just a single register. */
2206		if (buf) {
2207			unsigned int i;
2208
2209			mark_reg_valid(TSU_ADRH0);
2210			for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
2211				*buf++ = ioread32(mdp->tsu_addr +
2212						  mdp->reg_offset[TSU_ADRH0] +
2213						  i * 4);
2214		}
2215		len += SH_ETH_TSU_CAM_ENTRIES * 2;
2216	}
2217
2218#undef mark_reg_valid
2219#undef add_reg_from
2220#undef add_reg
2221#undef add_tsu_reg
2222
2223	return len * 4;
2224}
2225
2226static int sh_eth_get_regs_len(struct net_device *ndev)
2227{
2228	return __sh_eth_get_regs(ndev, NULL);
2229}
2230
2231static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
2232			    void *buf)
2233{
2234	struct sh_eth_private *mdp = netdev_priv(ndev);
2235
2236	regs->version = SH_ETH_REG_DUMP_VERSION;
2237
2238	pm_runtime_get_sync(&mdp->pdev->dev);
2239	__sh_eth_get_regs(ndev, buf);
2240	pm_runtime_put_sync(&mdp->pdev->dev);
2241}
2242
2243static u32 sh_eth_get_msglevel(struct net_device *ndev)
2244{
2245	struct sh_eth_private *mdp = netdev_priv(ndev);
2246	return mdp->msg_enable;
2247}
2248
2249static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
2250{
2251	struct sh_eth_private *mdp = netdev_priv(ndev);
2252	mdp->msg_enable = value;
2253}
2254
2255static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
2256	"rx_current", "tx_current",
2257	"rx_dirty", "tx_dirty",
2258};
2259#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
2260
2261static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
2262{
2263	switch (sset) {
2264	case ETH_SS_STATS:
2265		return SH_ETH_STATS_LEN;
2266	default:
2267		return -EOPNOTSUPP;
2268	}
2269}
2270
2271static void sh_eth_get_ethtool_stats(struct net_device *ndev,
2272				     struct ethtool_stats *stats, u64 *data)
2273{
2274	struct sh_eth_private *mdp = netdev_priv(ndev);
2275	int i = 0;
2276
2277	/* device-specific stats */
2278	data[i++] = mdp->cur_rx;
2279	data[i++] = mdp->cur_tx;
2280	data[i++] = mdp->dirty_rx;
2281	data[i++] = mdp->dirty_tx;
2282}
2283
2284static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
2285{
2286	switch (stringset) {
2287	case ETH_SS_STATS:
2288		memcpy(data, sh_eth_gstrings_stats,
2289		       sizeof(sh_eth_gstrings_stats));
2290		break;
2291	}
2292}
2293
2294static void sh_eth_get_ringparam(struct net_device *ndev,
2295				 struct ethtool_ringparam *ring,
2296				 struct kernel_ethtool_ringparam *kernel_ring,
2297				 struct netlink_ext_ack *extack)
2298{
2299	struct sh_eth_private *mdp = netdev_priv(ndev);
2300
2301	ring->rx_max_pending = RX_RING_MAX;
2302	ring->tx_max_pending = TX_RING_MAX;
2303	ring->rx_pending = mdp->num_rx_ring;
2304	ring->tx_pending = mdp->num_tx_ring;
2305}
2306
2307static int sh_eth_set_ringparam(struct net_device *ndev,
2308				struct ethtool_ringparam *ring,
2309				struct kernel_ethtool_ringparam *kernel_ring,
2310				struct netlink_ext_ack *extack)
2311{
2312	struct sh_eth_private *mdp = netdev_priv(ndev);
2313	int ret;
2314
2315	if (ring->tx_pending > TX_RING_MAX ||
2316	    ring->rx_pending > RX_RING_MAX ||
2317	    ring->tx_pending < TX_RING_MIN ||
2318	    ring->rx_pending < RX_RING_MIN)
2319		return -EINVAL;
2320	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2321		return -EINVAL;
2322
2323	if (netif_running(ndev)) {
2324		netif_device_detach(ndev);
2325		netif_tx_disable(ndev);
2326
2327		/* Serialise with the interrupt handler and NAPI, then
2328		 * disable interrupts.  We have to clear the
2329		 * irq_enabled flag first to ensure that interrupts
2330		 * won't be re-enabled.
2331		 */
2332		mdp->irq_enabled = false;
2333		synchronize_irq(ndev->irq);
2334		napi_synchronize(&mdp->napi);
2335		sh_eth_write(ndev, 0x0000, EESIPR);
2336
2337		sh_eth_dev_exit(ndev);
2338
2339		/* Free all the skbuffs in the Rx queue and the DMA buffers. */
2340		sh_eth_ring_free(ndev);
2341	}
2342
2343	/* Set new parameters */
2344	mdp->num_rx_ring = ring->rx_pending;
2345	mdp->num_tx_ring = ring->tx_pending;
2346
2347	if (netif_running(ndev)) {
2348		ret = sh_eth_ring_init(ndev);
2349		if (ret < 0) {
2350			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
2351				   __func__);
2352			return ret;
2353		}
2354		ret = sh_eth_dev_init(ndev);
2355		if (ret < 0) {
2356			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2357				   __func__);
2358			return ret;
2359		}
2360
2361		netif_device_attach(ndev);
2362	}
2363
2364	return 0;
2365}
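
/* Usage note (editorial): this is the backend of "ethtool -G", e.g.
 *
 *	ethtool -G eth0 rx 128 tx 128
 *
 * Requests outside the RX_RING_MIN..RX_RING_MAX and
 * TX_RING_MIN..TX_RING_MAX bounds are rejected with -EINVAL above.
 */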
2366
2367static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2368{
2369	struct sh_eth_private *mdp = netdev_priv(ndev);
2370
2371	wol->supported = 0;
2372	wol->wolopts = 0;
2373
2374	if (mdp->cd->magic) {
2375		wol->supported = WAKE_MAGIC;
2376		wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
2377	}
2378}
2379
2380static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2381{
2382	struct sh_eth_private *mdp = netdev_priv(ndev);
2383
2384	if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
2385		return -EOPNOTSUPP;
2386
2387	mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
2388
2389	device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);
2390
2391	return 0;
2392}
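
/* Usage note (editorial): on chips with the "magic" capability this
 * pairs with "ethtool -s eth0 wol g" to arm MagicPacket wakeup; any
 * option other than WAKE_MAGIC is refused with -EOPNOTSUPP above.
 */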
2393
2394static const struct ethtool_ops sh_eth_ethtool_ops = {
2395	.get_regs_len	= sh_eth_get_regs_len,
2396	.get_regs	= sh_eth_get_regs,
2397	.nway_reset	= phy_ethtool_nway_reset,
2398	.get_msglevel	= sh_eth_get_msglevel,
2399	.set_msglevel	= sh_eth_set_msglevel,
2400	.get_link	= ethtool_op_get_link,
2401	.get_strings	= sh_eth_get_strings,
2402	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
2403	.get_sset_count     = sh_eth_get_sset_count,
2404	.get_ringparam	= sh_eth_get_ringparam,
2405	.set_ringparam	= sh_eth_set_ringparam,
2406	.get_link_ksettings = phy_ethtool_get_link_ksettings,
2407	.set_link_ksettings = phy_ethtool_set_link_ksettings,
2408	.get_wol	= sh_eth_get_wol,
2409	.set_wol	= sh_eth_set_wol,
2410};
2411
2412/* network device open function */
2413static int sh_eth_open(struct net_device *ndev)
2414{
2415	struct sh_eth_private *mdp = netdev_priv(ndev);
2416	int ret;
2417
2418	pm_runtime_get_sync(&mdp->pdev->dev);
2419
2420	napi_enable(&mdp->napi);
2421
2422	ret = request_irq(ndev->irq, sh_eth_interrupt,
2423			  mdp->cd->irq_flags, ndev->name, ndev);
2424	if (ret) {
2425		netdev_err(ndev, "Cannot assign IRQ number\n");
2426		goto out_napi_off;
2427	}
2428
2429	/* Descriptor set */
2430	ret = sh_eth_ring_init(ndev);
2431	if (ret)
2432		goto out_free_irq;
2433
2434	/* device init */
2435	ret = sh_eth_dev_init(ndev);
2436	if (ret)
2437		goto out_free_irq;
2438
2439	/* PHY control start */
2440	ret = sh_eth_phy_start(ndev);
2441	if (ret)
2442		goto out_free_irq;
2443
2444	netif_start_queue(ndev);
2445
2446	mdp->is_opened = 1;
2447
2448	return ret;
2449
2450out_free_irq:
2451	free_irq(ndev->irq, ndev);
2452out_napi_off:
2453	napi_disable(&mdp->napi);
2454	pm_runtime_put_sync(&mdp->pdev->dev);
2455	return ret;
2456}
2457
2458/* Timeout function */
2459static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
2460{
2461	struct sh_eth_private *mdp = netdev_priv(ndev);
2462	struct sh_eth_rxdesc *rxdesc;
2463	int i;
2464
2465	netif_stop_queue(ndev);
2466
2467	netif_err(mdp, timer, ndev,
2468		  "transmit timed out, status %8.8x, resetting...\n",
2469		  sh_eth_read(ndev, EESR));
2470
2471	/* tx_errors count up */
2472	ndev->stats.tx_errors++;
2473
2474	/* Free all the skbuffs in the Rx queue. */
2475	for (i = 0; i < mdp->num_rx_ring; i++) {
2476		rxdesc = &mdp->rx_ring[i];
2477		rxdesc->status = cpu_to_le32(0);
2478		rxdesc->addr = cpu_to_le32(0xBADF00D0);
2479		dev_kfree_skb(mdp->rx_skbuff[i]);
2480		mdp->rx_skbuff[i] = NULL;
2481	}
2482	for (i = 0; i < mdp->num_tx_ring; i++) {
2483		dev_kfree_skb(mdp->tx_skbuff[i]);
2484		mdp->tx_skbuff[i] = NULL;
2485	}
2486
2487	/* device init */
2488	sh_eth_dev_init(ndev);
2489
2490	netif_start_queue(ndev);
2491}
2492
2493/* Packet transmit function */
2494static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
2495				     struct net_device *ndev)
2496{
2497	struct sh_eth_private *mdp = netdev_priv(ndev);
2498	struct sh_eth_txdesc *txdesc;
2499	dma_addr_t dma_addr;
2500	u32 entry;
2501	unsigned long flags;
2502
2503	spin_lock_irqsave(&mdp->lock, flags);
2504	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2505		if (!sh_eth_tx_free(ndev, true)) {
2506			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2507			netif_stop_queue(ndev);
2508			spin_unlock_irqrestore(&mdp->lock, flags);
2509			return NETDEV_TX_BUSY;
2510		}
2511	}
2512	spin_unlock_irqrestore(&mdp->lock, flags);
2513
2514	if (skb_put_padto(skb, ETH_ZLEN))
2515		return NETDEV_TX_OK;
2516
2517	entry = mdp->cur_tx % mdp->num_tx_ring;
2518	mdp->tx_skbuff[entry] = skb;
2519	txdesc = &mdp->tx_ring[entry];
2520	/* soft swap. */
2521	if (!mdp->cd->hw_swap)
2522		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2523	dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
2524				  DMA_TO_DEVICE);
2525	if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
2526		kfree_skb(skb);
2527		return NETDEV_TX_OK;
2528	}
2529	txdesc->addr = cpu_to_le32(dma_addr);
2530	txdesc->len  = cpu_to_le32(skb->len << 16);
2531
2532	dma_wmb(); /* TACT bit must be set after all the above writes */
2533	if (entry >= mdp->num_tx_ring - 1)
2534		txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
2535	else
2536		txdesc->status |= cpu_to_le32(TD_TACT);
2537
2538	wmb(); /* cur_tx must be incremented after TACT bit was set */
2539	mdp->cur_tx++;
2540
2541	if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
2542		sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
2543
2544	return NETDEV_TX_OK;
2545}
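
/* Ordering note (editorial): the two barriers above are what keep this
 * path safe against the Tx completion side -- dma_wmb() makes the
 * descriptor contents visible to the DMA engine before TACT hands the
 * descriptor over, and wmb() keeps cur_tx from advancing until TACT is
 * set, so sh_eth_tx_free() never reclaims a half-built descriptor.
 */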
2546
2547/* The statistics registers have write-clear behaviour, which means we
2548 * will lose any increment between the read and write.  We mitigate
2549 * this by only clearing when we read a non-zero value, so we will
2550 * never falsely report a total of zero.
2551 */
2552static void
2553sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
2554{
2555	u32 delta = sh_eth_read(ndev, reg);
2556
2557	if (delta) {
2558		*stat += delta;
2559		sh_eth_write(ndev, 0, reg);
2560	}
2561}
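
/* Example (editorial): if TROCR reads back 3, those 3 events are added
 * to the running total and the register is cleared; when it reads 0 the
 * write is skipped, so a quiet counter is never cleared and the total
 * can never be falsely reported as zero.
 */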
2562
2563static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2564{
2565	struct sh_eth_private *mdp = netdev_priv(ndev);
2566
2567	if (mdp->cd->no_tx_cntrs)
2568		return &ndev->stats;
2569
2570	if (!mdp->is_opened)
2571		return &ndev->stats;
2572
2573	sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
2574	sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
2575	sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
2576
2577	if (mdp->cd->cexcr) {
2578		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2579				   CERCR);
2580		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2581				   CEECR);
2582	} else {
2583		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2584				   CNDCR);
2585	}
2586
2587	return &ndev->stats;
2588}
2589
2590/* device close function */
2591static int sh_eth_close(struct net_device *ndev)
2592{
2593	struct sh_eth_private *mdp = netdev_priv(ndev);
2594
2595	netif_stop_queue(ndev);
2596
2597	/* Serialise with the interrupt handler and NAPI, then disable
2598	 * interrupts.  We have to clear the irq_enabled flag first to
2599	 * ensure that interrupts won't be re-enabled.
2600	 */
2601	mdp->irq_enabled = false;
2602	synchronize_irq(ndev->irq);
2603	napi_disable(&mdp->napi);
2604	sh_eth_write(ndev, 0x0000, EESIPR);
2605
2606	sh_eth_dev_exit(ndev);
2607
2608	/* PHY Disconnect */
2609	if (ndev->phydev) {
2610		phy_stop(ndev->phydev);
2611		phy_disconnect(ndev->phydev);
2612	}
2613
2614	free_irq(ndev->irq, ndev);
2615
2616	/* Free all the skbuffs in the Rx queue and the DMA buffers. */
2617	sh_eth_ring_free(ndev);
2618
2619	mdp->is_opened = 0;
2620
2621	pm_runtime_put(&mdp->pdev->dev);
2622
2623	return 0;
2624}
2625
2626static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
2627{
2628	if (netif_running(ndev))
2629		return -EBUSY;
2630
2631	ndev->mtu = new_mtu;
2632	netdev_update_features(ndev);
2633
2634	return 0;
2635}
2636
2637/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
2638static u32 sh_eth_tsu_get_post_mask(int entry)
2639{
2640	return 0x0f << (28 - ((entry % 8) * 4));
2641}
2642
2643static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2644{
2645	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2646}
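
/* Worked example (editorial, not in the original source): each
 * TSU_POSTn register packs eight CAM entries, four bits per entry,
 * entry 0 in the top nibble.  For entry 5 on port 1:
 *
 *	sh_eth_tsu_get_post_mask(5)	= 0x0f << (28 - (5 % 8) * 4)
 *					= 0x0f << 8 = 0x00000f00
 *	sh_eth_tsu_get_post_bit(mdp, 5)	= (0x08 >> (1 << 1)) << 8
 *					= 0x02 << 8 = 0x00000200
 *
 * The mask covers the entry's whole nibble; the bit selects only this
 * port's lane within it.
 */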
2647
2648static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2649					     int entry)
2650{
2651	struct sh_eth_private *mdp = netdev_priv(ndev);
2652	int reg = TSU_POST1 + entry / 8;
2653	u32 tmp;
2654
2655	tmp = sh_eth_tsu_read(mdp, reg);
2656	sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg);
2657}
2658
2659static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2660					      int entry)
2661{
2662	struct sh_eth_private *mdp = netdev_priv(ndev);
2663	int reg = TSU_POST1 + entry / 8;
2664	u32 post_mask, ref_mask, tmp;
2665
2666	post_mask = sh_eth_tsu_get_post_mask(entry);
2667	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2668
2669	tmp = sh_eth_tsu_read(mdp, reg);
2670	sh_eth_tsu_write(mdp, tmp & ~post_mask, reg);
2671
2672	/* If the other port still enables this entry, return "true" */
2673	return tmp & ref_mask;
2674}
2675
2676static int sh_eth_tsu_busy(struct net_device *ndev)
2677{
2678	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2679	struct sh_eth_private *mdp = netdev_priv(ndev);
2680
2681	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2682		udelay(10);
2683		timeout--;
2684		if (timeout <= 0) {
2685			netdev_err(ndev, "%s: timeout\n", __func__);
2686			return -ETIMEDOUT;
2687		}
2688	}
2689
2690	return 0;
2691}
2692
2693static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset,
2694				  const u8 *addr)
2695{
2696	struct sh_eth_private *mdp = netdev_priv(ndev);
2697	u32 val;
2698
2699	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2700	iowrite32(val, mdp->tsu_addr + offset);
2701	if (sh_eth_tsu_busy(ndev) < 0)
2702		return -EBUSY;
2703
2704	val = addr[4] << 8 | addr[5];
2705	iowrite32(val, mdp->tsu_addr + offset + 4);
2706	if (sh_eth_tsu_busy(ndev) < 0)
2707		return -EBUSY;
2708
2709	return 0;
2710}
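
/* Illustration (editorial, not in the original source): one CAM entry
 * spreads a MAC address over two 32-bit registers.  For
 * 00:11:22:33:44:55 the two writes above are
 *
 *	iowrite32(0x00112233, mdp->tsu_addr + offset);		// addr[0..3]
 *	iowrite32(0x00004455, mdp->tsu_addr + offset + 4);	// addr[4..5]
 */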
2711
2712static void sh_eth_tsu_read_entry(struct net_device *ndev, u16 offset, u8 *addr)
2713{
2714	struct sh_eth_private *mdp = netdev_priv(ndev);
2715	u32 val;
2716
2717	val = ioread32(mdp->tsu_addr + offset);
2718	addr[0] = (val >> 24) & 0xff;
2719	addr[1] = (val >> 16) & 0xff;
2720	addr[2] = (val >> 8) & 0xff;
2721	addr[3] = val & 0xff;
2722	val = ioread32(mdp->tsu_addr + offset + 4);
2723	addr[4] = (val >> 8) & 0xff;
2724	addr[5] = val & 0xff;
2725}
2726
2728static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2729{
2730	struct sh_eth_private *mdp = netdev_priv(ndev);
2731	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2732	int i;
2733	u8 c_addr[ETH_ALEN];
2734
2735	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2736		sh_eth_tsu_read_entry(ndev, reg_offset, c_addr);
2737		if (ether_addr_equal(addr, c_addr))
2738			return i;
2739	}
2740
2741	return -ENOENT;
2742}
2743
2744static int sh_eth_tsu_find_empty(struct net_device *ndev)
2745{
2746	u8 blank[ETH_ALEN];
2747	int entry;
2748
2749	memset(blank, 0, sizeof(blank));
2750	entry = sh_eth_tsu_find_entry(ndev, blank);
2751	return (entry < 0) ? -ENOMEM : entry;
2752}
2753
2754static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2755					      int entry)
2756{
2757	struct sh_eth_private *mdp = netdev_priv(ndev);
2758	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2759	int ret;
2760	u8 blank[ETH_ALEN];
2761
2762	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2763			 ~(1 << (31 - entry)), TSU_TEN);
2764
2765	memset(blank, 0, sizeof(blank));
2766	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2767	if (ret < 0)
2768		return ret;
2769	return 0;
2770}
2771
2772static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2773{
2774	struct sh_eth_private *mdp = netdev_priv(ndev);
2775	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2776	int i, ret;
2777
2778	if (!mdp->cd->tsu)
2779		return 0;
2780
2781	i = sh_eth_tsu_find_entry(ndev, addr);
2782	if (i < 0) {
2783		/* No entry found, create one */
2784		i = sh_eth_tsu_find_empty(ndev);
2785		if (i < 0)
2786			return -ENOMEM;
2787		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2788		if (ret < 0)
2789			return ret;
2790
2791		/* Enable the entry */
2792		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2793				 (1 << (31 - i)), TSU_TEN);
2794	}
2795
2796	/* Entry found or created, enable POST */
2797	sh_eth_tsu_enable_cam_entry_post(ndev, i);
2798
2799	return 0;
2800}
2801
2802static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2803{
2804	struct sh_eth_private *mdp = netdev_priv(ndev);
2805	int i, ret;
2806
2807	if (!mdp->cd->tsu)
2808		return 0;
2809
2810	i = sh_eth_tsu_find_entry(ndev, addr);
2811	if (i >= 0) {
2812		/* Entry found */
2813		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2814			goto done;
2815
2816		/* Disable the entry if both ports were disabled */
2817		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2818		if (ret < 0)
2819			return ret;
2820	}
2821done:
2822	return 0;
2823}
2824
2825static int sh_eth_tsu_purge_all(struct net_device *ndev)
2826{
2827	struct sh_eth_private *mdp = netdev_priv(ndev);
2828	int i, ret;
2829
2830	if (!mdp->cd->tsu)
2831		return 0;
2832
2833	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2834		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2835			continue;
2836
2837		/* Disable the entry if both ports were disabled */
2838		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2839		if (ret < 0)
2840			return ret;
2841	}
2842
2843	return 0;
2844}
2845
2846static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2847{
2848	struct sh_eth_private *mdp = netdev_priv(ndev);
2849	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2850	u8 addr[ETH_ALEN];
2851	int i;
2852
2853	if (!mdp->cd->tsu)
2854		return;
2855
2856	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2857		sh_eth_tsu_read_entry(ndev, reg_offset, addr);
2858		if (is_multicast_ether_addr(addr))
2859			sh_eth_tsu_del_entry(ndev, addr);
2860	}
2861}
2862
2863/* Update promiscuous flag and multicast filter */
2864static void sh_eth_set_rx_mode(struct net_device *ndev)
2865{
2866	struct sh_eth_private *mdp = netdev_priv(ndev);
2867	u32 ecmr_bits;
2868	int mcast_all = 0;
2869	unsigned long flags;
2870
2871	spin_lock_irqsave(&mdp->lock, flags);
2872	/* Initial condition is MCT = 1, PRM = 0.
2873	 * Depending on ndev->flags, set PRM or clear MCT
2874	 */
2875	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
2876	if (mdp->cd->tsu)
2877		ecmr_bits |= ECMR_MCT;
2878
2879	if (!(ndev->flags & IFF_MULTICAST)) {
2880		sh_eth_tsu_purge_mcast(ndev);
2881		mcast_all = 1;
2882	}
2883	if (ndev->flags & IFF_ALLMULTI) {
2884		sh_eth_tsu_purge_mcast(ndev);
2885		ecmr_bits &= ~ECMR_MCT;
2886		mcast_all = 1;
2887	}
2888
2889	if (ndev->flags & IFF_PROMISC) {
2890		sh_eth_tsu_purge_all(ndev);
2891		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2892	} else if (mdp->cd->tsu) {
2893		struct netdev_hw_addr *ha;
2894		netdev_for_each_mc_addr(ha, ndev) {
2895			if (mcast_all && is_multicast_ether_addr(ha->addr))
2896				continue;
2897
2898			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2899				if (!mcast_all) {
2900					sh_eth_tsu_purge_mcast(ndev);
2901					ecmr_bits &= ~ECMR_MCT;
2902					mcast_all = 1;
2903				}
2904			}
2905		}
2906	}
2907
2908	/* update the ethernet mode */
2909	sh_eth_write(ndev, ecmr_bits, ECMR);
2910
2911	spin_unlock_irqrestore(&mdp->lock, flags);
2912}
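
/* Editorial summary of the resulting ECMR filter modes:
 *
 *	MCT=1, PRM=0	unicast plus CAM-listed multicast (default, TSU)
 *	MCT=0, PRM=0	all multicast accepted (IFF_ALLMULTI / CAM full)
 *	MCT=0, PRM=1	promiscuous (IFF_PROMISC)
 */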
2913
2914static void sh_eth_set_rx_csum(struct net_device *ndev, bool enable)
2915{
2916	struct sh_eth_private *mdp = netdev_priv(ndev);
2917	unsigned long flags;
2918
2919	spin_lock_irqsave(&mdp->lock, flags);
2920
2921	/* Disable TX and RX */
2922	sh_eth_rcv_snd_disable(ndev);
2923
2924	/* Modify RX Checksum setting */
2925	sh_eth_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
2926
2927	/* Enable TX and RX */
2928	sh_eth_rcv_snd_enable(ndev);
2929
2930	spin_unlock_irqrestore(&mdp->lock, flags);
2931}
2932
2933static int sh_eth_set_features(struct net_device *ndev,
2934			       netdev_features_t features)
2935{
2936	netdev_features_t changed = ndev->features ^ features;
2937	struct sh_eth_private *mdp = netdev_priv(ndev);
2938
2939	if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum)
2940		sh_eth_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
2941
2942	ndev->features = features;
2943
2944	return 0;
2945}
2946
2947static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2948{
2949	if (!mdp->port)
2950		return TSU_VTAG0;
2951	else
2952		return TSU_VTAG1;
2953}
2954
2955static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2956				  __be16 proto, u16 vid)
2957{
2958	struct sh_eth_private *mdp = netdev_priv(ndev);
2959	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2960
2961	if (unlikely(!mdp->cd->tsu))
2962		return -EPERM;
2963
2964	/* No filtering if vid = 0 */
2965	if (!vid)
2966		return 0;
2967
2968	mdp->vlan_num_ids++;
2969
2970	/* The controller has one VLAN tag HW filter. So, if the filter is
2971	 * already enabled, the driver disables it and all VLAN IDs then pass through.
2972	 */
2973	if (mdp->vlan_num_ids > 1) {
2974		/* disable VLAN filter */
2975		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2976		return 0;
2977	}
2978
2979	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2980			 vtag_reg_index);
2981
2982	return 0;
2983}
2984
2985static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2986				   __be16 proto, u16 vid)
2987{
2988	struct sh_eth_private *mdp = netdev_priv(ndev);
2989	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2990
2991	if (unlikely(!mdp->cd->tsu))
2992		return -EPERM;
2993
2994	/* No filtering if vid = 0 */
2995	if (!vid)
2996		return 0;
2997
2998	mdp->vlan_num_ids--;
2999	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
3000
3001	return 0;
3002}
3003
3004/* SuperH's TSU register init function */
3005static void sh_eth_tsu_init(struct sh_eth_private *mdp)
3006{
3007	if (!mdp->cd->dual_port) {
3008		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
3009		sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
3010				 TSU_FWSLC);	/* Enable POST registers */
3011		return;
3012	}
3013
3014	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
3015	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
3016	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
3017	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
3018	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
3019	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
3020	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
3021	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
3022	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
3023	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
3024	sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
3025	sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
3026	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
3027	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
3028	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
3029	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
3030	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
3031	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
3032	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
3033}
3034
3035/* MDIO bus release function */
3036static int sh_mdio_release(struct sh_eth_private *mdp)
3037{
3038	/* unregister mdio bus */
3039	mdiobus_unregister(mdp->mii_bus);
3040
3041	/* free bitbang info */
3042	free_mdio_bitbang(mdp->mii_bus);
3043
3044	return 0;
3045}
3046
3047static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
3048{
3049	int res;
3050
3051	pm_runtime_get_sync(bus->parent);
3052	res = mdiobb_read(bus, phy, reg);
3053	pm_runtime_put(bus->parent);
3054
3055	return res;
3056}
3057
3058static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
3059{
3060	int res;
3061
3062	pm_runtime_get_sync(bus->parent);
3063	res = mdiobb_write(bus, phy, reg, val);
3064	pm_runtime_put(bus->parent);
3065
3066	return res;
3067}
3068
3069/* MDIO bus init function */
3070static int sh_mdio_init(struct sh_eth_private *mdp,
3071			struct sh_eth_plat_data *pd)
3072{
3073	int ret;
3074	struct bb_info *bitbang;
3075	struct platform_device *pdev = mdp->pdev;
3076	struct device *dev = &mdp->pdev->dev;
3077
3078	/* create bit control struct for PHY */
3079	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
3080	if (!bitbang)
3081		return -ENOMEM;
3082
3083	/* bitbang init */
3084	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
3085	bitbang->set_gate = pd->set_mdio_gate;
3086	bitbang->ctrl.ops = &bb_ops;
3087
3088	/* MII controller setting */
3089	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
3090	if (!mdp->mii_bus)
3091		return -ENOMEM;
3092
3093	/* Wrap accessors with Runtime PM-aware ops */
3094	mdp->mii_bus->read = sh_mdiobb_read;
3095	mdp->mii_bus->write = sh_mdiobb_write;
3096
3097	/* Hook up MII support for ethtool */
3098	mdp->mii_bus->name = "sh_mii";
3099	mdp->mii_bus->parent = dev;
3100	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
3101		 pdev->name, pdev->id);
3102
3103	/* register MDIO bus */
3104	if (pd->phy_irq > 0)
3105		mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
3106
3107	ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
3108	if (ret)
3109		goto out_free_bus;
3110
3111	return 0;
3112
3113out_free_bus:
3114	free_mdio_bitbang(mdp->mii_bus);
3115	return ret;
3116}
3117
3118static const u16 *sh_eth_get_register_offset(int register_type)
3119{
3120	const u16 *reg_offset = NULL;
3121
3122	switch (register_type) {
3123	case SH_ETH_REG_GIGABIT:
3124		reg_offset = sh_eth_offset_gigabit;
3125		break;
3126	case SH_ETH_REG_FAST_RCAR:
3127		reg_offset = sh_eth_offset_fast_rcar;
3128		break;
3129	case SH_ETH_REG_FAST_SH4:
3130		reg_offset = sh_eth_offset_fast_sh4;
3131		break;
3132	case SH_ETH_REG_FAST_SH3_SH2:
3133		reg_offset = sh_eth_offset_fast_sh3_sh2;
3134		break;
3135	}
3136
3137	return reg_offset;
3138}
3139
3140static const struct net_device_ops sh_eth_netdev_ops = {
3141	.ndo_open		= sh_eth_open,
3142	.ndo_stop		= sh_eth_close,
3143	.ndo_start_xmit		= sh_eth_start_xmit,
3144	.ndo_get_stats		= sh_eth_get_stats,
3145	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
3146	.ndo_tx_timeout		= sh_eth_tx_timeout,
3147	.ndo_eth_ioctl		= phy_do_ioctl_running,
3148	.ndo_change_mtu		= sh_eth_change_mtu,
3149	.ndo_validate_addr	= eth_validate_addr,
3150	.ndo_set_mac_address	= eth_mac_addr,
3151	.ndo_set_features	= sh_eth_set_features,
3152};
3153
3154static const struct net_device_ops sh_eth_netdev_ops_tsu = {
3155	.ndo_open		= sh_eth_open,
3156	.ndo_stop		= sh_eth_close,
3157	.ndo_start_xmit		= sh_eth_start_xmit,
3158	.ndo_get_stats		= sh_eth_get_stats,
3159	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
3160	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
3161	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
3162	.ndo_tx_timeout		= sh_eth_tx_timeout,
3163	.ndo_eth_ioctl		= phy_do_ioctl_running,
3164	.ndo_change_mtu		= sh_eth_change_mtu,
3165	.ndo_validate_addr	= eth_validate_addr,
3166	.ndo_set_mac_address	= eth_mac_addr,
3167	.ndo_set_features	= sh_eth_set_features,
3168};
3169
3170#ifdef CONFIG_OF
3171static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3172{
3173	struct device_node *np = dev->of_node;
3174	struct sh_eth_plat_data *pdata;
3175	phy_interface_t interface;
3176	int ret;
3177
3178	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3179	if (!pdata)
3180		return NULL;
3181
3182	ret = of_get_phy_mode(np, &interface);
3183	if (ret)
3184		return NULL;
3185	pdata->phy_interface = interface;
3186
3187	of_get_mac_address(np, pdata->mac_addr);
3188
3189	pdata->no_ether_link =
3190		of_property_read_bool(np, "renesas,no-ether-link");
3191	pdata->ether_link_active_low =
3192		of_property_read_bool(np, "renesas,ether-link-active-low");
3193
3194	return pdata;
3195}
3196
3197static const struct of_device_id sh_eth_match_table[] = {
3198	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
3199	{ .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data },
3200	{ .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data },
3201	{ .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data },
3202	{ .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data },
3203	{ .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data },
3204	{ .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
3205	{ .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
3206	{ .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
3207	{ .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
3208	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
3209	{ .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
3210	{ .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
3211	{ .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
3212	{ }
3213};
3214MODULE_DEVICE_TABLE(of, sh_eth_match_table);
3215#else
3216static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3217{
3218	return NULL;
3219}
3220#endif
3221
3222static int sh_eth_drv_probe(struct platform_device *pdev)
3223{
3224	struct resource *res;
3225	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
3226	const struct platform_device_id *id = platform_get_device_id(pdev);
3227	struct sh_eth_private *mdp;
3228	struct net_device *ndev;
3229	int ret;
3230
3231	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
3232	if (!ndev)
3233		return -ENOMEM;
3234
3235	pm_runtime_enable(&pdev->dev);
3236	pm_runtime_get_sync(&pdev->dev);
3237
3238	ret = platform_get_irq(pdev, 0);
3239	if (ret < 0)
3240		goto out_release;
3241	ndev->irq = ret;
3242
3243	SET_NETDEV_DEV(ndev, &pdev->dev);
3244
3245	mdp = netdev_priv(ndev);
3246	mdp->num_tx_ring = TX_RING_SIZE;
3247	mdp->num_rx_ring = RX_RING_SIZE;
3248	mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
3249	if (IS_ERR(mdp->addr)) {
3250		ret = PTR_ERR(mdp->addr);
3251		goto out_release;
3252	}
3253
3254	ndev->base_addr = res->start;
3255
3256	spin_lock_init(&mdp->lock);
3257	mdp->pdev = pdev;
3258
3259	if (pdev->dev.of_node)
3260		pd = sh_eth_parse_dt(&pdev->dev);
3261	if (!pd) {
3262		dev_err(&pdev->dev, "no platform data\n");
3263		ret = -EINVAL;
3264		goto out_release;
3265	}
3266
3267	/* get PHY ID */
3268	mdp->phy_id = pd->phy;
3269	mdp->phy_interface = pd->phy_interface;
3270	mdp->no_ether_link = pd->no_ether_link;
3271	mdp->ether_link_active_low = pd->ether_link_active_low;
3272
3273	/* set cpu data */
3274	if (id)
3275		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3276	else
3277		mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
3278
3279	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3280	if (!mdp->reg_offset) {
3281		dev_err(&pdev->dev, "Unknown register type (%d)\n",
3282			mdp->cd->register_type);
3283		ret = -EINVAL;
3284		goto out_release;
3285	}
3286	sh_eth_set_default_cpu_data(mdp->cd);
3287
3288	/* The user's manual states that the max MTU should be 2048, but due
3289	 * to the alignment calculations in sh_eth_ring_init() the practical
3290	 * MTU is a bit less. Maybe this can be optimized some more.
3291	 */
3292	ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
3293	ndev->min_mtu = ETH_MIN_MTU;
3294
3295	if (mdp->cd->rx_csum) {
3296		ndev->features = NETIF_F_RXCSUM;
3297		ndev->hw_features = NETIF_F_RXCSUM;
3298	}
3299
3300	/* set function */
3301	if (mdp->cd->tsu)
3302		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
3303	else
3304		ndev->netdev_ops = &sh_eth_netdev_ops;
3305	ndev->ethtool_ops = &sh_eth_ethtool_ops;
3306	ndev->watchdog_timeo = TX_TIMEOUT;
3307
3308	/* debug message level */
3309	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
3310
3311	/* read and set MAC address */
3312	read_mac_address(ndev, pd->mac_addr);
3313	if (!is_valid_ether_addr(ndev->dev_addr)) {
3314		dev_warn(&pdev->dev,
3315			 "no valid MAC address supplied, using a random one.\n");
3316		eth_hw_addr_random(ndev);
3317	}
3318
3319	if (mdp->cd->tsu) {
3320		int port = pdev->id < 0 ? 0 : pdev->id % 2;
3321		struct resource *rtsu;
3322
3323		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3324		if (!rtsu) {
3325			dev_err(&pdev->dev, "no TSU resource\n");
3326			ret = -ENODEV;
3327			goto out_release;
3328		}
3329		/* We can only request the TSU region for the first port of
3330		 * the two sharing this TSU, for the probe to succeed...
3331		 */
3332		if (port == 0 &&
3333		    !devm_request_mem_region(&pdev->dev, rtsu->start,
3334					     resource_size(rtsu),
3335					     dev_name(&pdev->dev))) {
3336			dev_err(&pdev->dev, "can't request TSU resource.\n");
3337			ret = -EBUSY;
3338			goto out_release;
3339		}
3340		/* ioremap the TSU registers */
3341		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
3342					     resource_size(rtsu));
3343		if (!mdp->tsu_addr) {
3344			dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
3345			ret = -ENOMEM;
3346			goto out_release;
3347		}
3348		mdp->port = port;
3349		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3350
3351		/* Need to init only the first port of the two sharing a TSU */
3352		if (port == 0) {
3353			if (mdp->cd->chip_reset)
3354				mdp->cd->chip_reset(ndev);
3355
3356			/* TSU init (init only) */
3357			sh_eth_tsu_init(mdp);
3358		}
3359	}
3360
3361	if (mdp->cd->rmiimode)
3362		sh_eth_write(ndev, 0x1, RMIIMODE);
3363
3364	/* MDIO bus init */
3365	ret = sh_mdio_init(mdp, pd);
3366	if (ret) {
3367		dev_err_probe(&pdev->dev, ret, "MDIO init failed\n");
3368		goto out_release;
3369	}
3370
3371	netif_napi_add(ndev, &mdp->napi, sh_eth_poll);
3372
3373	/* network device register */
3374	ret = register_netdev(ndev);
3375	if (ret)
3376		goto out_napi_del;
3377
3378	if (mdp->cd->magic)
3379		device_set_wakeup_capable(&pdev->dev, 1);
3380
3381	/* print device information */
3382	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
3383		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
3384
3385	pm_runtime_put(&pdev->dev);
3386	platform_set_drvdata(pdev, ndev);
3387
3388	return ret;
3389
3390out_napi_del:
3391	netif_napi_del(&mdp->napi);
3392	sh_mdio_release(mdp);
3393
3394out_release:
3395	/* net_dev free */
3396	free_netdev(ndev);
3397
3398	pm_runtime_put(&pdev->dev);
3399	pm_runtime_disable(&pdev->dev);
3400	return ret;
3401}
3402
3403static int sh_eth_drv_remove(struct platform_device *pdev)
3404{
3405	struct net_device *ndev = platform_get_drvdata(pdev);
3406	struct sh_eth_private *mdp = netdev_priv(ndev);
3407
3408	unregister_netdev(ndev);
3409	netif_napi_del(&mdp->napi);
3410	sh_mdio_release(mdp);
3411	pm_runtime_disable(&pdev->dev);
3412	free_netdev(ndev);
3413
3414	return 0;
3415}
3416
3417#ifdef CONFIG_PM
3418#ifdef CONFIG_PM_SLEEP
3419static int sh_eth_wol_setup(struct net_device *ndev)
3420{
3421	struct sh_eth_private *mdp = netdev_priv(ndev);
3422
3423	/* Only allow ECI interrupts */
3424	synchronize_irq(ndev->irq);
3425	napi_disable(&mdp->napi);
3426	sh_eth_write(ndev, EESIPR_ECIIP, EESIPR);
3427
3428	/* Enable MagicPacket */
3429	sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
3430
3431	return enable_irq_wake(ndev->irq);
3432}
3433
3434static int sh_eth_wol_restore(struct net_device *ndev)
3435{
3436	struct sh_eth_private *mdp = netdev_priv(ndev);
3437	int ret;
3438
3439	napi_enable(&mdp->napi);
3440
3441	/* Disable MagicPacket */
3442	sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0);
3443
3444	/* The device needs to be reset to restore MagicPacket logic
3445	 * for next wakeup. If we close and open the device it will
3446	 * both be reset and all registers restored. This is what
3447	 * happens during suspend and resume without WoL enabled.
3448	 */
3449	sh_eth_close(ndev);
3450	ret = sh_eth_open(ndev);
3451	if (ret < 0)
3452		return ret;
3453
3454	return disable_irq_wake(ndev->irq);
3455}
3456
3457static int sh_eth_suspend(struct device *dev)
3458{
3459	struct net_device *ndev = dev_get_drvdata(dev);
3460	struct sh_eth_private *mdp = netdev_priv(ndev);
3461	int ret;
3462
3463	if (!netif_running(ndev))
3464		return 0;
3465
3466	netif_device_detach(ndev);
3467
3468	if (mdp->wol_enabled)
3469		ret = sh_eth_wol_setup(ndev);
3470	else
3471		ret = sh_eth_close(ndev);
3472
3473	return ret;
3474}
3475
3476static int sh_eth_resume(struct device *dev)
3477{
3478	struct net_device *ndev = dev_get_drvdata(dev);
3479	struct sh_eth_private *mdp = netdev_priv(ndev);
3480	int ret;
3481
3482	if (!netif_running(ndev))
3483		return 0;
3484
3485	if (mdp->wol_enabled)
3486		ret = sh_eth_wol_restore(ndev);
3487	else
3488		ret = sh_eth_open(ndev);
3489
3490	if (ret < 0)
3491		return ret;
3492
3493	netif_device_attach(ndev);
3494
3495	return ret;
3496}
3497#endif
3498
3499static int sh_eth_runtime_nop(struct device *dev)
3500{
3501	/* Runtime PM callback shared between ->runtime_suspend()
3502	 * and ->runtime_resume(). Simply returns success.
3503	 *
3504	 * This driver re-initializes all registers after
3505	 * pm_runtime_get_sync() anyway so there is no need
3506	 * to save and restore registers here.
3507	 */
3508	return 0;
3509}
3510
3511static const struct dev_pm_ops sh_eth_dev_pm_ops = {
3512	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
3513	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
3514};
3515#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
3516#else
3517#define SH_ETH_PM_OPS NULL
3518#endif
3519
3520static const struct platform_device_id sh_eth_id_table[] = {
3521	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
3522	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
3523	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
3524	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
3525	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
3526	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
3527	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
3528	{ }
3529};
3530MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
3531
3532static struct platform_driver sh_eth_driver = {
3533	.probe = sh_eth_drv_probe,
3534	.remove = sh_eth_drv_remove,
3535	.id_table = sh_eth_id_table,
3536	.driver = {
3537		   .name = CARDNAME,
3538		   .pm = SH_ETH_PM_OPS,
3539		   .of_match_table = of_match_ptr(sh_eth_match_table),
3540	},
3541};
3542
3543module_platform_driver(sh_eth_driver);
3544
3545MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
3546MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3547MODULE_LICENSE("GPL v2");
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*  SuperH Ethernet device driver
   3 *
   4 *  Copyright (C) 2014 Renesas Electronics Corporation
   5 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
   6 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
   7 *  Copyright (C) 2013-2017 Cogent Embedded, Inc.
   8 *  Copyright (C) 2014 Codethink Limited
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/kernel.h>
  13#include <linux/spinlock.h>
  14#include <linux/interrupt.h>
  15#include <linux/dma-mapping.h>
  16#include <linux/etherdevice.h>
  17#include <linux/delay.h>
  18#include <linux/platform_device.h>
  19#include <linux/mdio-bitbang.h>
  20#include <linux/netdevice.h>
  21#include <linux/of.h>
  22#include <linux/of_device.h>
  23#include <linux/of_irq.h>
  24#include <linux/of_net.h>
  25#include <linux/phy.h>
  26#include <linux/cache.h>
  27#include <linux/io.h>
  28#include <linux/pm_runtime.h>
  29#include <linux/slab.h>
  30#include <linux/ethtool.h>
  31#include <linux/if_vlan.h>
  32#include <linux/sh_eth.h>
  33#include <linux/of_mdio.h>
  34
  35#include "sh_eth.h"
  36
  37#define SH_ETH_DEF_MSG_ENABLE \
  38		(NETIF_MSG_LINK	| \
  39		NETIF_MSG_TIMER	| \
  40		NETIF_MSG_RX_ERR| \
  41		NETIF_MSG_TX_ERR)
  42
  43#define SH_ETH_OFFSET_INVALID	((u16)~0)
  44
  45#define SH_ETH_OFFSET_DEFAULTS			\
  46	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
  47
 
 
 
 
 
 
 
 
 
  48static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
  49	SH_ETH_OFFSET_DEFAULTS,
  50
  51	[EDSR]		= 0x0000,
  52	[EDMR]		= 0x0400,
  53	[EDTRR]		= 0x0408,
  54	[EDRRR]		= 0x0410,
  55	[EESR]		= 0x0428,
  56	[EESIPR]	= 0x0430,
  57	[TDLAR]		= 0x0010,
  58	[TDFAR]		= 0x0014,
  59	[TDFXR]		= 0x0018,
  60	[TDFFR]		= 0x001c,
  61	[RDLAR]		= 0x0030,
  62	[RDFAR]		= 0x0034,
  63	[RDFXR]		= 0x0038,
  64	[RDFFR]		= 0x003c,
  65	[TRSCER]	= 0x0438,
  66	[RMFCR]		= 0x0440,
  67	[TFTR]		= 0x0448,
  68	[FDR]		= 0x0450,
  69	[RMCR]		= 0x0458,
  70	[RPADIR]	= 0x0460,
  71	[FCFTR]		= 0x0468,
  72	[CSMR]		= 0x04E4,
  73
  74	[ECMR]		= 0x0500,
  75	[ECSR]		= 0x0510,
  76	[ECSIPR]	= 0x0518,
  77	[PIR]		= 0x0520,
  78	[PSR]		= 0x0528,
  79	[PIPR]		= 0x052c,
  80	[RFLR]		= 0x0508,
  81	[APR]		= 0x0554,
  82	[MPR]		= 0x0558,
  83	[PFTCR]		= 0x055c,
  84	[PFRCR]		= 0x0560,
  85	[TPAUSER]	= 0x0564,
  86	[GECMR]		= 0x05b0,
  87	[BCULR]		= 0x05b4,
  88	[MAHR]		= 0x05c0,
  89	[MALR]		= 0x05c8,
  90	[TROCR]		= 0x0700,
  91	[CDCR]		= 0x0708,
  92	[LCCR]		= 0x0710,
  93	[CEFCR]		= 0x0740,
  94	[FRECR]		= 0x0748,
  95	[TSFRCR]	= 0x0750,
  96	[TLFRCR]	= 0x0758,
  97	[RFCR]		= 0x0760,
  98	[CERCR]		= 0x0768,
  99	[CEECR]		= 0x0770,
 100	[MAFCR]		= 0x0778,
 101	[RMII_MII]	= 0x0790,
 102
 103	[ARSTR]		= 0x0000,
 104	[TSU_CTRST]	= 0x0004,
 105	[TSU_FWEN0]	= 0x0010,
 106	[TSU_FWEN1]	= 0x0014,
 107	[TSU_FCM]	= 0x0018,
 108	[TSU_BSYSL0]	= 0x0020,
 109	[TSU_BSYSL1]	= 0x0024,
 110	[TSU_PRISL0]	= 0x0028,
 111	[TSU_PRISL1]	= 0x002c,
 112	[TSU_FWSL0]	= 0x0030,
 113	[TSU_FWSL1]	= 0x0034,
 114	[TSU_FWSLC]	= 0x0038,
 115	[TSU_QTAGM0]	= 0x0040,
 116	[TSU_QTAGM1]	= 0x0044,
 117	[TSU_FWSR]	= 0x0050,
 118	[TSU_FWINMK]	= 0x0054,
 119	[TSU_ADQT0]	= 0x0048,
 120	[TSU_ADQT1]	= 0x004c,
 121	[TSU_VTAG0]	= 0x0058,
 122	[TSU_VTAG1]	= 0x005c,
 123	[TSU_ADSBSY]	= 0x0060,
 124	[TSU_TEN]	= 0x0064,
 125	[TSU_POST1]	= 0x0070,
 126	[TSU_POST2]	= 0x0074,
 127	[TSU_POST3]	= 0x0078,
 128	[TSU_POST4]	= 0x007c,
 129	[TSU_ADRH0]	= 0x0100,
 130
 131	[TXNLCR0]	= 0x0080,
 132	[TXALCR0]	= 0x0084,
 133	[RXNLCR0]	= 0x0088,
 134	[RXALCR0]	= 0x008c,
 135	[FWNLCR0]	= 0x0090,
 136	[FWALCR0]	= 0x0094,
 137	[TXNLCR1]	= 0x00a0,
 138	[TXALCR1]	= 0x00a4,
 139	[RXNLCR1]	= 0x00a8,
 140	[RXALCR1]	= 0x00ac,
 141	[FWNLCR1]	= 0x00b0,
 142	[FWALCR1]	= 0x00b4,
 143};
 144
 145static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
 146	SH_ETH_OFFSET_DEFAULTS,
 147
 148	[EDSR]		= 0x0000,
 149	[EDMR]		= 0x0400,
 150	[EDTRR]		= 0x0408,
 151	[EDRRR]		= 0x0410,
 152	[EESR]		= 0x0428,
 153	[EESIPR]	= 0x0430,
 154	[TDLAR]		= 0x0010,
 155	[TDFAR]		= 0x0014,
 156	[TDFXR]		= 0x0018,
 157	[TDFFR]		= 0x001c,
 158	[RDLAR]		= 0x0030,
 159	[RDFAR]		= 0x0034,
 160	[RDFXR]		= 0x0038,
 161	[RDFFR]		= 0x003c,
 162	[TRSCER]	= 0x0438,
 163	[RMFCR]		= 0x0440,
 164	[TFTR]		= 0x0448,
 165	[FDR]		= 0x0450,
 166	[RMCR]		= 0x0458,
 167	[RPADIR]	= 0x0460,
 168	[FCFTR]		= 0x0468,
 169	[CSMR]		= 0x04E4,
 170
 171	[ECMR]		= 0x0500,
 172	[RFLR]		= 0x0508,
 173	[ECSR]		= 0x0510,
 174	[ECSIPR]	= 0x0518,
 175	[PIR]		= 0x0520,
 176	[APR]		= 0x0554,
 177	[MPR]		= 0x0558,
 178	[PFTCR]		= 0x055c,
 179	[PFRCR]		= 0x0560,
 180	[TPAUSER]	= 0x0564,
 181	[MAHR]		= 0x05c0,
 182	[MALR]		= 0x05c8,
 183	[CEFCR]		= 0x0740,
 184	[FRECR]		= 0x0748,
 185	[TSFRCR]	= 0x0750,
 186	[TLFRCR]	= 0x0758,
 187	[RFCR]		= 0x0760,
 188	[MAFCR]		= 0x0778,
 189
 190	[ARSTR]		= 0x0000,
 191	[TSU_CTRST]	= 0x0004,
 192	[TSU_FWSLC]	= 0x0038,
 193	[TSU_VTAG0]	= 0x0058,
 194	[TSU_ADSBSY]	= 0x0060,
 195	[TSU_TEN]	= 0x0064,
 196	[TSU_POST1]	= 0x0070,
 197	[TSU_POST2]	= 0x0074,
 198	[TSU_POST3]	= 0x0078,
 199	[TSU_POST4]	= 0x007c,
 200	[TSU_ADRH0]	= 0x0100,
 201
 202	[TXNLCR0]	= 0x0080,
 203	[TXALCR0]	= 0x0084,
 204	[RXNLCR0]	= 0x0088,
 205	[RXALCR0]	= 0x008C,
 206};
 207
 208static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
 209	SH_ETH_OFFSET_DEFAULTS,
 210
 211	[ECMR]		= 0x0300,
 212	[RFLR]		= 0x0308,
 213	[ECSR]		= 0x0310,
 214	[ECSIPR]	= 0x0318,
 215	[PIR]		= 0x0320,
 216	[PSR]		= 0x0328,
 217	[RDMLR]		= 0x0340,
 218	[IPGR]		= 0x0350,
 219	[APR]		= 0x0354,
 220	[MPR]		= 0x0358,
 221	[RFCF]		= 0x0360,
 222	[TPAUSER]	= 0x0364,
 223	[TPAUSECR]	= 0x0368,
 224	[MAHR]		= 0x03c0,
 225	[MALR]		= 0x03c8,
 226	[TROCR]		= 0x03d0,
 227	[CDCR]		= 0x03d4,
 228	[LCCR]		= 0x03d8,
 229	[CNDCR]		= 0x03dc,
 230	[CEFCR]		= 0x03e4,
 231	[FRECR]		= 0x03e8,
 232	[TSFRCR]	= 0x03ec,
 233	[TLFRCR]	= 0x03f0,
 234	[RFCR]		= 0x03f4,
 235	[MAFCR]		= 0x03f8,
 236
 237	[EDMR]		= 0x0200,
 238	[EDTRR]		= 0x0208,
 239	[EDRRR]		= 0x0210,
 240	[TDLAR]		= 0x0218,
 241	[RDLAR]		= 0x0220,
 242	[EESR]		= 0x0228,
 243	[EESIPR]	= 0x0230,
 244	[TRSCER]	= 0x0238,
 245	[RMFCR]		= 0x0240,
 246	[TFTR]		= 0x0248,
 247	[FDR]		= 0x0250,
 248	[RMCR]		= 0x0258,
 249	[TFUCR]		= 0x0264,
 250	[RFOCR]		= 0x0268,
 251	[RMIIMODE]      = 0x026c,
 252	[FCFTR]		= 0x0270,
 253	[TRIMD]		= 0x027c,
 254};
 255
 256static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
 257	SH_ETH_OFFSET_DEFAULTS,
 258
 259	[ECMR]		= 0x0100,
 260	[RFLR]		= 0x0108,
 261	[ECSR]		= 0x0110,
 262	[ECSIPR]	= 0x0118,
 263	[PIR]		= 0x0120,
 264	[PSR]		= 0x0128,
 265	[RDMLR]		= 0x0140,
 266	[IPGR]		= 0x0150,
 267	[APR]		= 0x0154,
 268	[MPR]		= 0x0158,
 269	[TPAUSER]	= 0x0164,
 270	[RFCF]		= 0x0160,
 271	[TPAUSECR]	= 0x0168,
 272	[BCFRR]		= 0x016c,
 273	[MAHR]		= 0x01c0,
 274	[MALR]		= 0x01c8,
 275	[TROCR]		= 0x01d0,
 276	[CDCR]		= 0x01d4,
 277	[LCCR]		= 0x01d8,
 278	[CNDCR]		= 0x01dc,
 279	[CEFCR]		= 0x01e4,
 280	[FRECR]		= 0x01e8,
 281	[TSFRCR]	= 0x01ec,
 282	[TLFRCR]	= 0x01f0,
 283	[RFCR]		= 0x01f4,
 284	[MAFCR]		= 0x01f8,
 285	[RTRATE]	= 0x01fc,
 286
 287	[EDMR]		= 0x0000,
 288	[EDTRR]		= 0x0008,
 289	[EDRRR]		= 0x0010,
 290	[TDLAR]		= 0x0018,
 291	[RDLAR]		= 0x0020,
 292	[EESR]		= 0x0028,
 293	[EESIPR]	= 0x0030,
 294	[TRSCER]	= 0x0038,
 295	[RMFCR]		= 0x0040,
 296	[TFTR]		= 0x0048,
 297	[FDR]		= 0x0050,
 298	[RMCR]		= 0x0058,
 299	[TFUCR]		= 0x0064,
 300	[RFOCR]		= 0x0068,
 301	[FCFTR]		= 0x0070,
 302	[RPADIR]	= 0x0078,
 303	[TRIMD]		= 0x007c,
 304	[RBWAR]		= 0x00c8,
 305	[RDFAR]		= 0x00cc,
 306	[TBRAR]		= 0x00d4,
 307	[TDFAR]		= 0x00d8,
 308};
 309
 310static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
 311	SH_ETH_OFFSET_DEFAULTS,
 312
 313	[EDMR]		= 0x0000,
 314	[EDTRR]		= 0x0004,
 315	[EDRRR]		= 0x0008,
 316	[TDLAR]		= 0x000c,
 317	[RDLAR]		= 0x0010,
 318	[EESR]		= 0x0014,
 319	[EESIPR]	= 0x0018,
 320	[TRSCER]	= 0x001c,
 321	[RMFCR]		= 0x0020,
 322	[TFTR]		= 0x0024,
 323	[FDR]		= 0x0028,
 324	[RMCR]		= 0x002c,
 325	[EDOCR]		= 0x0030,
 326	[FCFTR]		= 0x0034,
 327	[RPADIR]	= 0x0038,
 328	[TRIMD]		= 0x003c,
 329	[RBWAR]		= 0x0040,
 330	[RDFAR]		= 0x0044,
 331	[TBRAR]		= 0x004c,
 332	[TDFAR]		= 0x0050,
 333
 334	[ECMR]		= 0x0160,
 335	[ECSR]		= 0x0164,
 336	[ECSIPR]	= 0x0168,
 337	[PIR]		= 0x016c,
 338	[MAHR]		= 0x0170,
 339	[MALR]		= 0x0174,
 340	[RFLR]		= 0x0178,
 341	[PSR]		= 0x017c,
 342	[TROCR]		= 0x0180,
 343	[CDCR]		= 0x0184,
 344	[LCCR]		= 0x0188,
 345	[CNDCR]		= 0x018c,
 346	[CEFCR]		= 0x0194,
 347	[FRECR]		= 0x0198,
 348	[TSFRCR]	= 0x019c,
 349	[TLFRCR]	= 0x01a0,
 350	[RFCR]		= 0x01a4,
 351	[MAFCR]		= 0x01a8,
 352	[IPGR]		= 0x01b4,
 353	[APR]		= 0x01b8,
 354	[MPR]		= 0x01bc,
 355	[TPAUSER]	= 0x01c4,
 356	[BCFR]		= 0x01cc,
 357
 358	[ARSTR]		= 0x0000,
 359	[TSU_CTRST]	= 0x0004,
 360	[TSU_FWEN0]	= 0x0010,
 361	[TSU_FWEN1]	= 0x0014,
 362	[TSU_FCM]	= 0x0018,
 363	[TSU_BSYSL0]	= 0x0020,
 364	[TSU_BSYSL1]	= 0x0024,
 365	[TSU_PRISL0]	= 0x0028,
 366	[TSU_PRISL1]	= 0x002c,
 367	[TSU_FWSL0]	= 0x0030,
 368	[TSU_FWSL1]	= 0x0034,
 369	[TSU_FWSLC]	= 0x0038,
 370	[TSU_QTAGM0]	= 0x0040,
 371	[TSU_QTAGM1]	= 0x0044,
 372	[TSU_ADQT0]	= 0x0048,
 373	[TSU_ADQT1]	= 0x004c,
 374	[TSU_FWSR]	= 0x0050,
 375	[TSU_FWINMK]	= 0x0054,
 376	[TSU_ADSBSY]	= 0x0060,
 377	[TSU_TEN]	= 0x0064,
 378	[TSU_POST1]	= 0x0070,
 379	[TSU_POST2]	= 0x0074,
 380	[TSU_POST3]	= 0x0078,
 381	[TSU_POST4]	= 0x007c,
 382
 383	[TXNLCR0]	= 0x0080,
 384	[TXALCR0]	= 0x0084,
 385	[RXNLCR0]	= 0x0088,
 386	[RXALCR0]	= 0x008c,
 387	[FWNLCR0]	= 0x0090,
 388	[FWALCR0]	= 0x0094,
 389	[TXNLCR1]	= 0x00a0,
 390	[TXALCR1]	= 0x00a4,
 391	[RXNLCR1]	= 0x00a8,
 392	[RXALCR1]	= 0x00ac,
 393	[FWNLCR1]	= 0x00b0,
 394	[FWALCR1]	= 0x00b4,
 395
 396	[TSU_ADRH0]	= 0x0100,
 397};
 
 398
 399static void sh_eth_rcv_snd_disable(struct net_device *ndev);
 400static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
 401
 402static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
 403{
 404	struct sh_eth_private *mdp = netdev_priv(ndev);
 405	u16 offset = mdp->reg_offset[enum_index];
 406
 407	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
 408		return;
 409
 410	iowrite32(data, mdp->addr + offset);
 411}
 412
 413static u32 sh_eth_read(struct net_device *ndev, int enum_index)
 414{
 415	struct sh_eth_private *mdp = netdev_priv(ndev);
 416	u16 offset = mdp->reg_offset[enum_index];
 417
 418	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
 419		return ~0U;
 420
 421	return ioread32(mdp->addr + offset);
 422}
 423
 424static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
 425			  u32 set)
 426{
 427	sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
 428		     enum_index);
 429}
 430
 431static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index)
 432{
 433	return mdp->reg_offset[enum_index];
 434}
 435
 436static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
 437			     int enum_index)
 438{
 439	u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);
 440
 441	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
 442		return;
 443
 444	iowrite32(data, mdp->tsu_addr + offset);
 445}
 446
 447static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
 448{
 449	u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);
 450
 451	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
 452		return ~0U;
 453
 454	return ioread32(mdp->tsu_addr + offset);
 455}
 456
 457static void sh_eth_soft_swap(char *src, int len)
 458{
 459#ifdef __LITTLE_ENDIAN
 460	u32 *p = (u32 *)src;
 461	u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32));
 462
 463	for (; p < maxp; p++)
 464		*p = swab32(*p);
 465#endif
 466}
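/* Editor's note (illustrative): on little-endian CPUs the loop above
 * byte-swaps each 32-bit word of the packet buffer, since the DMA
 * engine expects big-endian data when hardware swapping (cd->hw_swap)
 * is unavailable.  For example, the bytes 0x11 0x22 0x33 0x44 load as
 * the u32 0x44332211 on little-endian; swab32() yields 0x11223344,
 * which is stored back as the bytes 0x44 0x33 0x22 0x11.  Note that
 * DIV_ROUND_UP() rounds len up to a whole number of words.
 */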
 467
 468static void sh_eth_select_mii(struct net_device *ndev)
 469{
 470	struct sh_eth_private *mdp = netdev_priv(ndev);
 471	u32 value;
 472
 473	switch (mdp->phy_interface) {
 474	case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID:
 475		value = 0x3;
 476		break;
 477	case PHY_INTERFACE_MODE_GMII:
 478		value = 0x2;
 479		break;
 480	case PHY_INTERFACE_MODE_MII:
 481		value = 0x1;
 482		break;
 483	case PHY_INTERFACE_MODE_RMII:
 484		value = 0x0;
 485		break;
 486	default:
 487		netdev_warn(ndev,
 488			    "PHY interface mode was not set up. Defaulting to MII.\n");
 489		value = 0x1;
 490		break;
 491	}
 492
 493	sh_eth_write(ndev, value, RMII_MII);
 494}
 495
 496static void sh_eth_set_duplex(struct net_device *ndev)
 497{
 498	struct sh_eth_private *mdp = netdev_priv(ndev);
 499
 500	sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
 501}
 502
 503static void sh_eth_chip_reset(struct net_device *ndev)
 504{
 505	struct sh_eth_private *mdp = netdev_priv(ndev);
 506
 507	/* reset device */
 508	sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
 509	mdelay(1);
 510}
 511
 512static int sh_eth_soft_reset(struct net_device *ndev)
 513{
 514	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
 515	mdelay(3);
 516	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
 517
 518	return 0;
 519}
 520
 521static int sh_eth_check_soft_reset(struct net_device *ndev)
 522{
 523	int cnt;
 524
 525	for (cnt = 100; cnt > 0; cnt--) {
 526		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
 527			return 0;
 528		mdelay(1);
 529	}
 530
 531	netdev_err(ndev, "Device reset failed\n");
 532	return -ETIMEDOUT;
 533}
 534
 535static int sh_eth_soft_reset_gether(struct net_device *ndev)
 536{
 537	struct sh_eth_private *mdp = netdev_priv(ndev);
 538	int ret;
 539
 540	sh_eth_write(ndev, EDSR_ENALL, EDSR);
 541	sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);
 542
 543	ret = sh_eth_check_soft_reset(ndev);
 544	if (ret)
 545		return ret;
 546
 547	/* Table Init */
 548	sh_eth_write(ndev, 0, TDLAR);
 549	sh_eth_write(ndev, 0, TDFAR);
 550	sh_eth_write(ndev, 0, TDFXR);
 551	sh_eth_write(ndev, 0, TDFFR);
 552	sh_eth_write(ndev, 0, RDLAR);
 553	sh_eth_write(ndev, 0, RDFAR);
 554	sh_eth_write(ndev, 0, RDFXR);
 555	sh_eth_write(ndev, 0, RDFFR);
 556
 557	/* Reset HW CRC register */
 558	if (mdp->cd->csmr)
 559		sh_eth_write(ndev, 0, CSMR);
 560
 561	/* Select MII mode */
 562	if (mdp->cd->select_mii)
 563		sh_eth_select_mii(ndev);
 564
 565	return ret;
 566}
 567
 568static void sh_eth_set_rate_gether(struct net_device *ndev)
 569{
 570	struct sh_eth_private *mdp = netdev_priv(ndev);
 571
 572	switch (mdp->speed) {
 573	case 10: /* 10BASE */
 574		sh_eth_write(ndev, GECMR_10, GECMR);
 575		break;
 576	case 100:/* 100BASE */
 577		sh_eth_write(ndev, GECMR_100, GECMR);
 578		break;
 579	case 1000: /* 1000BASE */
 580		sh_eth_write(ndev, GECMR_1000, GECMR);
 581		break;
 582	}
 583}
 584
 585#ifdef CONFIG_OF
 586/* R7S72100 */
 587static struct sh_eth_cpu_data r7s72100_data = {
 588	.soft_reset	= sh_eth_soft_reset_gether,
 589
 590	.chip_reset	= sh_eth_chip_reset,
 591	.set_duplex	= sh_eth_set_duplex,
 592
 593	.register_type	= SH_ETH_REG_FAST_RZ,
 594
 595	.edtrr_trns	= EDTRR_TRNS_GETHER,
 596	.ecsr_value	= ECSR_ICD,
 597	.ecsipr_value	= ECSIPR_ICDIP,
 598	.eesipr_value	= EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
 599			  EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP |
 600			  EESIPR_ECIIP |
 601			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 602			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 603			  EESIPR_RMAFIP | EESIPR_RRFIP |
 604			  EESIPR_RTLFIP | EESIPR_RTSFIP |
 605			  EESIPR_PREIP | EESIPR_CERFIP,
 606
 607	.tx_check	= EESR_TC1 | EESR_FTC,
 608	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
 609			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 610			  EESR_TDE,
 611	.fdr_value	= 0x0000070f,
 612
 613	.no_psr		= 1,
 614	.apr		= 1,
 615	.mpr		= 1,
 616	.tpauser	= 1,
 617	.hw_swap	= 1,
 618	.rpadir		= 1,
 619	.no_trimd	= 1,
 620	.no_ade		= 1,
 621	.xdfar_rw	= 1,
 622	.csmr		= 1,
 623	.rx_csum	= 1,
 624	.tsu		= 1,
 625	.no_tx_cntrs	= 1,
 626};
 627
 628static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
 629{
 630	sh_eth_chip_reset(ndev);
 631
 632	sh_eth_select_mii(ndev);
 633}
 634
 635/* R8A7740 */
 636static struct sh_eth_cpu_data r8a7740_data = {
 637	.soft_reset	= sh_eth_soft_reset_gether,
 638
 639	.chip_reset	= sh_eth_chip_reset_r8a7740,
 640	.set_duplex	= sh_eth_set_duplex,
 641	.set_rate	= sh_eth_set_rate_gether,
 642
 643	.register_type	= SH_ETH_REG_GIGABIT,
 644
 645	.edtrr_trns	= EDTRR_TRNS_GETHER,
 646	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 647	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 648	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
 649			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 650			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 651			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
 652			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
 653			  EESIPR_CEEFIP | EESIPR_CELFIP |
 654			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
 655			  EESIPR_PREIP | EESIPR_CERFIP,
 656
 657	.tx_check	= EESR_TC1 | EESR_FTC,
 658	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
 659			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 660			  EESR_TDE,
 661	.fdr_value	= 0x0000070f,
 662
 663	.apr		= 1,
 664	.mpr		= 1,
 665	.tpauser	= 1,
 666	.bculr		= 1,
 667	.hw_swap	= 1,
 668	.rpadir		= 1,
 669	.no_trimd	= 1,
 670	.no_ade		= 1,
 671	.xdfar_rw	= 1,
 672	.csmr		= 1,
 673	.rx_csum	= 1,
 674	.tsu		= 1,
 675	.select_mii	= 1,
 676	.magic		= 1,
 677	.cexcr		= 1,
 678};
 679
 680/* There is CPU-dependent code */
 681static void sh_eth_set_rate_rcar(struct net_device *ndev)
 682{
 683	struct sh_eth_private *mdp = netdev_priv(ndev);
 684
 685	switch (mdp->speed) {
 686	case 10: /* 10BASE */
 687		sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
 688		break;
 689	case 100:/* 100BASE */
 690		sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
 691		break;
 692	}
 693}
 694
 695/* R-Car Gen1 */
 696static struct sh_eth_cpu_data rcar_gen1_data = {
 697	.soft_reset	= sh_eth_soft_reset,
 698
 699	.set_duplex	= sh_eth_set_duplex,
 700	.set_rate	= sh_eth_set_rate_rcar,
 701
 702	.register_type	= SH_ETH_REG_FAST_RCAR,
 703
 704	.edtrr_trns	= EDTRR_TRNS_ETHER,
 705	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
 706	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
 707	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
 708			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 709			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 710			  EESIPR_RMAFIP | EESIPR_RRFIP |
 711			  EESIPR_RTLFIP | EESIPR_RTSFIP |
 712			  EESIPR_PREIP | EESIPR_CERFIP,
 713
 714	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
 715	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
 716			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 717	.fdr_value	= 0x00000f0f,
 718
 719	.apr		= 1,
 720	.mpr		= 1,
 721	.tpauser	= 1,
 722	.hw_swap	= 1,
 723	.no_xdfar	= 1,
 724};
 725
 726/* R-Car Gen2 and RZ/G1 */
 727static struct sh_eth_cpu_data rcar_gen2_data = {
 728	.soft_reset	= sh_eth_soft_reset,
 729
 730	.set_duplex	= sh_eth_set_duplex,
 731	.set_rate	= sh_eth_set_rate_rcar,
 732
 733	.register_type	= SH_ETH_REG_FAST_RCAR,
 734
 735	.edtrr_trns	= EDTRR_TRNS_ETHER,
 736	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
 737	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
 738			  ECSIPR_MPDIP,
 739	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
 740			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 741			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 742			  EESIPR_RMAFIP | EESIPR_RRFIP |
 743			  EESIPR_RTLFIP | EESIPR_RTSFIP |
 744			  EESIPR_PREIP | EESIPR_CERFIP,
 745
 746	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
 747	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
 748			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 749	.fdr_value	= 0x00000f0f,
 750
 751	.trscer_err_mask = DESC_I_RINT8,
 752
 753	.apr		= 1,
 754	.mpr		= 1,
 755	.tpauser	= 1,
 756	.hw_swap	= 1,
 757	.no_xdfar	= 1,
 758	.rmiimode	= 1,
 759	.magic		= 1,
 760};
 761
 762/* R8A77980 */
 763static struct sh_eth_cpu_data r8a77980_data = {
 764	.soft_reset	= sh_eth_soft_reset_gether,
 765
 766	.set_duplex	= sh_eth_set_duplex,
 767	.set_rate	= sh_eth_set_rate_gether,
 768
 769	.register_type  = SH_ETH_REG_GIGABIT,
 770
 771	.edtrr_trns	= EDTRR_TRNS_GETHER,
 772	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
 773	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
 774			  ECSIPR_MPDIP,
 775	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
 776			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 777			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 778			  EESIPR_RMAFIP | EESIPR_RRFIP |
 779			  EESIPR_RTLFIP | EESIPR_RTSFIP |
 780			  EESIPR_PREIP | EESIPR_CERFIP,
 781
 782	.tx_check       = EESR_FTC | EESR_CD | EESR_TRO,
 783	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
 784			  EESR_RFE | EESR_RDE | EESR_RFRMER |
 785			  EESR_TFE | EESR_TDE | EESR_ECI,
 786	.fdr_value	= 0x0000070f,
 787
 788	.apr		= 1,
 789	.mpr		= 1,
 790	.tpauser	= 1,
 791	.bculr		= 1,
 792	.hw_swap	= 1,
 793	.nbst		= 1,
 794	.rpadir		= 1,
 795	.no_trimd	= 1,
 796	.no_ade		= 1,
 797	.xdfar_rw	= 1,
 798	.csmr		= 1,
 799	.rx_csum	= 1,
 800	.select_mii	= 1,
 801	.magic		= 1,
 802	.cexcr		= 1,
 803};
 804
 805/* R7S9210 */
 806static struct sh_eth_cpu_data r7s9210_data = {
 807	.soft_reset	= sh_eth_soft_reset,
 808
 809	.set_duplex	= sh_eth_set_duplex,
 810	.set_rate	= sh_eth_set_rate_rcar,
 811
 812	.register_type	= SH_ETH_REG_FAST_SH4,
 813
 814	.edtrr_trns	= EDTRR_TRNS_ETHER,
 815	.ecsr_value	= ECSR_ICD,
 816	.ecsipr_value	= ECSIPR_ICDIP,
 817	.eesipr_value	= EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
 818			  EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
 819			  EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
 820			  EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
 821			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
 822			  EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
 823			  EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,
 824
 825	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
 826	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
 827			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 828
 829	.fdr_value	= 0x0000070f,
 830
 831	.apr		= 1,
 832	.mpr		= 1,
 833	.tpauser	= 1,
 834	.hw_swap	= 1,
 835	.rpadir		= 1,
 836	.no_ade		= 1,
 837	.xdfar_rw	= 1,
 838};
 839#endif /* CONFIG_OF */
 840
 841static void sh_eth_set_rate_sh7724(struct net_device *ndev)
 842{
 843	struct sh_eth_private *mdp = netdev_priv(ndev);
 844
 845	switch (mdp->speed) {
 846	case 10: /* 10BASE */
 847		sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
 848		break;
 849	case 100:/* 100BASE */
 850		sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
 851		break;
 852	}
 853}
 854
 855/* SH7724 */
 856static struct sh_eth_cpu_data sh7724_data = {
 857	.soft_reset	= sh_eth_soft_reset,
 858
 859	.set_duplex	= sh_eth_set_duplex,
 860	.set_rate	= sh_eth_set_rate_sh7724,
 861
 862	.register_type	= SH_ETH_REG_FAST_SH4,
 863
 864	.edtrr_trns	= EDTRR_TRNS_ETHER,
 865	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
 866	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
 867	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
 868			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 869			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 870			  EESIPR_RMAFIP | EESIPR_RRFIP |
 871			  EESIPR_RTLFIP | EESIPR_RTSFIP |
 872			  EESIPR_PREIP | EESIPR_CERFIP,
 873
 874	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
 875	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
 876			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 877
 878	.apr		= 1,
 879	.mpr		= 1,
 880	.tpauser	= 1,
 881	.hw_swap	= 1,
 882	.rpadir		= 1,
 883};
 884
 885static void sh_eth_set_rate_sh7757(struct net_device *ndev)
 886{
 887	struct sh_eth_private *mdp = netdev_priv(ndev);
 888
 889	switch (mdp->speed) {
 890	case 10: /* 10BASE */
 891		sh_eth_write(ndev, 0, RTRATE);
 892		break;
 893	case 100:/* 100BASE */
 894		sh_eth_write(ndev, 1, RTRATE);
 895		break;
 896	}
 897}
 898
 899/* SH7757 */
 900static struct sh_eth_cpu_data sh7757_data = {
 901	.soft_reset	= sh_eth_soft_reset,
 902
 903	.set_duplex	= sh_eth_set_duplex,
 904	.set_rate	= sh_eth_set_rate_sh7757,
 905
 906	.register_type	= SH_ETH_REG_FAST_SH4,
 907
 908	.edtrr_trns	= EDTRR_TRNS_ETHER,
 909	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
 910			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 911			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 912			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
 913			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
 914			  EESIPR_CEEFIP | EESIPR_CELFIP |
 915			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
 916			  EESIPR_PREIP | EESIPR_CERFIP,
 917
 918	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
 919	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
 920			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 921
 922	.irq_flags	= IRQF_SHARED,
 923	.apr		= 1,
 924	.mpr		= 1,
 925	.tpauser	= 1,
 926	.hw_swap	= 1,
 927	.no_ade		= 1,
 928	.rpadir		= 1,
 929	.rtrate		= 1,
 930	.dual_port	= 1,
 931};
 932
 933#define SH_GIGA_ETH_BASE	0xfee00000UL
 934#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
 935#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
 936static void sh_eth_chip_reset_giga(struct net_device *ndev)
 937{
 938	u32 mahr[2], malr[2];
 939	int i;
 940
 941	/* save MAHR and MALR */
 942	for (i = 0; i < 2; i++) {
 943		malr[i] = ioread32((void *)GIGA_MALR(i));
 944		mahr[i] = ioread32((void *)GIGA_MAHR(i));
 945	}
 946
 947	sh_eth_chip_reset(ndev);
 948
 949	/* restore MAHR and MALR */
 950	for (i = 0; i < 2; i++) {
 951		iowrite32(malr[i], (void *)GIGA_MALR(i));
 952		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
 953	}
 954}
 955
 956static void sh_eth_set_rate_giga(struct net_device *ndev)
 957{
 958	struct sh_eth_private *mdp = netdev_priv(ndev);
 959
 960	switch (mdp->speed) {
 961	case 10: /* 10BASE */
 962		sh_eth_write(ndev, 0x00000000, GECMR);
 963		break;
 964	case 100:/* 100BASE */
 965		sh_eth_write(ndev, 0x00000010, GECMR);
 966		break;
 967	case 1000: /* 1000BASE */
 968		sh_eth_write(ndev, 0x00000020, GECMR);
 969		break;
 970	}
 971}
 972
 973/* SH7757(GETHERC) */
 974static struct sh_eth_cpu_data sh7757_data_giga = {
 975	.soft_reset	= sh_eth_soft_reset_gether,
 976
 977	.chip_reset	= sh_eth_chip_reset_giga,
 978	.set_duplex	= sh_eth_set_duplex,
 979	.set_rate	= sh_eth_set_rate_giga,
 980
 981	.register_type	= SH_ETH_REG_GIGABIT,
 982
 983	.edtrr_trns	= EDTRR_TRNS_GETHER,
 984	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 985	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 986	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
 987			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 988			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 989			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
 990			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
 991			  EESIPR_CEEFIP | EESIPR_CELFIP |
 992			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
 993			  EESIPR_PREIP | EESIPR_CERFIP,
 994
 995	.tx_check	= EESR_TC1 | EESR_FTC,
 996	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
 997			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 998			  EESR_TDE,
 999	.fdr_value	= 0x0000072f,
1000
1001	.irq_flags	= IRQF_SHARED,
1002	.apr		= 1,
1003	.mpr		= 1,
1004	.tpauser	= 1,
1005	.bculr		= 1,
1006	.hw_swap	= 1,
1007	.rpadir		= 1,
1008	.no_trimd	= 1,
1009	.no_ade		= 1,
1010	.xdfar_rw	= 1,
1011	.tsu		= 1,
1012	.cexcr		= 1,
1013	.dual_port	= 1,
1014};
1015
1016/* SH7734 */
1017static struct sh_eth_cpu_data sh7734_data = {
1018	.soft_reset	= sh_eth_soft_reset_gether,
1019
1020	.chip_reset	= sh_eth_chip_reset,
1021	.set_duplex	= sh_eth_set_duplex,
1022	.set_rate	= sh_eth_set_rate_gether,
1023
1024	.register_type	= SH_ETH_REG_GIGABIT,
1025
1026	.edtrr_trns	= EDTRR_TRNS_GETHER,
1027	.ecsr_value	= ECSR_ICD | ECSR_MPD,
1028	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
1029	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
1030			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
1031			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
1032			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
1033			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
1034			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
1035			  EESIPR_PREIP | EESIPR_CERFIP,
1036
1037	.tx_check	= EESR_TC1 | EESR_FTC,
1038	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
1039			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
1040			  EESR_TDE,
1041
1042	.apr		= 1,
1043	.mpr		= 1,
1044	.tpauser	= 1,
1045	.bculr		= 1,
1046	.hw_swap	= 1,
1047	.no_trimd	= 1,
1048	.no_ade		= 1,
1049	.xdfar_rw	= 1,
1050	.tsu		= 1,
1051	.csmr		= 1,
1052	.rx_csum	= 1,
1053	.select_mii	= 1,
1054	.magic		= 1,
1055	.cexcr		= 1,
1056};
1057
1058/* SH7763 */
1059static struct sh_eth_cpu_data sh7763_data = {
1060	.soft_reset	= sh_eth_soft_reset_gether,
1061
1062	.chip_reset	= sh_eth_chip_reset,
1063	.set_duplex	= sh_eth_set_duplex,
1064	.set_rate	= sh_eth_set_rate_gether,
1065
1066	.register_type	= SH_ETH_REG_GIGABIT,
1067
1068	.edtrr_trns	= EDTRR_TRNS_GETHER,
1069	.ecsr_value	= ECSR_ICD | ECSR_MPD,
1070	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
1071	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
1072			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
1073			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
1074			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
1075			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
1076			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
1077			  EESIPR_PREIP | EESIPR_CERFIP,
1078
1079	.tx_check	= EESR_TC1 | EESR_FTC,
1080	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
1081			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
1082
1083	.apr		= 1,
1084	.mpr		= 1,
1085	.tpauser	= 1,
1086	.bculr		= 1,
1087	.hw_swap	= 1,
1088	.no_trimd	= 1,
1089	.no_ade		= 1,
1090	.xdfar_rw	= 1,
1091	.tsu		= 1,
1092	.irq_flags	= IRQF_SHARED,
1093	.magic		= 1,
1094	.cexcr		= 1,
1095	.rx_csum	= 1,
1096	.dual_port	= 1,
1097};
1098
1099static struct sh_eth_cpu_data sh7619_data = {
1100	.soft_reset	= sh_eth_soft_reset,
1101
1102	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
1103
1104	.edtrr_trns	= EDTRR_TRNS_ETHER,
1105	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
1106			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
1107			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
1108			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
1109			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
1110			  EESIPR_CEEFIP | EESIPR_CELFIP |
1111			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
1112			  EESIPR_PREIP | EESIPR_CERFIP,
1113
1114	.apr		= 1,
1115	.mpr		= 1,
1116	.tpauser	= 1,
1117	.hw_swap	= 1,
1118};
1119
1120static struct sh_eth_cpu_data sh771x_data = {
1121	.soft_reset	= sh_eth_soft_reset,
1122
1123	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
1124
1125	.edtrr_trns	= EDTRR_TRNS_ETHER,
1126	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
1127			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
1128			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
1129			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
1130			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
1131			  EESIPR_CEEFIP | EESIPR_CELFIP |
1132			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
1133			  EESIPR_PREIP | EESIPR_CERFIP,
1134	.tsu		= 1,
1135	.dual_port	= 1,
1136};
1137
1138static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
1139{
1140	if (!cd->ecsr_value)
1141		cd->ecsr_value = DEFAULT_ECSR_INIT;
1142
1143	if (!cd->ecsipr_value)
1144		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
1145
1146	if (!cd->fcftr_value)
1147		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
1148				  DEFAULT_FIFO_F_D_RFD;
1149
1150	if (!cd->fdr_value)
1151		cd->fdr_value = DEFAULT_FDR_INIT;
1152
1153	if (!cd->tx_check)
1154		cd->tx_check = DEFAULT_TX_CHECK;
1155
1156	if (!cd->eesr_err_check)
1157		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
1158
1159	if (!cd->trscer_err_mask)
1160		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
1161}
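/* Editor's note (illustrative): any per-SoC sh_eth_cpu_data that
 * leaves one of these fields zero inherits the default above at probe
 * time -- e.g. sh7619_data sets no fdr_value, so it ends up with
 * DEFAULT_FDR_INIT.
 */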
1162
1163static void sh_eth_set_receive_align(struct sk_buff *skb)
1164{
1165	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
1166
1167	if (reserve)
1168		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
1169}
1170
1171/* Program the hardware MAC address from dev->dev_addr. */
1172static void update_mac_address(struct net_device *ndev)
1173{
1174	sh_eth_write(ndev,
1175		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
1176		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
1177	sh_eth_write(ndev,
1178		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
1179}
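/* Editor's note (illustrative): the MAC address is split across two
 * registers -- the first four octets in MAHR, the last two in MALR.
 * For example, 02:00:11:22:33:44 is programmed as MAHR = 0x02001122
 * and MALR = 0x00003344; read_mac_address() below performs the
 * inverse unpacking.
 */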
1180
1181/* Get MAC address from SuperH MAC address register
1182 *
1183 * SuperH's Ethernet device doesn't have a ROM for the MAC address.
1184 * This driver uses the MAC address that was set by the bootloader
1185 * (U-Boot or sh-ipl+g), so the bootloader must program a MAC address
1186 * before this device can be used.
1187 */
1188static void read_mac_address(struct net_device *ndev, unsigned char *mac)
1189{
1190	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
1191		memcpy(ndev->dev_addr, mac, ETH_ALEN);
1192	} else {
1193		u32 mahr = sh_eth_read(ndev, MAHR);
1194		u32 malr = sh_eth_read(ndev, MALR);
1195
1196		ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
1197		ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
1198		ndev->dev_addr[2] = (mahr >>  8) & 0xFF;
1199		ndev->dev_addr[3] = (mahr >>  0) & 0xFF;
1200		ndev->dev_addr[4] = (malr >>  8) & 0xFF;
1201		ndev->dev_addr[5] = (malr >>  0) & 0xFF;
1202	}
1203}
1204
1205struct bb_info {
1206	void (*set_gate)(void *addr);
1207	struct mdiobb_ctrl ctrl;
1208	void *addr;
1209};
1210
1211static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
1212{
1213	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1214	u32 pir;
1215
1216	if (bitbang->set_gate)
1217		bitbang->set_gate(bitbang->addr);
1218
1219	pir = ioread32(bitbang->addr);
1220	if (set)
1221		pir |=  mask;
1222	else
1223		pir &= ~mask;
1224	iowrite32(pir, bitbang->addr);
1225}
1226
1227/* Data I/O pin control */
1228static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1229{
1230	sh_mdio_ctrl(ctrl, PIR_MMD, bit);
1231}
1232
1233/* Set bit data*/
1234static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
1235{
1236	sh_mdio_ctrl(ctrl, PIR_MDO, bit);
1237}
1238
1239/* Get bit data*/
1240static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
1241{
1242	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1243
1244	if (bitbang->set_gate)
1245		bitbang->set_gate(bitbang->addr);
1246
1247	return (ioread32(bitbang->addr) & PIR_MDI) != 0;
1248}
1249
1250/* MDC pin control */
1251static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1252{
1253	sh_mdio_ctrl(ctrl, PIR_MDC, bit);
1254}
1255
1256/* mdio bus control struct */
1257static struct mdiobb_ops bb_ops = {
1258	.owner = THIS_MODULE,
1259	.set_mdc = sh_mdc_ctrl,
1260	.set_mdio_dir = sh_mmd_ctrl,
1261	.set_mdio_data = sh_set_mdio,
1262	.get_mdio_data = sh_get_mdio,
1263};
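/* Editor's note -- a minimal sketch, not the driver's actual probe
 * path: bb_ops is typically handed to the mdio-bitbang framework,
 * which synthesizes whole MDIO read/write transactions from the four
 * pin-level callbacks above.  Error handling is elided here.
 */
#if 0	/* usage sketch only -- not built */
static struct mii_bus *example_mdio_register(struct bb_info *bitbang,
					     struct platform_device *pdev)
{
	struct mii_bus *bus;

	bitbang->ctrl.ops = &bb_ops;
	/* allocate an MDIO bus driven by the bit-banging callbacks */
	bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!bus)
		return NULL;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);
	return bus;
}
#endif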
1264
1265/* free Tx skb function */
1266static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
1267{
1268	struct sh_eth_private *mdp = netdev_priv(ndev);
1269	struct sh_eth_txdesc *txdesc;
1270	int free_num = 0;
1271	int entry;
1272	bool sent;
1273
1274	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1275		entry = mdp->dirty_tx % mdp->num_tx_ring;
1276		txdesc = &mdp->tx_ring[entry];
1277		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
1278		if (sent_only && !sent)
1279			break;
1280		/* TACT bit must be checked before all the following reads */
1281		dma_rmb();
1282		netif_info(mdp, tx_done, ndev,
1283			   "tx entry %d status 0x%08x\n",
1284			   entry, le32_to_cpu(txdesc->status));
1285		/* Free the original skb. */
1286		if (mdp->tx_skbuff[entry]) {
1287			dma_unmap_single(&mdp->pdev->dev,
1288					 le32_to_cpu(txdesc->addr),
1289					 le32_to_cpu(txdesc->len) >> 16,
1290					 DMA_TO_DEVICE);
1291			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1292			mdp->tx_skbuff[entry] = NULL;
1293			free_num++;
1294		}
1295		txdesc->status = cpu_to_le32(TD_TFP);
1296		if (entry >= mdp->num_tx_ring - 1)
1297			txdesc->status |= cpu_to_le32(TD_TDLE);
1298
1299		if (sent) {
1300			ndev->stats.tx_packets++;
1301			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
1302		}
1303	}
1304	return free_num;
1305}
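/* Editor's note (illustrative): cur_tx and dirty_tx are free-running
 * counters; only their difference matters, so the `cur_tx - dirty_tx
 * > 0` condition above stays correct even across unsigned wraparound.
 * For example, with a 64-entry ring, cur_tx = 130 and dirty_tx = 127
 * mean ring slots 127 % 64 = 63, then 0 and 1 are the candidates for
 * freeing.
 */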
1306
1307/* free skb and descriptor buffer */
1308static void sh_eth_ring_free(struct net_device *ndev)
1309{
1310	struct sh_eth_private *mdp = netdev_priv(ndev);
1311	int ringsize, i;
1312
1313	if (mdp->rx_ring) {
1314		for (i = 0; i < mdp->num_rx_ring; i++) {
1315			if (mdp->rx_skbuff[i]) {
1316				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
1317
1318				dma_unmap_single(&mdp->pdev->dev,
1319						 le32_to_cpu(rxdesc->addr),
1320						 ALIGN(mdp->rx_buf_sz, 32),
1321						 DMA_FROM_DEVICE);
1322			}
1323		}
1324		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1325		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
1326				  mdp->rx_desc_dma);
1327		mdp->rx_ring = NULL;
1328	}
1329
1330	/* Free Rx skb ringbuffer */
1331	if (mdp->rx_skbuff) {
1332		for (i = 0; i < mdp->num_rx_ring; i++)
1333			dev_kfree_skb(mdp->rx_skbuff[i]);
1334	}
1335	kfree(mdp->rx_skbuff);
1336	mdp->rx_skbuff = NULL;
1337
1338	if (mdp->tx_ring) {
1339		sh_eth_tx_free(ndev, false);
1340
1341		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1342		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
1343				  mdp->tx_desc_dma);
1344		mdp->tx_ring = NULL;
1345	}
1346
1347	/* Free Tx skb ringbuffer */
1348	kfree(mdp->tx_skbuff);
1349	mdp->tx_skbuff = NULL;
1350}
1351
1352/* format skb and descriptor buffer */
1353static void sh_eth_ring_format(struct net_device *ndev)
1354{
1355	struct sh_eth_private *mdp = netdev_priv(ndev);
1356	int i;
1357	struct sk_buff *skb;
1358	struct sh_eth_rxdesc *rxdesc = NULL;
1359	struct sh_eth_txdesc *txdesc = NULL;
1360	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1361	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1362	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1363	dma_addr_t dma_addr;
1364	u32 buf_len;
1365
1366	mdp->cur_rx = 0;
1367	mdp->cur_tx = 0;
1368	mdp->dirty_rx = 0;
1369	mdp->dirty_tx = 0;
1370
1371	memset(mdp->rx_ring, 0, rx_ringsize);
1372
1373	/* build Rx ring buffer */
1374	for (i = 0; i < mdp->num_rx_ring; i++) {
1375		/* skb */
1376		mdp->rx_skbuff[i] = NULL;
1377		skb = netdev_alloc_skb(ndev, skbuff_size);
1378		if (skb == NULL)
1379			break;
1380		sh_eth_set_receive_align(skb);
1381
1382		/* The size of the buffer is a multiple of 32 bytes. */
1383		buf_len = ALIGN(mdp->rx_buf_sz, 32);
1384		dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
1385					  DMA_FROM_DEVICE);
1386		if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1387			kfree_skb(skb);
1388			break;
1389		}
1390		mdp->rx_skbuff[i] = skb;
1391
1392		/* RX descriptor */
1393		rxdesc = &mdp->rx_ring[i];
1394		rxdesc->len = cpu_to_le32(buf_len << 16);
1395		rxdesc->addr = cpu_to_le32(dma_addr);
1396		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
1397
1398		/* Rx descriptor address set */
1399		if (i == 0) {
1400			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1401			if (mdp->cd->xdfar_rw)
1402				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1403		}
1404	}
1405
1406	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1407
1408	/* Mark the last entry as wrapping the ring. */
1409	if (rxdesc)
1410		rxdesc->status |= cpu_to_le32(RD_RDLE);
1411
1412	memset(mdp->tx_ring, 0, tx_ringsize);
1413
1414	/* build Tx ring buffer */
1415	for (i = 0; i < mdp->num_tx_ring; i++) {
1416		mdp->tx_skbuff[i] = NULL;
1417		txdesc = &mdp->tx_ring[i];
1418		txdesc->status = cpu_to_le32(TD_TFP);
1419		txdesc->len = cpu_to_le32(0);
1420		if (i == 0) {
1421			/* Tx descriptor address set */
1422			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1423			if (mdp->cd->xdfar_rw)
1424				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1425		}
1426	}
1427
1428	txdesc->status |= cpu_to_le32(TD_TDLE);
1429}
1430
1431/* Get skb and descriptor buffer */
1432static int sh_eth_ring_init(struct net_device *ndev)
1433{
1434	struct sh_eth_private *mdp = netdev_priv(ndev);
1435	int rx_ringsize, tx_ringsize;
1436
1437	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 rounds up
1438	 * to an 8-byte boundary because the controller needs room for
1439	 * 8-byte alignment, +2 lets us reserve the first 2 bytes, and +16
1440	 * leaves room for the status word from the controller.
1441	 */
1442	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1443			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
1444	if (mdp->cd->rpadir)
1445		mdp->rx_buf_sz += NET_IP_ALIGN;
1446
1447	/* Allocate RX and TX skb rings */
1448	mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
1449				 GFP_KERNEL);
1450	if (!mdp->rx_skbuff)
1451		return -ENOMEM;
1452
1453	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
1454				 GFP_KERNEL);
1455	if (!mdp->tx_skbuff)
1456		goto ring_free;
1457
1458	/* Allocate all Rx descriptors. */
1459	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1460	mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
1461					  &mdp->rx_desc_dma, GFP_KERNEL);
1462	if (!mdp->rx_ring)
1463		goto ring_free;
1464
1465	mdp->dirty_rx = 0;
1466
1467	/* Allocate all Tx descriptors. */
1468	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1469	mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
1470					  &mdp->tx_desc_dma, GFP_KERNEL);
1471	if (!mdp->tx_ring)
1472		goto ring_free;
1473	return 0;
1474
1475ring_free:
1476	/* Free Rx and Tx skb ring buffer and DMA buffer */
1477	sh_eth_ring_free(ndev);
1478
1479	return -ENOMEM;
1480}
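/* Editor's note (illustrative): for the default MTU of 1500 the
 * formula above yields ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 =
 * 1546 bytes per Rx buffer, while an MTU of 1492 or less falls back
 * to PKT_BUF_SZ; with cd->rpadir set, NET_IP_ALIGN is added on top.
 */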
1481
1482static int sh_eth_dev_init(struct net_device *ndev)
1483{
1484	struct sh_eth_private *mdp = netdev_priv(ndev);
1485	int ret;
1486
1487	/* Soft Reset */
1488	ret = mdp->cd->soft_reset(ndev);
1489	if (ret)
1490		return ret;
1491
1492	if (mdp->cd->rmiimode)
1493		sh_eth_write(ndev, 0x1, RMIIMODE);
1494
1495	/* Descriptor format */
1496	sh_eth_ring_format(ndev);
1497	if (mdp->cd->rpadir)
1498		sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR);
1499
1500	/* mask all sh_eth interrupts */
1501	sh_eth_write(ndev, 0, EESIPR);
1502
1503#if defined(__LITTLE_ENDIAN)
1504	if (mdp->cd->hw_swap)
1505		sh_eth_write(ndev, EDMR_EL, EDMR);
1506	else
1507#endif
1508		sh_eth_write(ndev, 0, EDMR);
1509
1510	/* FIFO size set */
1511	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1512	sh_eth_write(ndev, 0, TFTR);
1513
1514	/* Frame recv control (enable multiple-packets per rx irq) */
1515	sh_eth_write(ndev, RMCR_RNC, RMCR);
1516
1517	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
1518
1519	/* DMA transfer burst mode */
1520	if (mdp->cd->nbst)
1521		sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST);
1522
1523	/* Burst cycle count upper-limit */
1524	if (mdp->cd->bculr)
1525		sh_eth_write(ndev, 0x800, BCULR);
1526
1527	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1528
1529	if (!mdp->cd->no_trimd)
1530		sh_eth_write(ndev, 0, TRIMD);
1531
1532	/* Recv frame limit set register */
1533	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1534		     RFLR);
1535
1536	sh_eth_modify(ndev, EESR, 0, 0);
1537	mdp->irq_enabled = true;
1538	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1539
1540	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
1541	sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
1542		     (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
1543		     ECMR_TE | ECMR_RE, ECMR);
1544
1545	if (mdp->cd->set_rate)
1546		mdp->cd->set_rate(ndev);
1547
1548	/* E-MAC Status Register clear */
1549	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1550
1551	/* E-MAC Interrupt Enable register */
1552	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1553
1554	/* Set MAC address */
1555	update_mac_address(ndev);
1556
1557	/* mask reset */
1558	if (mdp->cd->apr)
1559		sh_eth_write(ndev, 1, APR);
1560	if (mdp->cd->mpr)
1561		sh_eth_write(ndev, 1, MPR);
1562	if (mdp->cd->tpauser)
1563		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1564
1565	/* Setting the Rx mode will start the Rx process. */
1566	sh_eth_write(ndev, EDRRR_R, EDRRR);
1567
1568	return ret;
1569}
1570
1571static void sh_eth_dev_exit(struct net_device *ndev)
1572{
1573	struct sh_eth_private *mdp = netdev_priv(ndev);
1574	int i;
1575
1576	/* Deactivate all TX descriptors, so DMA should stop at the next
1577	 * packet boundary if it's currently running
1578	 */
1579	for (i = 0; i < mdp->num_tx_ring; i++)
1580		mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);
1581
1582	/* Disable TX FIFO egress to MAC */
1583	sh_eth_rcv_snd_disable(ndev);
1584
1585	/* Stop RX DMA at next packet boundary */
1586	sh_eth_write(ndev, 0, EDRRR);
1587
1588	/* Aside from TX DMA, we can't tell when the hardware is
1589	 * really stopped, so we need to reset to make sure.
1590	 * Before doing that, wait for long enough to *probably*
1591	 * finish transmitting the last packet and poll stats.
1592	 */
1593	msleep(2); /* max frame time at 10 Mbps < 1250 us */
1594	sh_eth_get_stats(ndev);
1595	mdp->cd->soft_reset(ndev);
1596
1597	/* Set the RMII mode again if required */
1598	if (mdp->cd->rmiimode)
1599		sh_eth_write(ndev, 0x1, RMIIMODE);
1600
1601	/* Set MAC address again */
1602	update_mac_address(ndev);
1603}
1604
1605static void sh_eth_rx_csum(struct sk_buff *skb)
1606{
1607	u8 *hw_csum;
1608
1609	/* The hardware checksum is 2 bytes appended to packet data */
1610	if (unlikely(skb->len < sizeof(__sum16)))
1611		return;
1612	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
1613	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
1614	skb->ip_summed = CHECKSUM_COMPLETE;
1615	skb_trim(skb, skb->len - sizeof(__sum16));
1616}
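/* Editor's note (illustrative): with CHECKSUM_COMPLETE the stack
 * expects skb->csum to hold the ones'-complement sum of the packet.
 * This hardware appends that sum as a trailing 16-bit value, so the
 * helper above folds it via csum_unfold() and then trims the 2 extra
 * bytes off so the frame length is correct again.
 */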
1617
1618/* Packet receive function */
1619static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1620{
1621	struct sh_eth_private *mdp = netdev_priv(ndev);
1622	struct sh_eth_rxdesc *rxdesc;
1623
1624	int entry = mdp->cur_rx % mdp->num_rx_ring;
1625	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1626	int limit;
1627	struct sk_buff *skb;
1628	u32 desc_status;
1629	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1630	dma_addr_t dma_addr;
1631	u16 pkt_len;
1632	u32 buf_len;
1633
1634	boguscnt = min(boguscnt, *quota);
1635	limit = boguscnt;
1636	rxdesc = &mdp->rx_ring[entry];
1637	while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
1638		/* RACT bit must be checked before all the following reads */
1639		dma_rmb();
1640		desc_status = le32_to_cpu(rxdesc->status);
1641		pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL;
1642
1643		if (--boguscnt < 0)
1644			break;
1645
1646		netif_info(mdp, rx_status, ndev,
1647			   "rx entry %d status 0x%08x len %d\n",
1648			   entry, desc_status, pkt_len);
1649
1650		if (!(desc_status & RDFEND))
1651			ndev->stats.rx_length_errors++;
1652
1653		/* On almost all GETHER/ETHER controllers, the Receive
1654		 * Frame State (RFS) bits in Receive Descriptor 0 occupy
1655		 * bits 9 to 0.  However, on the R8A7740 and R7S72100 the
1656		 * RFS bits occupy bits 25 to 16, so the driver needs to
1657		 * shift them right by 16.
1658		 */
1659		if (mdp->cd->csmr)
1660			desc_status >>= 16;
1661
1662		skb = mdp->rx_skbuff[entry];
1663		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1664				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1665			ndev->stats.rx_errors++;
1666			if (desc_status & RD_RFS1)
1667				ndev->stats.rx_crc_errors++;
1668			if (desc_status & RD_RFS2)
1669				ndev->stats.rx_frame_errors++;
1670			if (desc_status & RD_RFS3)
1671				ndev->stats.rx_length_errors++;
1672			if (desc_status & RD_RFS4)
1673				ndev->stats.rx_length_errors++;
1674			if (desc_status & RD_RFS6)
1675				ndev->stats.rx_missed_errors++;
1676			if (desc_status & RD_RFS10)
1677				ndev->stats.rx_over_errors++;
1678		} else	if (skb) {
1679			dma_addr = le32_to_cpu(rxdesc->addr);
1680			if (!mdp->cd->hw_swap)
1681				sh_eth_soft_swap(
1682					phys_to_virt(ALIGN(dma_addr, 4)),
1683					pkt_len + 2);
1684			mdp->rx_skbuff[entry] = NULL;
1685			if (mdp->cd->rpadir)
1686				skb_reserve(skb, NET_IP_ALIGN);
1687			dma_unmap_single(&mdp->pdev->dev, dma_addr,
1688					 ALIGN(mdp->rx_buf_sz, 32),
1689					 DMA_FROM_DEVICE);
1690			skb_put(skb, pkt_len);
1691			skb->protocol = eth_type_trans(skb, ndev);
1692			if (ndev->features & NETIF_F_RXCSUM)
1693				sh_eth_rx_csum(skb);
1694			netif_receive_skb(skb);
1695			ndev->stats.rx_packets++;
1696			ndev->stats.rx_bytes += pkt_len;
1697			if (desc_status & RD_RFS8)
1698				ndev->stats.multicast++;
1699		}
1700		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1701		rxdesc = &mdp->rx_ring[entry];
1702	}
1703
1704	/* Refill the Rx ring buffers. */
1705	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1706		entry = mdp->dirty_rx % mdp->num_rx_ring;
1707		rxdesc = &mdp->rx_ring[entry];
1708		/* The size of the buffer is a multiple of 32 bytes. */
1709		buf_len = ALIGN(mdp->rx_buf_sz, 32);
1710		rxdesc->len = cpu_to_le32(buf_len << 16);
1711
1712		if (mdp->rx_skbuff[entry] == NULL) {
1713			skb = netdev_alloc_skb(ndev, skbuff_size);
1714			if (skb == NULL)
1715				break;	/* Better luck next round. */
1716			sh_eth_set_receive_align(skb);
1717			dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
1718						  buf_len, DMA_FROM_DEVICE);
1719			if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1720				kfree_skb(skb);
1721				break;
1722			}
1723			mdp->rx_skbuff[entry] = skb;
1724
1725			skb_checksum_none_assert(skb);
1726			rxdesc->addr = cpu_to_le32(dma_addr);
1727		}
1728		dma_wmb(); /* RACT bit must be set after all the above writes */
1729		if (entry >= mdp->num_rx_ring - 1)
1730			rxdesc->status |=
1731				cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE);
1732		else
1733			rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP);
1734	}
1735
1736	/* Restart Rx engine if stopped. */
1737	/* If we don't need to check status, don't. -KDU */
1738	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1739		/* fix the values for the next receiving if RDE is set */
1740		if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) {
1741			u32 count = (sh_eth_read(ndev, RDFAR) -
1742				     sh_eth_read(ndev, RDLAR)) >> 4;
1743
1744			mdp->cur_rx = count;
1745			mdp->dirty_rx = count;
1746		}
1747		sh_eth_write(ndev, EDRRR_R, EDRRR);
1748	}
1749
1750	*quota -= limit - boguscnt - 1;
1751
1752	return *quota <= 0;
1753}
1754
1755static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1756{
1757	/* disable tx and rx */
1758	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
1759}
1760
1761static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1762{
1763	/* enable tx and rx */
1764	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
1765}
1766
1767/* E-MAC interrupt handler */
1768static void sh_eth_emac_interrupt(struct net_device *ndev)
1769{
1770	struct sh_eth_private *mdp = netdev_priv(ndev);
1771	u32 felic_stat;
1772	u32 link_stat;
1773
1774	felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR);
1775	sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
1776	if (felic_stat & ECSR_ICD)
1777		ndev->stats.tx_carrier_errors++;
1778	if (felic_stat & ECSR_MPD)
1779		pm_wakeup_event(&mdp->pdev->dev, 0);
1780	if (felic_stat & ECSR_LCHNG) {
1781		/* Link Changed */
1782		if (mdp->cd->no_psr || mdp->no_ether_link)
1783			return;
1784		link_stat = sh_eth_read(ndev, PSR);
1785		if (mdp->ether_link_active_low)
1786			link_stat = ~link_stat;
1787		if (!(link_stat & PHY_ST_LINK)) {
1788			sh_eth_rcv_snd_disable(ndev);
1789		} else {
1790			/* Link Up */
1791			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0);
1792			/* clear int */
1793			sh_eth_modify(ndev, ECSR, 0, 0);
1794			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP);
1795			/* enable tx and rx */
1796			sh_eth_rcv_snd_enable(ndev);
1797		}
1798	}
1799}
1800
1801/* error control function */
1802static void sh_eth_error(struct net_device *ndev, u32 intr_status)
1803{
1804	struct sh_eth_private *mdp = netdev_priv(ndev);
1805	u32 mask;
1806
1807	if (intr_status & EESR_TWB) {
1808		/* Unused write back interrupt */
1809		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
1810			ndev->stats.tx_aborted_errors++;
1811			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1812		}
1813	}
1814
1815	if (intr_status & EESR_RABT) {
1816		/* Receive Abort int */
1817		if (intr_status & EESR_RFRMER) {
1818			/* Receive Frame Overflow int */
1819			ndev->stats.rx_frame_errors++;
1820		}
1821	}
1822
1823	if (intr_status & EESR_TDE) {
1824		/* Transmit Descriptor Empty int */
1825		ndev->stats.tx_fifo_errors++;
1826		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1827	}
1828
1829	if (intr_status & EESR_TFE) {
1830		/* FIFO under flow */
1831		ndev->stats.tx_fifo_errors++;
1832		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1833	}
1834
1835	if (intr_status & EESR_RDE) {
1836		/* Receive Descriptor Empty int */
1837		ndev->stats.rx_over_errors++;
1838	}
1839
1840	if (intr_status & EESR_RFE) {
1841		/* Receive FIFO Overflow int */
1842		ndev->stats.rx_fifo_errors++;
1843	}
1844
1845	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1846		/* Address Error */
1847		ndev->stats.tx_fifo_errors++;
1848		netif_err(mdp, tx_err, ndev, "Address Error\n");
1849	}
1850
1851	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1852	if (mdp->cd->no_ade)
1853		mask &= ~EESR_ADE;
1854	if (intr_status & mask) {
1855		/* Tx error */
1856		u32 edtrr = sh_eth_read(ndev, EDTRR);
1857
1858		/* dmesg */
1859		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1860			   intr_status, mdp->cur_tx, mdp->dirty_tx,
1861			   (u32)ndev->state, edtrr);
1862		/* dirty buffer free */
1863		sh_eth_tx_free(ndev, true);
1864
1865		/* SH7712 BUG */
1866		if (edtrr ^ mdp->cd->edtrr_trns) {
1867			/* tx dma start */
1868			sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
1869		}
1870		/* wakeup */
1871		netif_wake_queue(ndev);
1872	}
1873}
1874
1875static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1876{
1877	struct net_device *ndev = netdev;
1878	struct sh_eth_private *mdp = netdev_priv(ndev);
1879	struct sh_eth_cpu_data *cd = mdp->cd;
1880	irqreturn_t ret = IRQ_NONE;
1881	u32 intr_status, intr_enable;
1882
1883	spin_lock(&mdp->lock);
1884
1885	/* Get interrupt status */
1886	intr_status = sh_eth_read(ndev, EESR);
1887	/* Mask it with the interrupt mask, forcing the ECI interrupt to be
1888	 * always enabled since it's the one that comes through regardless
1889	 * of the mask, and we need to fully handle it in
1890	 * sh_eth_emac_interrupt() in order to quench it, as it doesn't get
1891	 * cleared by just writing 1 to the ECI bit...
1892	 */
1893	intr_enable = sh_eth_read(ndev, EESIPR);
1894	intr_status &= intr_enable | EESIPR_ECIIP;
1895	if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
1896			   cd->eesr_err_check))
1897		ret = IRQ_HANDLED;
1898	else
1899		goto out;
1900
1901	if (unlikely(!mdp->irq_enabled)) {
1902		sh_eth_write(ndev, 0, EESIPR);
1903		goto out;
1904	}
1905
1906	if (intr_status & EESR_RX_CHECK) {
1907		if (napi_schedule_prep(&mdp->napi)) {
1908			/* Mask Rx interrupts */
1909			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1910				     EESIPR);
1911			__napi_schedule(&mdp->napi);
1912		} else {
1913			netdev_warn(ndev,
1914				    "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
1915				    intr_status, intr_enable);
1916		}
1917	}
1918
1919	/* Tx Check */
1920	if (intr_status & cd->tx_check) {
1921		/* Clear Tx interrupts */
1922		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1923
1924		sh_eth_tx_free(ndev, true);
1925		netif_wake_queue(ndev);
1926	}
1927
1928	/* E-MAC interrupt */
1929	if (intr_status & EESR_ECI)
1930		sh_eth_emac_interrupt(ndev);
1931
1932	if (intr_status & cd->eesr_err_check) {
1933		/* Clear error interrupts */
1934		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1935
1936		sh_eth_error(ndev, intr_status);
1937	}
1938
1939out:
1940	spin_unlock(&mdp->lock);
1941
1942	return ret;
1943}
1944
1945static int sh_eth_poll(struct napi_struct *napi, int budget)
1946{
1947	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1948						  napi);
1949	struct net_device *ndev = napi->dev;
1950	int quota = budget;
1951	u32 intr_status;
1952
1953	for (;;) {
1954		intr_status = sh_eth_read(ndev, EESR);
1955		if (!(intr_status & EESR_RX_CHECK))
1956			break;
1957		/* Clear Rx interrupts */
1958		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1959
1960		if (sh_eth_rx(ndev, intr_status, &quota))
1961			goto out;
1962	}
1963
1964	napi_complete(napi);
1965
1966	/* Reenable Rx interrupts */
1967	if (mdp->irq_enabled)
1968		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1969out:
1970	return budget - quota;
1971}
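/* Editor's note (illustrative): a NAPI poll callback returns how many
 * packets it consumed and must never exceed @budget.  sh_eth_rx()
 * charges its work against *quota, so poll returns budget - quota;
 * the driver re-enables Rx interrupts (after napi_complete()) only
 * once a poll stops exhausting its budget.
 */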
1972
1973/* PHY state control function */
1974static void sh_eth_adjust_link(struct net_device *ndev)
1975{
1976	struct sh_eth_private *mdp = netdev_priv(ndev);
1977	struct phy_device *phydev = ndev->phydev;
1978	unsigned long flags;
1979	int new_state = 0;
1980
1981	spin_lock_irqsave(&mdp->lock, flags);
1982
1983	/* Disable TX and RX right over here, if E-MAC change is ignored */
1984	if (mdp->cd->no_psr || mdp->no_ether_link)
1985		sh_eth_rcv_snd_disable(ndev);
1986
1987	if (phydev->link) {
1988		if (phydev->duplex != mdp->duplex) {
1989			new_state = 1;
1990			mdp->duplex = phydev->duplex;
1991			if (mdp->cd->set_duplex)
1992				mdp->cd->set_duplex(ndev);
1993		}
1994
1995		if (phydev->speed != mdp->speed) {
1996			new_state = 1;
1997			mdp->speed = phydev->speed;
1998			if (mdp->cd->set_rate)
1999				mdp->cd->set_rate(ndev);
2000		}
2001		if (!mdp->link) {
2002			sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
2003			new_state = 1;
2004			mdp->link = phydev->link;
2005		}
2006	} else if (mdp->link) {
2007		new_state = 1;
2008		mdp->link = 0;
2009		mdp->speed = 0;
2010		mdp->duplex = -1;
2011	}
2012
2013	/* Enable TX and RX right over here, if E-MAC change is ignored */
2014	if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
2015		sh_eth_rcv_snd_enable(ndev);
2016
2017	spin_unlock_irqrestore(&mdp->lock, flags);
2018
2019	if (new_state && netif_msg_link(mdp))
2020		phy_print_status(phydev);
2021}
2022
2023/* PHY init function */
2024static int sh_eth_phy_init(struct net_device *ndev)
2025{
2026	struct device_node *np = ndev->dev.parent->of_node;
2027	struct sh_eth_private *mdp = netdev_priv(ndev);
2028	struct phy_device *phydev;
2029
2030	mdp->link = 0;
2031	mdp->speed = 0;
2032	mdp->duplex = -1;
2033
2034	/* Try connect to PHY */
2035	if (np) {
2036		struct device_node *pn;
2037
2038		pn = of_parse_phandle(np, "phy-handle", 0);
2039		phydev = of_phy_connect(ndev, pn,
2040					sh_eth_adjust_link, 0,
2041					mdp->phy_interface);
2042
2043		of_node_put(pn);
2044		if (!phydev)
2045			phydev = ERR_PTR(-ENOENT);
2046	} else {
2047		char phy_id[MII_BUS_ID_SIZE + 3];
2048
2049		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
2050			 mdp->mii_bus->id, mdp->phy_id);
2051
2052		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
2053				     mdp->phy_interface);
2054	}
2055
2056	if (IS_ERR(phydev)) {
2057		netdev_err(ndev, "failed to connect PHY\n");
2058		return PTR_ERR(phydev);
2059	}
2060
2061	/* mask with MAC supported features */
2062	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
2063		int err = phy_set_max_speed(phydev, SPEED_100);
2064		if (err) {
2065			netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
2066			phy_disconnect(phydev);
2067			return err;
2068		}
2069	}
2070
2070
2071	phy_attached_info(phydev);
2072
2073	return 0;
2074}
2075
2076/* PHY control start function */
2077static int sh_eth_phy_start(struct net_device *ndev)
2078{
2079	int ret;
2080
2081	ret = sh_eth_phy_init(ndev);
2082	if (ret)
2083		return ret;
2084
2085	phy_start(ndev->phydev);
2086
2087	return 0;
2088}
2089
2090/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
2091 * version must be bumped as well.  Just adding registers up to that
2092 * limit is fine, as long as the existing register indices don't
2093 * change.
2094 */
2095#define SH_ETH_REG_DUMP_VERSION		1
2096#define SH_ETH_REG_DUMP_MAX_REGS	256
2097
2098static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
2099{
2100	struct sh_eth_private *mdp = netdev_priv(ndev);
2101	struct sh_eth_cpu_data *cd = mdp->cd;
2102	u32 *valid_map;
2103	size_t len;
2104
2105	BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
2106
2107	/* Dump starts with a bitmap that tells ethtool which
2108	 * registers are defined for this chip.
2109	 */
2110	len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
2111	if (buf) {
2112		valid_map = buf;
2113		buf += len;
2114	} else {
2115		valid_map = NULL;
2116	}
2117
2118	/* Add a register to the dump, if it has a defined offset.
2119	 * This automatically skips most undefined registers, but for
2120	 * some it is also necessary to check a capability flag in
2121	 * struct sh_eth_cpu_data.
2122	 */
2123#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
2124#define add_reg_from(reg, read_expr) do {				\
2125		if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {	\
2126			if (buf) {					\
2127				mark_reg_valid(reg);			\
2128				*buf++ = read_expr;			\
2129			}						\
2130			++len;						\
2131		}							\
2132	} while (0)
2133#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
2134#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
2135
2136	add_reg(EDSR);
2137	add_reg(EDMR);
2138	add_reg(EDTRR);
2139	add_reg(EDRRR);
2140	add_reg(EESR);
2141	add_reg(EESIPR);
2142	add_reg(TDLAR);
2143	add_reg(TDFAR);
2144	add_reg(TDFXR);
2145	add_reg(TDFFR);
2146	add_reg(RDLAR);
2147	add_reg(RDFAR);
2148	add_reg(RDFXR);
2149	add_reg(RDFFR);
2150	add_reg(TRSCER);
2151	add_reg(RMFCR);
2152	add_reg(TFTR);
2153	add_reg(FDR);
2154	add_reg(RMCR);
2155	add_reg(TFUCR);
2156	add_reg(RFOCR);
2157	if (cd->rmiimode)
2158		add_reg(RMIIMODE);
2159	add_reg(FCFTR);
2160	if (cd->rpadir)
2161		add_reg(RPADIR);
2162	if (!cd->no_trimd)
2163		add_reg(TRIMD);
2164	add_reg(ECMR);
2165	add_reg(ECSR);
2166	add_reg(ECSIPR);
2167	add_reg(PIR);
2168	if (!cd->no_psr)
2169		add_reg(PSR);
2170	add_reg(RDMLR);
2171	add_reg(RFLR);
2172	add_reg(IPGR);
2173	if (cd->apr)
2174		add_reg(APR);
2175	if (cd->mpr)
2176		add_reg(MPR);
2177	add_reg(RFCR);
2178	add_reg(RFCF);
2179	if (cd->tpauser)
2180		add_reg(TPAUSER);
2181	add_reg(TPAUSECR);
2182	add_reg(GECMR);
2183	if (cd->bculr)
2184		add_reg(BCULR);
2185	add_reg(MAHR);
2186	add_reg(MALR);
2187	add_reg(TROCR);
2188	add_reg(CDCR);
2189	add_reg(LCCR);
2190	add_reg(CNDCR);
2191	add_reg(CEFCR);
2192	add_reg(FRECR);
2193	add_reg(TSFRCR);
2194	add_reg(TLFRCR);
2195	add_reg(CERCR);
2196	add_reg(CEECR);
2197	add_reg(MAFCR);
2198	if (cd->rtrate)
2199		add_reg(RTRATE);
2200	if (cd->csmr)
2201		add_reg(CSMR);
2202	if (cd->select_mii)
2203		add_reg(RMII_MII);
2204	if (cd->tsu) {
2205		add_tsu_reg(ARSTR);
2206		add_tsu_reg(TSU_CTRST);
2207		add_tsu_reg(TSU_FWEN0);
2208		add_tsu_reg(TSU_FWEN1);
2209		add_tsu_reg(TSU_FCM);
2210		add_tsu_reg(TSU_BSYSL0);
2211		add_tsu_reg(TSU_BSYSL1);
2212		add_tsu_reg(TSU_PRISL0);
2213		add_tsu_reg(TSU_PRISL1);
2214		add_tsu_reg(TSU_FWSL0);
2215		add_tsu_reg(TSU_FWSL1);
2216		add_tsu_reg(TSU_FWSLC);
2217		add_tsu_reg(TSU_QTAGM0);
2218		add_tsu_reg(TSU_QTAGM1);
2219		add_tsu_reg(TSU_FWSR);
2220		add_tsu_reg(TSU_FWINMK);
2221		add_tsu_reg(TSU_ADQT0);
2222		add_tsu_reg(TSU_ADQT1);
2223		add_tsu_reg(TSU_VTAG0);
2224		add_tsu_reg(TSU_VTAG1);
2225		add_tsu_reg(TSU_ADSBSY);
2226		add_tsu_reg(TSU_TEN);
2227		add_tsu_reg(TSU_POST1);
2228		add_tsu_reg(TSU_POST2);
2229		add_tsu_reg(TSU_POST3);
2230		add_tsu_reg(TSU_POST4);
2231		/* This is the start of a table, not just a single register. */
2232		if (buf) {
2233			unsigned int i;
2234
2235			mark_reg_valid(TSU_ADRH0);
2236			for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
2237				*buf++ = ioread32(mdp->tsu_addr +
2238						  mdp->reg_offset[TSU_ADRH0] +
2239						  i * 4);
2240		}
2241		len += SH_ETH_TSU_CAM_ENTRIES * 2;
2242	}
2243
2244#undef mark_reg_valid
2245#undef add_reg_from
2246#undef add_reg
2247#undef add_tsu_reg
2248
2249	return len * 4;
2250}
2251
2252static int sh_eth_get_regs_len(struct net_device *ndev)
2253{
2254	return __sh_eth_get_regs(ndev, NULL);
2255}
2256
2257static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
2258			    void *buf)
2259{
2260	struct sh_eth_private *mdp = netdev_priv(ndev);
2261
2262	regs->version = SH_ETH_REG_DUMP_VERSION;
2263
2264	pm_runtime_get_sync(&mdp->pdev->dev);
2265	__sh_eth_get_regs(ndev, buf);
2266	pm_runtime_put_sync(&mdp->pdev->dev);
2267}
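/* Editor's note (illustrative): ethtool drives the register dump as a
 * two-pass protocol -- it first calls .get_regs_len (buf == NULL, so
 * __sh_eth_get_regs() only counts), allocates that many bytes, then
 * calls .get_regs to fill in the validity bitmap and register values.
 * From userspace the dump can be fetched with e.g. `ethtool -d eth0
 * raw on`.
 */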
2268
2269static u32 sh_eth_get_msglevel(struct net_device *ndev)
2270{
2271	struct sh_eth_private *mdp = netdev_priv(ndev);
2272	return mdp->msg_enable;
2273}
2274
2275static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
2276{
2277	struct sh_eth_private *mdp = netdev_priv(ndev);
2278	mdp->msg_enable = value;
2279}
2280
2281static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
2282	"rx_current", "tx_current",
2283	"rx_dirty", "tx_dirty",
2284};
2285#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
2286
2287static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
2288{
2289	switch (sset) {
2290	case ETH_SS_STATS:
2291		return SH_ETH_STATS_LEN;
2292	default:
2293		return -EOPNOTSUPP;
2294	}
2295}
2296
2297static void sh_eth_get_ethtool_stats(struct net_device *ndev,
2298				     struct ethtool_stats *stats, u64 *data)
2299{
2300	struct sh_eth_private *mdp = netdev_priv(ndev);
2301	int i = 0;
2302
2303	/* device-specific stats */
2304	data[i++] = mdp->cur_rx;
2305	data[i++] = mdp->cur_tx;
2306	data[i++] = mdp->dirty_rx;
2307	data[i++] = mdp->dirty_tx;
2308}
2309
2310static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
2311{
2312	switch (stringset) {
2313	case ETH_SS_STATS:
2314		memcpy(data, *sh_eth_gstrings_stats,
2315		       sizeof(sh_eth_gstrings_stats));
2316		break;
2317	}
2318}
2319
2320static void sh_eth_get_ringparam(struct net_device *ndev,
2321				 struct ethtool_ringparam *ring)
2322{
2323	struct sh_eth_private *mdp = netdev_priv(ndev);
2324
2325	ring->rx_max_pending = RX_RING_MAX;
2326	ring->tx_max_pending = TX_RING_MAX;
2327	ring->rx_pending = mdp->num_rx_ring;
2328	ring->tx_pending = mdp->num_tx_ring;
2329}
2330
2331static int sh_eth_set_ringparam(struct net_device *ndev,
2332				struct ethtool_ringparam *ring)
2333{
2334	struct sh_eth_private *mdp = netdev_priv(ndev);
2335	int ret;
2336
2337	if (ring->tx_pending > TX_RING_MAX ||
2338	    ring->rx_pending > RX_RING_MAX ||
2339	    ring->tx_pending < TX_RING_MIN ||
2340	    ring->rx_pending < RX_RING_MIN)
2341		return -EINVAL;
2342	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2343		return -EINVAL;
2344
2345	if (netif_running(ndev)) {
2346		netif_device_detach(ndev);
2347		netif_tx_disable(ndev);
2348
2349		/* Serialise with the interrupt handler and NAPI, then
2350		 * disable interrupts.  We have to clear the
2351		 * irq_enabled flag first to ensure that interrupts
2352		 * won't be re-enabled.
2353		 */
2354		mdp->irq_enabled = false;
2355		synchronize_irq(ndev->irq);
2356		napi_synchronize(&mdp->napi);
2357		sh_eth_write(ndev, 0x0000, EESIPR);
2358
2359		sh_eth_dev_exit(ndev);
2360
2361		/* Free all the skbuffs in the Rx queue and the DMA buffers. */
2362		sh_eth_ring_free(ndev);
2363	}
2364
2365	/* Set new parameters */
2366	mdp->num_rx_ring = ring->rx_pending;
2367	mdp->num_tx_ring = ring->tx_pending;
2368
2369	if (netif_running(ndev)) {
2370		ret = sh_eth_ring_init(ndev);
2371		if (ret < 0) {
2372			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
2373				   __func__);
2374			return ret;
2375		}
2376		ret = sh_eth_dev_init(ndev);
2377		if (ret < 0) {
2378			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2379				   __func__);
2380			return ret;
2381		}
2382
2383		netif_device_attach(ndev);
2384	}
2385
2386	return 0;
2387}
2388
2389static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2390{
2391	struct sh_eth_private *mdp = netdev_priv(ndev);
2392
2393	wol->supported = 0;
2394	wol->wolopts = 0;
2395
2396	if (mdp->cd->magic) {
2397		wol->supported = WAKE_MAGIC;
2398		wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
2399	}
2400}
2401
2402static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2403{
2404	struct sh_eth_private *mdp = netdev_priv(ndev);
2405
2406	if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
2407		return -EOPNOTSUPP;
2408
2409	mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
2410
2411	device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);
2412
2413	return 0;
2414}
2415
2416static const struct ethtool_ops sh_eth_ethtool_ops = {
2417	.get_regs_len	= sh_eth_get_regs_len,
2418	.get_regs	= sh_eth_get_regs,
2419	.nway_reset	= phy_ethtool_nway_reset,
2420	.get_msglevel	= sh_eth_get_msglevel,
2421	.set_msglevel	= sh_eth_set_msglevel,
2422	.get_link	= ethtool_op_get_link,
2423	.get_strings	= sh_eth_get_strings,
2424	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
2425	.get_sset_count     = sh_eth_get_sset_count,
2426	.get_ringparam	= sh_eth_get_ringparam,
2427	.set_ringparam	= sh_eth_set_ringparam,
2428	.get_link_ksettings = phy_ethtool_get_link_ksettings,
2429	.set_link_ksettings = phy_ethtool_set_link_ksettings,
2430	.get_wol	= sh_eth_get_wol,
2431	.set_wol	= sh_eth_set_wol,
2432};
2433
2434/* network device open function */
2435static int sh_eth_open(struct net_device *ndev)
2436{
2437	struct sh_eth_private *mdp = netdev_priv(ndev);
2438	int ret;
2439
2440	pm_runtime_get_sync(&mdp->pdev->dev);
2441
2442	napi_enable(&mdp->napi);
2443
2444	ret = request_irq(ndev->irq, sh_eth_interrupt,
2445			  mdp->cd->irq_flags, ndev->name, ndev);
2446	if (ret) {
2447		netdev_err(ndev, "Cannot assign IRQ number\n");
2448		goto out_napi_off;
2449	}
2450
2451	/* Descriptor set */
2452	ret = sh_eth_ring_init(ndev);
2453	if (ret)
2454		goto out_free_irq;
2455
2456	/* device init */
2457	ret = sh_eth_dev_init(ndev);
2458	if (ret)
2459		goto out_free_irq;
2460
2461	/* PHY control start */
2462	ret = sh_eth_phy_start(ndev);
2463	if (ret)
2464		goto out_free_irq;
2465
2466	netif_start_queue(ndev);
2467
2468	mdp->is_opened = 1;
2469
2470	return ret;
2471
2472out_free_irq:
2473	free_irq(ndev->irq, ndev);
2474out_napi_off:
2475	napi_disable(&mdp->napi);
2476	pm_runtime_put_sync(&mdp->pdev->dev);
2477	return ret;
2478}
2479
2480/* Timeout function */
2481static void sh_eth_tx_timeout(struct net_device *ndev)
2482{
2483	struct sh_eth_private *mdp = netdev_priv(ndev);
2484	struct sh_eth_rxdesc *rxdesc;
2485	int i;
2486
2487	netif_stop_queue(ndev);
2488
2489	netif_err(mdp, timer, ndev,
2490		  "transmit timed out, status %8.8x, resetting...\n",
2491		  sh_eth_read(ndev, EESR));
2492
2493	/* bump the tx_errors counter */
2494	ndev->stats.tx_errors++;
2495
2496	/* Free all the skbuffs in the Rx queue. */
2497	for (i = 0; i < mdp->num_rx_ring; i++) {
2498		rxdesc = &mdp->rx_ring[i];
2499		rxdesc->status = cpu_to_le32(0);
2500		rxdesc->addr = cpu_to_le32(0xBADF00D0); /* poison value, not a valid DMA address */
2501		dev_kfree_skb(mdp->rx_skbuff[i]);
2502		mdp->rx_skbuff[i] = NULL;
2503	}
2504	for (i = 0; i < mdp->num_tx_ring; i++) {
2505		dev_kfree_skb(mdp->tx_skbuff[i]);
2506		mdp->tx_skbuff[i] = NULL;
2507	}
2508
2509	/* device init */
2510	sh_eth_dev_init(ndev);
2511
2512	netif_start_queue(ndev);
2513}
2514
2515/* Packet transmit function */
2516static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2517{
2518	struct sh_eth_private *mdp = netdev_priv(ndev);
2519	struct sh_eth_txdesc *txdesc;
2520	dma_addr_t dma_addr;
2521	u32 entry;
2522	unsigned long flags;
2523
2524	spin_lock_irqsave(&mdp->lock, flags);
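	/* cur_tx and dirty_tx are free-running counters, so their
	 * difference is the number of TX descriptors still in use; the
	 * "- 4" leaves a little slack before trying to reclaim entries
	 * via sh_eth_tx_free() or stopping the queue.
	 */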
2525	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2526		if (!sh_eth_tx_free(ndev, true)) {
2527			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2528			netif_stop_queue(ndev);
2529			spin_unlock_irqrestore(&mdp->lock, flags);
2530			return NETDEV_TX_BUSY;
2531		}
2532	}
2533	spin_unlock_irqrestore(&mdp->lock, flags);
2534
2535	if (skb_put_padto(skb, ETH_ZLEN))
2536		return NETDEV_TX_OK;
2537
2538	entry = mdp->cur_tx % mdp->num_tx_ring;
2539	mdp->tx_skbuff[entry] = skb;
2540	txdesc = &mdp->tx_ring[entry];
2541	/* soft swap: byte-swap the frame when the controller lacks hw_swap */
2542	if (!mdp->cd->hw_swap)
2543		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2544	dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
2545				  DMA_TO_DEVICE);
2546	if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
2547		kfree_skb(skb);
2548		return NETDEV_TX_OK;
2549	}
2550	txdesc->addr = cpu_to_le32(dma_addr);
2551	txdesc->len  = cpu_to_le32(skb->len << 16); /* buffer length in the upper 16 bits */
2552
2553	dma_wmb(); /* TACT bit must be set after all the above writes */
2554	if (entry >= mdp->num_tx_ring - 1)
2555		txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
2556	else
2557		txdesc->status |= cpu_to_le32(TD_TACT);
2558
2559	mdp->cur_tx++;
2560
2561	if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
2562		sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
2563
2564	return NETDEV_TX_OK;
2565}
2566
2567/* The statistics registers have write-clear behaviour, which means we
2568 * will lose any increment between the read and write.  We mitigate
2569 * this by only clearing when we read a non-zero value, so we will
2570 * never falsely report a total of zero.
2571 */
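/* For instance, if TROCR reads 3 we add 3 and write 0 back; an increment
 * landing between that read and write is the one we may lose.  A register
 * that reads 0 is never written, so a hit arriving there is kept intact.
 */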
2572static void
2573sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
2574{
2575	u32 delta = sh_eth_read(ndev, reg);
2576
2577	if (delta) {
2578		*stat += delta;
2579		sh_eth_write(ndev, 0, reg);
2580	}
2581}
2582
2583static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2584{
2585	struct sh_eth_private *mdp = netdev_priv(ndev);
2586
2587	if (mdp->cd->no_tx_cntrs)
2588		return &ndev->stats;
2589
2590	if (!mdp->is_opened)
2591		return &ndev->stats;
2592
2593	sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
2594	sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
2595	sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
2596
2597	if (mdp->cd->cexcr) {
2598		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2599				   CERCR);
2600		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2601				   CEECR);
2602	} else {
2603		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2604				   CNDCR);
2605	}
2606
2607	return &ndev->stats;
2608}
2609
2610/* device close function */
2611static int sh_eth_close(struct net_device *ndev)
2612{
2613	struct sh_eth_private *mdp = netdev_priv(ndev);
2614
2615	netif_stop_queue(ndev);
2616
2617	/* Serialise with the interrupt handler and NAPI, then disable
2618	 * interrupts.  We have to clear the irq_enabled flag first to
2619	 * ensure that interrupts won't be re-enabled.
2620	 */
2621	mdp->irq_enabled = false;
2622	synchronize_irq(ndev->irq);
2623	napi_disable(&mdp->napi);
2624	sh_eth_write(ndev, 0x0000, EESIPR);
2625
2626	sh_eth_dev_exit(ndev);
2627
2628	/* PHY Disconnect */
2629	if (ndev->phydev) {
2630		phy_stop(ndev->phydev);
2631		phy_disconnect(ndev->phydev);
2632	}
2633
2634	free_irq(ndev->irq, ndev);
2635
2636	/* Free all the skbuffs in the Rx queue and the DMA buffer. */
2637	sh_eth_ring_free(ndev);
2638
2639	pm_runtime_put_sync(&mdp->pdev->dev);
2640
2641	mdp->is_opened = 0;
2642
2643	return 0;
2644}
2645
2646/* ioctl to device function */
2647static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2648{
2649	struct phy_device *phydev = ndev->phydev;
2650
2651	if (!netif_running(ndev))
2652		return -EINVAL;
2653
2654	if (!phydev)
2655		return -ENODEV;
2656
2657	return phy_mii_ioctl(phydev, rq, cmd);
2658}
2659
2660static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
2661{
2662	if (netif_running(ndev))
2663		return -EBUSY;
2664
2665	ndev->mtu = new_mtu;
2666	netdev_update_features(ndev);
2667
2668	return 0;
2669}
2670
2671/* For TSU_POSTn. Please refer to the manual for these (strange) bitfields */
2672static u32 sh_eth_tsu_get_post_mask(int entry)
2673{
2674	return 0x0f << (28 - ((entry % 8) * 4));
2675}
2676
2677static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2678{
2679	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2680}
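
/* Illustration of the layout implied above: each TSU_POSTn register packs
 * eight CAM entries into 4-bit nibbles, entry (n % 8) == 0 occupying bits
 * 31:28.  For entry 5, the post mask is 0x0f << 8 = 0x00000f00, and the
 * per-port enable bit is 0x08 << 8 for port 0 or 0x02 << 8 for port 1.
 */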
2681
2682static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2683					     int entry)
2684{
2685	struct sh_eth_private *mdp = netdev_priv(ndev);
2686	int reg = TSU_POST1 + entry / 8;
2687	u32 tmp;
2688
2689	tmp = sh_eth_tsu_read(mdp, reg);
2690	sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg);
2691}
2692
2693static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2694					      int entry)
2695{
2696	struct sh_eth_private *mdp = netdev_priv(ndev);
2697	int reg = TSU_POST1 + entry / 8;
2698	u32 post_mask, ref_mask, tmp;
2699
2700	post_mask = sh_eth_tsu_get_post_mask(entry);
2701	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2702
2703	tmp = sh_eth_tsu_read(mdp, reg);
2704	sh_eth_tsu_write(mdp, tmp & ~post_mask, reg);
2705
2706	/* Return "true" if the other port still has this entry enabled */
2707	return tmp & ref_mask;
2708}
2709
2710static int sh_eth_tsu_busy(struct net_device *ndev)
2711{
2712	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2713	struct sh_eth_private *mdp = netdev_priv(ndev);
2714
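	/* Poll in 10 us steps: SH_ETH_TSU_TIMEOUT_MS * 100 iterations of
	 * udelay(10) add up to the timeout expressed in milliseconds.
	 */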
2715	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2716		udelay(10);
2717		timeout--;
2718		if (timeout <= 0) {
2719			netdev_err(ndev, "%s: timeout\n", __func__);
2720			return -ETIMEDOUT;
2721		}
2722	}
2723
2724	return 0;
2725}
2726
2727static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset,
2728				  const u8 *addr)
2729{
2730	struct sh_eth_private *mdp = netdev_priv(ndev);
2731	u32 val;
2732
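	/* A CAM entry is a register pair: the word at 'offset' holds the
	 * first four octets of the MAC address and the word at 'offset + 4'
	 * the remaining two, hence the 8-byte stride used by the callers.
	 */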
2733	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2734	iowrite32(val, mdp->tsu_addr + offset);
2735	if (sh_eth_tsu_busy(ndev) < 0)
2736		return -EBUSY;
2737
2738	val = addr[4] << 8 | addr[5];
2739	iowrite32(val, mdp->tsu_addr + offset + 4);
2740	if (sh_eth_tsu_busy(ndev) < 0)
2741		return -EBUSY;
2742
2743	return 0;
2744}
2745
2746static void sh_eth_tsu_read_entry(struct net_device *ndev, u16 offset, u8 *addr)
2747{
2748	struct sh_eth_private *mdp = netdev_priv(ndev);
2749	u32 val;
2750
2751	val = ioread32(mdp->tsu_addr + offset);
2752	addr[0] = (val >> 24) & 0xff;
2753	addr[1] = (val >> 16) & 0xff;
2754	addr[2] = (val >> 8) & 0xff;
2755	addr[3] = val & 0xff;
2756	val = ioread32(mdp->tsu_addr + offset + 4);
2757	addr[4] = (val >> 8) & 0xff;
2758	addr[5] = val & 0xff;
2759}
2760
2762static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2763{
2764	struct sh_eth_private *mdp = netdev_priv(ndev);
2765	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2766	int i;
2767	u8 c_addr[ETH_ALEN];
2768
2769	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2770		sh_eth_tsu_read_entry(ndev, reg_offset, c_addr);
2771		if (ether_addr_equal(addr, c_addr))
2772			return i;
2773	}
2774
2775	return -ENOENT;
2776}
2777
2778static int sh_eth_tsu_find_empty(struct net_device *ndev)
2779{
2780	u8 blank[ETH_ALEN];
2781	int entry;
2782
2783	memset(blank, 0, sizeof(blank));
2784	entry = sh_eth_tsu_find_entry(ndev, blank);
2785	return (entry < 0) ? -ENOMEM : entry;
2786}
2787
2788static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2789					      int entry)
2790{
2791	struct sh_eth_private *mdp = netdev_priv(ndev);
2792	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2793	int ret;
2794	u8 blank[ETH_ALEN];
2795
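	/* In TSU_TEN, bit 31 enables CAM entry 0, bit 30 entry 1, and so
	 * on, hence the (31 - entry) shift below.
	 */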
2796	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2797			 ~(1 << (31 - entry)), TSU_TEN);
2798
2799	memset(blank, 0, sizeof(blank));
2800	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2801	if (ret < 0)
2802		return ret;
2803	return 0;
2804}
2805
2806static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2807{
2808	struct sh_eth_private *mdp = netdev_priv(ndev);
2809	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2810	int i, ret;
2811
2812	if (!mdp->cd->tsu)
2813		return 0;
2814
2815	i = sh_eth_tsu_find_entry(ndev, addr);
2816	if (i < 0) {
2817		/* No entry found, create one */
2818		i = sh_eth_tsu_find_empty(ndev);
2819		if (i < 0)
2820			return -ENOMEM;
2821		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2822		if (ret < 0)
2823			return ret;
2824
2825		/* Enable the entry */
2826		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2827				 (1 << (31 - i)), TSU_TEN);
2828	}
2829
2830	/* Entry found or created, enable POST */
2831	sh_eth_tsu_enable_cam_entry_post(ndev, i);
2832
2833	return 0;
2834}
2835
2836static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2837{
2838	struct sh_eth_private *mdp = netdev_priv(ndev);
2839	int i, ret;
2840
2841	if (!mdp->cd->tsu)
2842		return 0;
2843
2844	i = sh_eth_tsu_find_entry(ndev, addr);
2845	if (i >= 0) {
2846		/* Entry found */
2847		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2848			goto done;
2849
2850		/* Disable the entry only if both ports have disabled it */
2851		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2852		if (ret < 0)
2853			return ret;
2854	}
2855done:
2856	return 0;
2857}
2858
2859static int sh_eth_tsu_purge_all(struct net_device *ndev)
2860{
2861	struct sh_eth_private *mdp = netdev_priv(ndev);
2862	int i, ret;
2863
2864	if (!mdp->cd->tsu)
2865		return 0;
2866
2867	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2868		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2869			continue;
2870
2871		/* Disable the entry only if both ports have disabled it */
2872		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2873		if (ret < 0)
2874			return ret;
2875	}
2876
2877	return 0;
2878}
2879
2880static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2881{
2882	struct sh_eth_private *mdp = netdev_priv(ndev);
2883	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2884	u8 addr[ETH_ALEN];
2885	int i;
2886
2887	if (!mdp->cd->tsu)
2888		return;
2889
2890	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2891		sh_eth_tsu_read_entry(ndev, reg_offset, addr);
2892		if (is_multicast_ether_addr(addr))
2893			sh_eth_tsu_del_entry(ndev, addr);
2894	}
2895}
2896
2897/* Update promiscuous flag and multicast filter */
2898static void sh_eth_set_rx_mode(struct net_device *ndev)
2899{
2900	struct sh_eth_private *mdp = netdev_priv(ndev);
2901	u32 ecmr_bits;
2902	int mcast_all = 0;
2903	unsigned long flags;
2904
2905	spin_lock_irqsave(&mdp->lock, flags);
2906	/* Initial condition is MCT = 1, PRM = 0.
2907	 * Depending on ndev->flags, set PRM or clear MCT
2908	 */
2909	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
2910	if (mdp->cd->tsu)
2911		ecmr_bits |= ECMR_MCT;
2912
2913	if (!(ndev->flags & IFF_MULTICAST)) {
2914		sh_eth_tsu_purge_mcast(ndev);
2915		mcast_all = 1;
2916	}
2917	if (ndev->flags & IFF_ALLMULTI) {
2918		sh_eth_tsu_purge_mcast(ndev);
2919		ecmr_bits &= ~ECMR_MCT;
2920		mcast_all = 1;
2921	}
2922
2923	if (ndev->flags & IFF_PROMISC) {
2924		sh_eth_tsu_purge_all(ndev);
2925		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2926	} else if (mdp->cd->tsu) {
2927		struct netdev_hw_addr *ha;
2928		netdev_for_each_mc_addr(ha, ndev) {
2929			if (mcast_all && is_multicast_ether_addr(ha->addr))
2930				continue;
2931
2932			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2933				if (!mcast_all) {
2934					sh_eth_tsu_purge_mcast(ndev);
2935					ecmr_bits &= ~ECMR_MCT;
2936					mcast_all = 1;
2937				}
2938			}
2939		}
2940	}
2941
2942	/* update the ethernet mode */
2943	sh_eth_write(ndev, ecmr_bits, ECMR);
2944
2945	spin_unlock_irqrestore(&mdp->lock, flags);
2946}
2947
2948static void sh_eth_set_rx_csum(struct net_device *ndev, bool enable)
2949{
2950	struct sh_eth_private *mdp = netdev_priv(ndev);
2951	unsigned long flags;
2952
2953	spin_lock_irqsave(&mdp->lock, flags);
2954
2955	/* Disable TX and RX */
2956	sh_eth_rcv_snd_disable(ndev);
2957
2958	/* Modify RX Checksum setting */
2959	sh_eth_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
2960
2961	/* Enable TX and RX */
2962	sh_eth_rcv_snd_enable(ndev);
2963
2964	spin_unlock_irqrestore(&mdp->lock, flags);
2965}
2966
2967static int sh_eth_set_features(struct net_device *ndev,
2968			       netdev_features_t features)
2969{
2970	netdev_features_t changed = ndev->features ^ features;
2971	struct sh_eth_private *mdp = netdev_priv(ndev);
2972
2973	if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum)
2974		sh_eth_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
2975
2976	ndev->features = features;
2977
2978	return 0;
2979}
2980
2981static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2982{
2983	if (!mdp->port)
2984		return TSU_VTAG0;
2985	else
2986		return TSU_VTAG1;
2987}
2988
2989static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2990				  __be16 proto, u16 vid)
2991{
2992	struct sh_eth_private *mdp = netdev_priv(ndev);
2993	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2994
2995	if (unlikely(!mdp->cd->tsu))
2996		return -EPERM;
2997
2998	/* No filtering if vid = 0 */
2999	if (!vid)
3000		return 0;
3001
3002	mdp->vlan_num_ids++;
3003
3004	/* The controller has one VLAN tag HW filter. So, if the filter is
3005	 * already enabled, the driver disables it and accepts all VLAN tags.
3006	 */
3007	if (mdp->vlan_num_ids > 1) {
3008		/* disable VLAN filter */
3009		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
3010		return 0;
3011	}
3012
3013	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
3014			 vtag_reg_index);
3015
3016	return 0;
3017}
3018
3019static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
3020				   __be16 proto, u16 vid)
3021{
3022	struct sh_eth_private *mdp = netdev_priv(ndev);
3023	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
3024
3025	if (unlikely(!mdp->cd->tsu))
3026		return -EPERM;
3027
3028	/* No filtering if vid = 0 */
3029	if (!vid)
3030		return 0;
3031
3032	mdp->vlan_num_ids--;
3033	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
3034
3035	return 0;
3036}
3037
3038/* SuperH's TSU register init function */
3039static void sh_eth_tsu_init(struct sh_eth_private *mdp)
3040{
3041	if (!mdp->cd->dual_port) {
3042		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entries */
3043		sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
3044				 TSU_FWSLC);	/* Enable POST registers */
3045		return;
3046	}
3047
3048	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
3049	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
3050	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
3051	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
3052	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
3053	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
3054	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
3055	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
3056	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
3057	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
3058	sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
3059	sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
3060	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* clear all interrupt status */
3061	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupts */
3062	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entries */
3063	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
3064	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
3065	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
3066	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
3067}
3068
3069/* MDIO bus release function */
3070static int sh_mdio_release(struct sh_eth_private *mdp)
3071{
3072	/* unregister mdio bus */
3073	mdiobus_unregister(mdp->mii_bus);
3074
3075	/* free bitbang info */
3076	free_mdio_bitbang(mdp->mii_bus);
3077
3078	return 0;
3079}
3080
3081/* MDIO bus init function */
3082static int sh_mdio_init(struct sh_eth_private *mdp,
3083			struct sh_eth_plat_data *pd)
3084{
3085	int ret;
3086	struct bb_info *bitbang;
3087	struct platform_device *pdev = mdp->pdev;
3088	struct device *dev = &mdp->pdev->dev;
3089
3090	/* create bit control struct for PHY */
3091	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
3092	if (!bitbang)
3093		return -ENOMEM;
3094
3095	/* bitbang init */
3096	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
3097	bitbang->set_gate = pd->set_mdio_gate;
3098	bitbang->ctrl.ops = &bb_ops;
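	/* MDIO is bit-banged through the PIR register mapped above; bb_ops
	 * (defined earlier in this file) supplies the mdiobb callbacks that
	 * toggle the MDC/MDIO pins.
	 */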
3099
3100	/* MII controller setting */
3101	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
3102	if (!mdp->mii_bus)
3103		return -ENOMEM;
3104
3105	/* Hook up MII support for ethtool */
3106	mdp->mii_bus->name = "sh_mii";
3107	mdp->mii_bus->parent = dev;
3108	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
3109		 pdev->name, pdev->id);
3110
3111	/* register MDIO bus */
3112	if (pd->phy_irq > 0)
3113		mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
3114
3115	ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
3116	if (ret)
3117		goto out_free_bus;
3118
3119	return 0;
3120
3121out_free_bus:
3122	free_mdio_bitbang(mdp->mii_bus);
3123	return ret;
3124}
3125
3126static const u16 *sh_eth_get_register_offset(int register_type)
3127{
3128	const u16 *reg_offset = NULL;
3129
3130	switch (register_type) {
3131	case SH_ETH_REG_GIGABIT:
3132		reg_offset = sh_eth_offset_gigabit;
3133		break;
3134	case SH_ETH_REG_FAST_RZ:
3135		reg_offset = sh_eth_offset_fast_rz;
3136		break;
3137	case SH_ETH_REG_FAST_RCAR:
3138		reg_offset = sh_eth_offset_fast_rcar;
3139		break;
3140	case SH_ETH_REG_FAST_SH4:
3141		reg_offset = sh_eth_offset_fast_sh4;
3142		break;
3143	case SH_ETH_REG_FAST_SH3_SH2:
3144		reg_offset = sh_eth_offset_fast_sh3_sh2;
3145		break;
3146	}
3147
3148	return reg_offset;
3149}
3150
3151static const struct net_device_ops sh_eth_netdev_ops = {
3152	.ndo_open		= sh_eth_open,
3153	.ndo_stop		= sh_eth_close,
3154	.ndo_start_xmit		= sh_eth_start_xmit,
3155	.ndo_get_stats		= sh_eth_get_stats,
3156	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
3157	.ndo_tx_timeout		= sh_eth_tx_timeout,
3158	.ndo_do_ioctl		= sh_eth_do_ioctl,
3159	.ndo_change_mtu		= sh_eth_change_mtu,
3160	.ndo_validate_addr	= eth_validate_addr,
3161	.ndo_set_mac_address	= eth_mac_addr,
3162	.ndo_set_features	= sh_eth_set_features,
3163};
3164
3165static const struct net_device_ops sh_eth_netdev_ops_tsu = {
3166	.ndo_open		= sh_eth_open,
3167	.ndo_stop		= sh_eth_close,
3168	.ndo_start_xmit		= sh_eth_start_xmit,
3169	.ndo_get_stats		= sh_eth_get_stats,
3170	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
3171	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
3172	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
3173	.ndo_tx_timeout		= sh_eth_tx_timeout,
3174	.ndo_do_ioctl		= sh_eth_do_ioctl,
3175	.ndo_change_mtu		= sh_eth_change_mtu,
3176	.ndo_validate_addr	= eth_validate_addr,
3177	.ndo_set_mac_address	= eth_mac_addr,
3178	.ndo_set_features	= sh_eth_set_features,
3179};
3180
3181#ifdef CONFIG_OF
3182static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3183{
3184	struct device_node *np = dev->of_node;
3185	struct sh_eth_plat_data *pdata;
3186	const char *mac_addr;
3187	int ret;
3188
3189	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3190	if (!pdata)
3191		return NULL;
3192
3193	ret = of_get_phy_mode(np);
3194	if (ret < 0)
3195		return NULL;
3196	pdata->phy_interface = ret;
3197
3198	mac_addr = of_get_mac_address(np);
3199	if (!IS_ERR(mac_addr))
3200		ether_addr_copy(pdata->mac_addr, mac_addr);
3201
3202	pdata->no_ether_link =
3203		of_property_read_bool(np, "renesas,no-ether-link");
3204	pdata->ether_link_active_low =
3205		of_property_read_bool(np, "renesas,ether-link-active-low");
3206
3207	return pdata;
3208}
3209
3210static const struct of_device_id sh_eth_match_table[] = {
3211	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
3212	{ .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data },
3213	{ .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data },
3214	{ .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data },
3215	{ .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data },
3216	{ .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data },
3217	{ .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
3218	{ .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
3219	{ .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
3220	{ .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
3221	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
3222	{ .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
3223	{ .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
3224	{ .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
3225	{ }
3226};
3227MODULE_DEVICE_TABLE(of, sh_eth_match_table);
3228#else
3229static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3230{
3231	return NULL;
3232}
3233#endif
3234
3235static int sh_eth_drv_probe(struct platform_device *pdev)
3236{
3237	struct resource *res;
3238	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
3239	const struct platform_device_id *id = platform_get_device_id(pdev);
3240	struct sh_eth_private *mdp;
3241	struct net_device *ndev;
3242	int ret;
3243
3244	/* get base addr */
3245	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3246
3247	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
3248	if (!ndev)
3249		return -ENOMEM;
3250
3251	pm_runtime_enable(&pdev->dev);
3252	pm_runtime_get_sync(&pdev->dev);
3253
3254	ret = platform_get_irq(pdev, 0);
3255	if (ret < 0)
3256		goto out_release;
3257	ndev->irq = ret;
3258
3259	SET_NETDEV_DEV(ndev, &pdev->dev);
3260
3261	mdp = netdev_priv(ndev);
3262	mdp->num_tx_ring = TX_RING_SIZE;
3263	mdp->num_rx_ring = RX_RING_SIZE;
3264	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
3265	if (IS_ERR(mdp->addr)) {
3266		ret = PTR_ERR(mdp->addr);
3267		goto out_release;
3268	}
3269
3270	ndev->base_addr = res->start;
3271
3272	spin_lock_init(&mdp->lock);
3273	mdp->pdev = pdev;
3274
3275	if (pdev->dev.of_node)
3276		pd = sh_eth_parse_dt(&pdev->dev);
3277	if (!pd) {
3278		dev_err(&pdev->dev, "no platform data\n");
3279		ret = -EINVAL;
3280		goto out_release;
3281	}
3282
3283	/* get PHY ID */
3284	mdp->phy_id = pd->phy;
3285	mdp->phy_interface = pd->phy_interface;
3286	mdp->no_ether_link = pd->no_ether_link;
3287	mdp->ether_link_active_low = pd->ether_link_active_low;
3288
3289	/* set cpu data */
3290	if (id)
3291		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3292	else
3293		mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
3294
3295	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3296	if (!mdp->reg_offset) {
3297		dev_err(&pdev->dev, "Unknown register type (%d)\n",
3298			mdp->cd->register_type);
3299		ret = -EINVAL;
3300		goto out_release;
3301	}
3302	sh_eth_set_default_cpu_data(mdp->cd);
3303
3304	/* User's manual states max MTU should be 2048 but due to the
3305	 * alignment calculations in sh_eth_ring_init() the practical
3306	 * MTU is a bit less. Maybe this can be optimized some more.
3307	 */
3308	ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
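	/* For reference: ETH_HLEN (14) + VLAN_HLEN (4) + ETH_FCS_LEN (4)
	 * is 22 bytes of overhead, so max_mtu evaluates to 1978 here.
	 */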
3309	ndev->min_mtu = ETH_MIN_MTU;
3310
3311	if (mdp->cd->rx_csum) {
3312		ndev->features = NETIF_F_RXCSUM;
3313		ndev->hw_features = NETIF_F_RXCSUM;
3314	}
3315
3316	/* set function */
3317	if (mdp->cd->tsu)
3318		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
3319	else
3320		ndev->netdev_ops = &sh_eth_netdev_ops;
3321	ndev->ethtool_ops = &sh_eth_ethtool_ops;
3322	ndev->watchdog_timeo = TX_TIMEOUT;
3323
3324	/* debug message level */
3325	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
3326
3327	/* read and set MAC address */
3328	read_mac_address(ndev, pd->mac_addr);
3329	if (!is_valid_ether_addr(ndev->dev_addr)) {
3330		dev_warn(&pdev->dev,
3331			 "no valid MAC address supplied, using a random one.\n");
3332		eth_hw_addr_random(ndev);
3333	}
3334
3335	if (mdp->cd->tsu) {
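		/* The two ports of a dual-port device share one TSU; an
		 * even/odd pdev->id selects port 0 or port 1.
		 */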
3336		int port = pdev->id < 0 ? 0 : pdev->id % 2;
3337		struct resource *rtsu;
3338
3339		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3340		if (!rtsu) {
3341			dev_err(&pdev->dev, "no TSU resource\n");
3342			ret = -ENODEV;
3343			goto out_release;
3344		}
3345		/* We can only request the TSU region for the first of the
3346		 * two ports sharing this TSU, for the probe to succeed...
3347		 */
3348		if (port == 0 &&
3349		    !devm_request_mem_region(&pdev->dev, rtsu->start,
3350					     resource_size(rtsu),
3351					     dev_name(&pdev->dev))) {
3352			dev_err(&pdev->dev, "can't request TSU resource.\n");
3353			ret = -EBUSY;
3354			goto out_release;
3355		}
3356		/* ioremap the TSU registers */
3357		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
3358					     resource_size(rtsu));
3359		if (!mdp->tsu_addr) {
3360			dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
3361			ret = -ENOMEM;
3362			goto out_release;
3363		}
3364		mdp->port = port;
3365		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3366
3367		/* Need to init only the first port of the two sharing a TSU */
3368		if (port == 0) {
3369			if (mdp->cd->chip_reset)
3370				mdp->cd->chip_reset(ndev);
3371
3372			/* TSU init (Init only) */
3373			sh_eth_tsu_init(mdp);
3374		}
3375	}
3376
3377	if (mdp->cd->rmiimode)
3378		sh_eth_write(ndev, 0x1, RMIIMODE);
3379
3380	/* MDIO bus init */
3381	ret = sh_mdio_init(mdp, pd);
3382	if (ret) {
3383		if (ret != -EPROBE_DEFER)
3384			dev_err(&pdev->dev, "MDIO init failed: %d\n", ret);
3385		goto out_release;
3386	}
3387
3388	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
3389
3390	/* network device register */
3391	ret = register_netdev(ndev);
3392	if (ret)
3393		goto out_napi_del;
3394
3395	if (mdp->cd->magic)
3396		device_set_wakeup_capable(&pdev->dev, 1);
3397
3398	/* print device information */
3399	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
3400		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
3401
3402	pm_runtime_put(&pdev->dev);
3403	platform_set_drvdata(pdev, ndev);
3404
3405	return ret;
3406
3407out_napi_del:
3408	netif_napi_del(&mdp->napi);
3409	sh_mdio_release(mdp);
3410
3411out_release:
3412	/* net_dev free */
3413	free_netdev(ndev);
3414
3415	pm_runtime_put(&pdev->dev);
3416	pm_runtime_disable(&pdev->dev);
3417	return ret;
3418}
3419
3420static int sh_eth_drv_remove(struct platform_device *pdev)
3421{
3422	struct net_device *ndev = platform_get_drvdata(pdev);
3423	struct sh_eth_private *mdp = netdev_priv(ndev);
3424
3425	unregister_netdev(ndev);
3426	netif_napi_del(&mdp->napi);
3427	sh_mdio_release(mdp);
3428	pm_runtime_disable(&pdev->dev);
3429	free_netdev(ndev);
3430
3431	return 0;
3432}
3433
3434#ifdef CONFIG_PM
3435#ifdef CONFIG_PM_SLEEP
3436static int sh_eth_wol_setup(struct net_device *ndev)
3437{
3438	struct sh_eth_private *mdp = netdev_priv(ndev);
3439
3440	/* Only allow ECI interrupts */
3441	synchronize_irq(ndev->irq);
3442	napi_disable(&mdp->napi);
3443	sh_eth_write(ndev, EESIPR_ECIIP, EESIPR);
3444
3445	/* Enable MagicPacket */
3446	sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
3447
3448	return enable_irq_wake(ndev->irq);
3449}
3450
3451static int sh_eth_wol_restore(struct net_device *ndev)
3452{
3453	struct sh_eth_private *mdp = netdev_priv(ndev);
3454	int ret;
3455
3456	napi_enable(&mdp->napi);
3457
3458	/* Disable MagicPacket */
3459	sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0);
3460
3461	/* The device needs to be reset to restore MagicPacket logic
3462	 * for next wakeup. If we close and open the device it will
3463	 * both be reset and all registers restored. This is what
3464	 * happens during suspend and resume without WoL enabled.
3465	 */
3466	ret = sh_eth_close(ndev);
3467	if (ret < 0)
3468		return ret;
3469	ret = sh_eth_open(ndev);
3470	if (ret < 0)
3471		return ret;
3472
3473	return disable_irq_wake(ndev->irq);
3474}
3475
3476static int sh_eth_suspend(struct device *dev)
3477{
3478	struct net_device *ndev = dev_get_drvdata(dev);
3479	struct sh_eth_private *mdp = netdev_priv(ndev);
3480	int ret = 0;
3481
3482	if (!netif_running(ndev))
3483		return 0;
3484
3485	netif_device_detach(ndev);
3486
3487	if (mdp->wol_enabled)
3488		ret = sh_eth_wol_setup(ndev);
3489	else
3490		ret = sh_eth_close(ndev);
3491
3492	return ret;
3493}
3494
3495static int sh_eth_resume(struct device *dev)
3496{
3497	struct net_device *ndev = dev_get_drvdata(dev);
3498	struct sh_eth_private *mdp = netdev_priv(ndev);
3499	int ret = 0;
3500
3501	if (!netif_running(ndev))
3502		return 0;
3503
3504	if (mdp->wol_enabled)
3505		ret = sh_eth_wol_restore(ndev);
3506	else
3507		ret = sh_eth_open(ndev);
3508
3509	if (ret < 0)
3510		return ret;
3511
3512	netif_device_attach(ndev);
3513
3514	return ret;
3515}
3516#endif
3517
3518static int sh_eth_runtime_nop(struct device *dev)
3519{
3520	/* Runtime PM callback shared between ->runtime_suspend()
3521	 * and ->runtime_resume(). Simply returns success.
3522	 *
3523	 * This driver re-initializes all registers after
3524	 * pm_runtime_get_sync() anyway so there is no need
3525	 * to save and restore registers here.
3526	 */
3527	return 0;
3528}
3529
3530static const struct dev_pm_ops sh_eth_dev_pm_ops = {
3531	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
3532	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
3533};
3534#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
3535#else
3536#define SH_ETH_PM_OPS NULL
3537#endif
3538
3539static const struct platform_device_id sh_eth_id_table[] = {
3540	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
3541	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
3542	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
3543	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
3544	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
3545	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
3546	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
3547	{ }
3548};
3549MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
3550
3551static struct platform_driver sh_eth_driver = {
3552	.probe = sh_eth_drv_probe,
3553	.remove = sh_eth_drv_remove,
3554	.id_table = sh_eth_id_table,
3555	.driver = {
3556		   .name = CARDNAME,
3557		   .pm = SH_ETH_PM_OPS,
3558		   .of_match_table = of_match_ptr(sh_eth_match_table),
3559	},
3560};
3561
3562module_platform_driver(sh_eth_driver);
3563
3564MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
3565MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3566MODULE_LICENSE("GPL v2");